*** empty log message ***
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
... / ...
CommitLineData
1/* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23/* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30#include "as.h"
31#include "safe-ctype.h"
32#include "subsegs.h"
33#include "dwarf2dbg.h"
34#include "dw2gencfi.h"
35#include "elf/x86-64.h"
36#include "opcodes/i386-init.h"
37
38#ifndef REGISTER_WARNINGS
39#define REGISTER_WARNINGS 1
40#endif
41
42#ifndef INFER_ADDR_PREFIX
43#define INFER_ADDR_PREFIX 1
44#endif
45
46#ifndef DEFAULT_ARCH
47#define DEFAULT_ARCH "i386"
48#endif
49
50#ifndef INLINE
51#if __GNUC__ >= 2
52#define INLINE __inline__
53#else
54#define INLINE
55#endif
56#endif
57
58/* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT is really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63#define WAIT_PREFIX 0
64#define SEG_PREFIX 1
65#define ADDR_PREFIX 2
66#define DATA_PREFIX 3
67#define REP_PREFIX 4
68#define LOCK_PREFIX 5
69#define REX_PREFIX 6 /* must come last. */
70#define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72/* we define the syntax here (modulo base,index,scale syntax) */
73#define REGISTER_PREFIX '%'
74#define IMMEDIATE_PREFIX '$'
75#define ABSOLUTE_PREFIX '*'
76
77/* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79#define WORD_MNEM_SUFFIX 'w'
80#define BYTE_MNEM_SUFFIX 'b'
81#define SHORT_MNEM_SUFFIX 's'
82#define LONG_MNEM_SUFFIX 'l'
83#define QWORD_MNEM_SUFFIX 'q'
84#define XMMWORD_MNEM_SUFFIX 'x'
85#define YMMWORD_MNEM_SUFFIX 'y'
86/* Intel Syntax. Use a non-ascii letter since since it never appears
87 in instructions. */
88#define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90#define END_OF_INSN '\0'
91
92/*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99typedef struct
100{
101 const insn_template *start;
102 const insn_template *end;
103}
104templates;
105
106/* 386 operand encoding bytes: see 386 book for details of this. */
107typedef struct
108{
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112}
113modrm_byte;
114
115/* x86-64 extension prefix. */
116typedef int rex_byte;
117
118/* 386 opcode byte to code indirect addressing. */
119typedef struct
120{
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124}
125sib_byte;
126
127/* x86 arch names, types and features */
128typedef struct
129{
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
136}
137arch_entry;
138
139static void update_code_flag (int, int);
140static void set_code_flag (int);
141static void set_16bit_gcc_code_flag (int);
142static void set_intel_syntax (int);
143static void set_intel_mnemonic (int);
144static void set_allow_index_reg (int);
145static void set_sse_check (int);
146static void set_cpu_arch (int);
147#ifdef TE_PE
148static void pe_directive_secrel (int);
149#endif
150static void signed_cons (int);
151static char *output_invalid (int c);
152static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156static int i386_att_operand (char *);
157static int i386_intel_operand (char *, int);
158static int i386_intel_simplify (expressionS *);
159static int i386_intel_parse_name (const char *, expressionS *);
160static const reg_entry *parse_register (char *, char **);
161static char *parse_insn (char *, char *);
162static char *parse_operands (char *, const char *);
163static void swap_operands (void);
164static void swap_2_operands (int, int);
165static void optimize_imm (void);
166static void optimize_disp (void);
167static const insn_template *match_template (void);
168static int check_string (void);
169static int process_suffix (void);
170static int check_byte_reg (void);
171static int check_long_reg (void);
172static int check_qword_reg (void);
173static int check_word_reg (void);
174static int finalize_imm (void);
175static int process_operands (void);
176static const seg_entry *build_modrm_byte (void);
177static void output_insn (void);
178static void output_imm (fragS *, offsetT);
179static void output_disp (fragS *, offsetT);
180#ifndef I386COFF
181static void s_bss (int);
182#endif
183#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184static void handle_large_common (int small ATTRIBUTE_UNUSED);
185#endif
186
187static const char *default_arch = DEFAULT_ARCH;
188
189/* VEX prefix. */
190typedef struct
191{
192 /* VEX prefix is either 2 byte or 3 byte. */
193 unsigned char bytes[3];
194 unsigned int length;
195 /* Destination or source register specifier. */
196 const reg_entry *register_specifier;
197} vex_prefix;
198
199/* 'md_assemble ()' gathers together information and puts it into a
200 i386_insn. */
201
202union i386_op
203 {
204 expressionS *disps;
205 expressionS *imms;
206 const reg_entry *regs;
207 };
208
209enum i386_error
210 {
211 operand_size_mismatch,
212 operand_type_mismatch,
213 register_type_mismatch,
214 number_of_operands_mismatch,
215 invalid_instruction_suffix,
216 bad_imm4,
217 old_gcc_only,
218 unsupported_with_intel_mnemonic,
219 unsupported_syntax,
220 unsupported
221 };
222
223struct _i386_insn
224 {
225 /* TM holds the template for the insn were currently assembling. */
226 insn_template tm;
227
228 /* SUFFIX holds the instruction size suffix for byte, word, dword
229 or qword, if given. */
230 char suffix;
231
232 /* OPERANDS gives the number of given operands. */
233 unsigned int operands;
234
235 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
236 of given register, displacement, memory operands and immediate
237 operands. */
238 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
239
240 /* TYPES [i] is the type (see above #defines) which tells us how to
241 use OP[i] for the corresponding operand. */
242 i386_operand_type types[MAX_OPERANDS];
243
244 /* Displacement expression, immediate expression, or register for each
245 operand. */
246 union i386_op op[MAX_OPERANDS];
247
248 /* Flags for operands. */
249 unsigned int flags[MAX_OPERANDS];
250#define Operand_PCrel 1
251
252 /* Relocation type for operand */
253 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
254
255 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
256 the base index byte below. */
257 const reg_entry *base_reg;
258 const reg_entry *index_reg;
259 unsigned int log2_scale_factor;
260
261 /* SEG gives the seg_entries of this insn. They are zero unless
262 explicit segment overrides are given. */
263 const seg_entry *seg[2];
264
265 /* PREFIX holds all the given prefix opcodes (usually null).
266 PREFIXES is the number of prefix opcodes. */
267 unsigned int prefixes;
268 unsigned char prefix[MAX_PREFIXES];
269
270 /* RM and SIB are the modrm byte and the sib byte where the
271 addressing modes of this insn are encoded. */
272 modrm_byte rm;
273 rex_byte rex;
274 sib_byte sib;
275 vex_prefix vex;
276
277 /* Swap operand in encoding. */
278 unsigned int swap_operand;
279
280 /* Force 32bit displacement in encoding. */
281 unsigned int disp32_encoding;
282
283 /* Error message. */
284 enum i386_error error;
285 };
286
287typedef struct _i386_insn i386_insn;
288
289/* List of chars besides those in app.c:symbol_chars that can start an
290 operand. Used to prevent the scrubber eating vital white-space. */
291const char extra_symbol_chars[] = "*%-(["
292#ifdef LEX_AT
293 "@"
294#endif
295#ifdef LEX_QM
296 "?"
297#endif
298 ;
299
300#if (defined (TE_I386AIX) \
301 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
302 && !defined (TE_GNU) \
303 && !defined (TE_LINUX) \
304 && !defined (TE_NETWARE) \
305 && !defined (TE_FreeBSD) \
306 && !defined (TE_NetBSD)))
307/* This array holds the chars that always start a comment. If the
308 pre-processor is disabled, these aren't very useful. The option
309 --divide will remove '/' from this list. */
310const char *i386_comment_chars = "#/";
311#define SVR4_COMMENT_CHARS 1
312#define PREFIX_SEPARATOR '\\'
313
314#else
315const char *i386_comment_chars = "#";
316#define PREFIX_SEPARATOR '/'
317#endif
318
319/* This array holds the chars that only start a comment at the beginning of
320 a line. If the line seems to have the form '# 123 filename'
321 .line and .file directives will appear in the pre-processed output.
322 Note that input_file.c hand checks for '#' at the beginning of the
323 first line of the input file. This is because the compiler outputs
324 #NO_APP at the beginning of its output.
325 Also note that comments started like this one will always work if
326 '/' isn't otherwise defined. */
327const char line_comment_chars[] = "#/";
328
329const char line_separator_chars[] = ";";
330
331/* Chars that can be used to separate mant from exp in floating point
332 nums. */
333const char EXP_CHARS[] = "eE";
334
335/* Chars that mean this number is a floating point constant
336 As in 0f12.456
337 or 0d1.2345e12. */
338const char FLT_CHARS[] = "fFdDxX";
339
340/* Tables for lexical analysis. */
341static char mnemonic_chars[256];
342static char register_chars[256];
343static char operand_chars[256];
344static char identifier_chars[256];
345static char digit_chars[256];
346
347/* Lexical macros. */
348#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
349#define is_operand_char(x) (operand_chars[(unsigned char) x])
350#define is_register_char(x) (register_chars[(unsigned char) x])
351#define is_space_char(x) ((x) == ' ')
352#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
353#define is_digit_char(x) (digit_chars[(unsigned char) x])
354
355/* All non-digit non-letter characters that may occur in an operand. */
356static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
357
358/* md_assemble() always leaves the strings it's passed unaltered. To
359 effect this we maintain a stack of saved characters that we've smashed
360 with '\0's (indicating end of strings for various sub-fields of the
361 assembler instruction). */
362static char save_stack[32];
363static char *save_stack_p;
364#define END_STRING_AND_SAVE(s) \
365 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
366#define RESTORE_END_STRING(s) \
367 do { *(s) = *--save_stack_p; } while (0)
368
369/* The instruction we're assembling. */
370static i386_insn i;
371
372/* Possible templates for current insn. */
373static const templates *current_templates;
374
375/* Per instruction expressionS buffers: max displacements & immediates. */
376static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
377static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
378
379/* Current operand we are working on. */
380static int this_operand = -1;
381
382/* We support four different modes. FLAG_CODE variable is used to distinguish
383 these. */
384
385enum flag_code {
386 CODE_32BIT,
387 CODE_16BIT,
388 CODE_64BIT };
389
390static enum flag_code flag_code;
391static unsigned int object_64bit;
392static unsigned int disallow_64bit_disp;
393static int use_rela_relocations = 0;
394
395#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
396 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
397 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
398
399/* The ELF ABI to use. */
400enum x86_elf_abi
401{
402 I386_ABI,
403 X86_64_ABI,
404 X86_64_X32_ABI
405};
406
407static enum x86_elf_abi x86_elf_abi = I386_ABI;
408#endif
409
410/* The names used to print error messages. */
411static const char *flag_code_names[] =
412 {
413 "32",
414 "16",
415 "64"
416 };
417
418/* 1 for intel syntax,
419 0 if att syntax. */
420static int intel_syntax = 0;
421
422/* 1 for intel mnemonic,
423 0 if att mnemonic. */
424static int intel_mnemonic = !SYSV386_COMPAT;
425
426/* 1 if support old (<= 2.8.1) versions of gcc. */
427static int old_gcc = OLDGCC_COMPAT;
428
429/* 1 if pseudo registers are permitted. */
430static int allow_pseudo_reg = 0;
431
432/* 1 if register prefix % not required. */
433static int allow_naked_reg = 0;
434
435/* 1 if pseudo index register, eiz/riz, is allowed . */
436static int allow_index_reg = 0;
437
438static enum
439 {
440 sse_check_none = 0,
441 sse_check_warning,
442 sse_check_error
443 }
444sse_check;
445
446/* Register prefix used for error message. */
447static const char *register_prefix = "%";
448
449/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
450 leave, push, and pop instructions so that gcc has the same stack
451 frame as in 32 bit mode. */
452static char stackop_size = '\0';
453
454/* Non-zero to optimize code alignment. */
455int optimize_align_code = 1;
456
457/* Non-zero to quieten some warnings. */
458static int quiet_warnings = 0;
459
460/* CPU name. */
461static const char *cpu_arch_name = NULL;
462static char *cpu_sub_arch_name = NULL;
463
464/* CPU feature flags. */
465static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
466
467/* If we have selected a cpu we are generating instructions for. */
468static int cpu_arch_tune_set = 0;
469
470/* Cpu we are generating instructions for. */
471enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
472
473/* CPU feature flags of cpu we are generating instructions for. */
474static i386_cpu_flags cpu_arch_tune_flags;
475
476/* CPU instruction set architecture used. */
477enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
478
479/* CPU feature flags of instruction set architecture used. */
480i386_cpu_flags cpu_arch_isa_flags;
481
482/* If set, conditional jumps are not automatically promoted to handle
483 larger than a byte offset. */
484static unsigned int no_cond_jump_promotion = 0;
485
486/* Encode SSE instructions with VEX prefix. */
487static unsigned int sse2avx;
488
489/* Encode scalar AVX instructions with specific vector length. */
490static enum
491 {
492 vex128 = 0,
493 vex256
494 } avxscalar;
495
496/* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
497static symbolS *GOT_symbol;
498
499/* The dwarf2 return column, adjusted for 32 or 64 bit. */
500unsigned int x86_dwarf2_return_column;
501
502/* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
503int x86_cie_data_alignment;
504
505/* Interface to relax_segment.
506 There are 3 major relax states for 386 jump insns because the
507 different types of jumps add different sizes to frags when we're
508 figuring out what sort of jump to choose to reach a given label. */
509
510/* Types. */
511#define UNCOND_JUMP 0
512#define COND_JUMP 1
513#define COND_JUMP86 2
514
515/* Sizes. */
516#define CODE16 1
517#define SMALL 0
518#define SMALL16 (SMALL | CODE16)
519#define BIG 2
520#define BIG16 (BIG | CODE16)
521
522#ifndef INLINE
523#ifdef __GNUC__
524#define INLINE __inline__
525#else
526#define INLINE
527#endif
528#endif
529
530#define ENCODE_RELAX_STATE(type, size) \
531 ((relax_substateT) (((type) << 2) | (size)))
532#define TYPE_FROM_RELAX_STATE(s) \
533 ((s) >> 2)
534#define DISP_SIZE_FROM_RELAX_STATE(s) \
535 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
536
537/* This table is used by relax_frag to promote short jumps to long
538 ones where necessary. SMALL (short) jumps may be promoted to BIG
539 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
540 don't allow a short jump in a 32 bit code segment to be promoted to
541 a 16 bit offset jump because it's slower (requires data size
542 prefix), and doesn't work, unless the destination is in the bottom
543 64k of the code segment (The top 16 bits of eip are zeroed). */
544
545const relax_typeS md_relax_table[] =
546{
547 /* The fields are:
548 1) most positive reach of this state,
549 2) most negative reach of this state,
550 3) how many bytes this mode will have in the variable part of the frag
551 4) which index into the table to try if we can't fit into this one. */
552
553 /* UNCOND_JUMP states. */
554 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
555 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
556 /* dword jmp adds 4 bytes to frag:
557 0 extra opcode bytes, 4 displacement bytes. */
558 {0, 0, 4, 0},
559 /* word jmp adds 2 byte2 to frag:
560 0 extra opcode bytes, 2 displacement bytes. */
561 {0, 0, 2, 0},
562
563 /* COND_JUMP states. */
564 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
565 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
566 /* dword conditionals adds 5 bytes to frag:
567 1 extra opcode byte, 4 displacement bytes. */
568 {0, 0, 5, 0},
569 /* word conditionals add 3 bytes to frag:
570 1 extra opcode byte, 2 displacement bytes. */
571 {0, 0, 3, 0},
572
573 /* COND_JUMP86 states. */
574 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
575 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
576 /* dword conditionals adds 5 bytes to frag:
577 1 extra opcode byte, 4 displacement bytes. */
578 {0, 0, 5, 0},
579 /* word conditionals add 4 bytes to frag:
580 1 displacement byte and a 3 byte long branch insn. */
581 {0, 0, 4, 0}
582};
583
584static const arch_entry cpu_arch[] =
585{
586 /* Do not replace the first two entries - i386_target_format()
587 relies on them being there in this order. */
588 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
589 CPU_GENERIC32_FLAGS, 0, 0 },
590 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
591 CPU_GENERIC64_FLAGS, 0, 0 },
592 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
593 CPU_NONE_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
595 CPU_I186_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
597 CPU_I286_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
599 CPU_I386_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
601 CPU_I486_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
603 CPU_I586_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
605 CPU_I686_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
607 CPU_I586_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
609 CPU_PENTIUMPRO_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
611 CPU_P2_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
613 CPU_P3_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
615 CPU_P4_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
617 CPU_CORE_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
619 CPU_NOCONA_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
621 CPU_CORE_FLAGS, 1, 0 },
622 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
623 CPU_CORE_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
625 CPU_CORE2_FLAGS, 1, 0 },
626 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
627 CPU_CORE2_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
629 CPU_COREI7_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
631 CPU_L1OM_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
633 CPU_K6_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
635 CPU_K6_2_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
637 CPU_ATHLON_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
639 CPU_K8_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
641 CPU_K8_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
643 CPU_K8_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
645 CPU_AMDFAM10_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
647 CPU_BDVER1_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
649 CPU_8087_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
651 CPU_287_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
653 CPU_387_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
655 CPU_ANY87_FLAGS, 0, 1 },
656 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
657 CPU_MMX_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
659 CPU_3DNOWA_FLAGS, 0, 1 },
660 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
661 CPU_SSE_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
663 CPU_SSE2_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
665 CPU_SSE3_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
667 CPU_SSSE3_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
669 CPU_SSE4_1_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
671 CPU_SSE4_2_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
673 CPU_SSE4_2_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
675 CPU_ANY_SSE_FLAGS, 0, 1 },
676 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
677 CPU_AVX_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
679 CPU_ANY_AVX_FLAGS, 0, 1 },
680 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
681 CPU_VMX_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
683 CPU_SMX_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
685 CPU_XSAVE_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
687 CPU_XSAVEOPT_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
689 CPU_AES_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
691 CPU_PCLMUL_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
693 CPU_PCLMUL_FLAGS, 1, 0 },
694 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
695 CPU_FSGSBASE_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
697 CPU_RDRND_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
699 CPU_F16C_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
701 CPU_FMA_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
703 CPU_FMA4_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
705 CPU_XOP_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
707 CPU_LWP_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
709 CPU_MOVBE_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
711 CPU_EPT_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
713 CPU_CLFLUSH_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
715 CPU_NOP_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
717 CPU_SYSCALL_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
719 CPU_RDTSCP_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
721 CPU_3DNOW_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
723 CPU_3DNOWA_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
725 CPU_PADLOCK_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
727 CPU_SVME_FLAGS, 1, 0 },
728 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
729 CPU_SVME_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
731 CPU_SSE4A_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
733 CPU_ABM_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
735 CPU_BMI_FLAGS, 0, 0 },
736};
737
738#ifdef I386COFF
739/* Like s_lcomm_internal in gas/read.c but the alignment string
740 is allowed to be optional. */
741
742static symbolS *
743pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
744{
745 addressT align = 0;
746
747 SKIP_WHITESPACE ();
748
749 if (needs_align
750 && *input_line_pointer == ',')
751 {
752 align = parse_align (needs_align - 1);
753
754 if (align == (addressT) -1)
755 return NULL;
756 }
757 else
758 {
759 if (size >= 8)
760 align = 3;
761 else if (size >= 4)
762 align = 2;
763 else if (size >= 2)
764 align = 1;
765 else
766 align = 0;
767 }
768
769 bss_alloc (symbolP, size, align);
770 return symbolP;
771}
772
773static void
774pe_lcomm (int needs_align)
775{
776 s_comm_internal (needs_align * 2, pe_lcomm_internal);
777}
778#endif
779
780const pseudo_typeS md_pseudo_table[] =
781{
782#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
783 {"align", s_align_bytes, 0},
784#else
785 {"align", s_align_ptwo, 0},
786#endif
787 {"arch", set_cpu_arch, 0},
788#ifndef I386COFF
789 {"bss", s_bss, 0},
790#else
791 {"lcomm", pe_lcomm, 1},
792#endif
793 {"ffloat", float_cons, 'f'},
794 {"dfloat", float_cons, 'd'},
795 {"tfloat", float_cons, 'x'},
796 {"value", cons, 2},
797 {"slong", signed_cons, 4},
798 {"noopt", s_ignore, 0},
799 {"optim", s_ignore, 0},
800 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
801 {"code16", set_code_flag, CODE_16BIT},
802 {"code32", set_code_flag, CODE_32BIT},
803 {"code64", set_code_flag, CODE_64BIT},
804 {"intel_syntax", set_intel_syntax, 1},
805 {"att_syntax", set_intel_syntax, 0},
806 {"intel_mnemonic", set_intel_mnemonic, 1},
807 {"att_mnemonic", set_intel_mnemonic, 0},
808 {"allow_index_reg", set_allow_index_reg, 1},
809 {"disallow_index_reg", set_allow_index_reg, 0},
810 {"sse_check", set_sse_check, 0},
811#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
812 {"largecomm", handle_large_common, 0},
813#else
814 {"file", (void (*) (int)) dwarf2_directive_file, 0},
815 {"loc", dwarf2_directive_loc, 0},
816 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
817#endif
818#ifdef TE_PE
819 {"secrel32", pe_directive_secrel, 0},
820#endif
821 {0, 0, 0}
822};
823
824/* For interface with expression (). */
825extern char *input_line_pointer;
826
827/* Hash table for instruction mnemonic lookup. */
828static struct hash_control *op_hash;
829
830/* Hash table for register lookup. */
831static struct hash_control *reg_hash;
832\f
833void
834i386_align_code (fragS *fragP, int count)
835{
836 /* Various efficient no-op patterns for aligning code labels.
837 Note: Don't try to assemble the instructions in the comments.
838 0L and 0w are not legal. */
839 static const char f32_1[] =
840 {0x90}; /* nop */
841 static const char f32_2[] =
842 {0x66,0x90}; /* xchg %ax,%ax */
843 static const char f32_3[] =
844 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
845 static const char f32_4[] =
846 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
847 static const char f32_5[] =
848 {0x90, /* nop */
849 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
850 static const char f32_6[] =
851 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
852 static const char f32_7[] =
853 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
854 static const char f32_8[] =
855 {0x90, /* nop */
856 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
857 static const char f32_9[] =
858 {0x89,0xf6, /* movl %esi,%esi */
859 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
860 static const char f32_10[] =
861 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
862 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
863 static const char f32_11[] =
864 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
865 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
866 static const char f32_12[] =
867 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
868 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
869 static const char f32_13[] =
870 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
871 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
872 static const char f32_14[] =
873 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
874 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
875 static const char f16_3[] =
876 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
877 static const char f16_4[] =
878 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
879 static const char f16_5[] =
880 {0x90, /* nop */
881 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
882 static const char f16_6[] =
883 {0x89,0xf6, /* mov %si,%si */
884 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
885 static const char f16_7[] =
886 {0x8d,0x74,0x00, /* lea 0(%si),%si */
887 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
888 static const char f16_8[] =
889 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
890 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
891 static const char jump_31[] =
892 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
893 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
894 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
895 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
896 static const char *const f32_patt[] = {
897 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
898 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
899 };
900 static const char *const f16_patt[] = {
901 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
902 };
903 /* nopl (%[re]ax) */
904 static const char alt_3[] =
905 {0x0f,0x1f,0x00};
906 /* nopl 0(%[re]ax) */
907 static const char alt_4[] =
908 {0x0f,0x1f,0x40,0x00};
909 /* nopl 0(%[re]ax,%[re]ax,1) */
910 static const char alt_5[] =
911 {0x0f,0x1f,0x44,0x00,0x00};
912 /* nopw 0(%[re]ax,%[re]ax,1) */
913 static const char alt_6[] =
914 {0x66,0x0f,0x1f,0x44,0x00,0x00};
915 /* nopl 0L(%[re]ax) */
916 static const char alt_7[] =
917 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
918 /* nopl 0L(%[re]ax,%[re]ax,1) */
919 static const char alt_8[] =
920 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
921 /* nopw 0L(%[re]ax,%[re]ax,1) */
922 static const char alt_9[] =
923 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
924 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
925 static const char alt_10[] =
926 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
927 /* data16
928 nopw %cs:0L(%[re]ax,%[re]ax,1) */
929 static const char alt_long_11[] =
930 {0x66,
931 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
932 /* data16
933 data16
934 nopw %cs:0L(%[re]ax,%[re]ax,1) */
935 static const char alt_long_12[] =
936 {0x66,
937 0x66,
938 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
939 /* data16
940 data16
941 data16
942 nopw %cs:0L(%[re]ax,%[re]ax,1) */
943 static const char alt_long_13[] =
944 {0x66,
945 0x66,
946 0x66,
947 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
948 /* data16
949 data16
950 data16
951 data16
952 nopw %cs:0L(%[re]ax,%[re]ax,1) */
953 static const char alt_long_14[] =
954 {0x66,
955 0x66,
956 0x66,
957 0x66,
958 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
959 /* data16
960 data16
961 data16
962 data16
963 data16
964 nopw %cs:0L(%[re]ax,%[re]ax,1) */
965 static const char alt_long_15[] =
966 {0x66,
967 0x66,
968 0x66,
969 0x66,
970 0x66,
971 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
972 /* nopl 0(%[re]ax,%[re]ax,1)
973 nopw 0(%[re]ax,%[re]ax,1) */
974 static const char alt_short_11[] =
975 {0x0f,0x1f,0x44,0x00,0x00,
976 0x66,0x0f,0x1f,0x44,0x00,0x00};
977 /* nopw 0(%[re]ax,%[re]ax,1)
978 nopw 0(%[re]ax,%[re]ax,1) */
979 static const char alt_short_12[] =
980 {0x66,0x0f,0x1f,0x44,0x00,0x00,
981 0x66,0x0f,0x1f,0x44,0x00,0x00};
982 /* nopw 0(%[re]ax,%[re]ax,1)
983 nopl 0L(%[re]ax) */
984 static const char alt_short_13[] =
985 {0x66,0x0f,0x1f,0x44,0x00,0x00,
986 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
987 /* nopl 0L(%[re]ax)
988 nopl 0L(%[re]ax) */
989 static const char alt_short_14[] =
990 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
991 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
992 /* nopl 0L(%[re]ax)
993 nopl 0L(%[re]ax,%[re]ax,1) */
994 static const char alt_short_15[] =
995 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
996 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
997 static const char *const alt_short_patt[] = {
998 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
999 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1000 alt_short_14, alt_short_15
1001 };
1002 static const char *const alt_long_patt[] = {
1003 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1004 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1005 alt_long_14, alt_long_15
1006 };
1007
1008 /* Only align for at least a positive non-zero boundary. */
1009 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1010 return;
1011
1012 /* We need to decide which NOP sequence to use for 32bit and
1013 64bit. When -mtune= is used:
1014
1015 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1016 PROCESSOR_GENERIC32, f32_patt will be used.
1017 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1018 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1019 PROCESSOR_GENERIC64, alt_long_patt will be used.
1020 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1021 PROCESSOR_AMDFAM10, and PROCESSOR_BDVER1, alt_short_patt
1022 will be used.
1023
1024 When -mtune= isn't used, alt_long_patt will be used if
1025 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1026 be used.
1027
1028 When -march= or .arch is used, we can't use anything beyond
1029 cpu_arch_isa_flags. */
1030
1031 if (flag_code == CODE_16BIT)
1032 {
1033 if (count > 8)
1034 {
1035 memcpy (fragP->fr_literal + fragP->fr_fix,
1036 jump_31, count);
1037 /* Adjust jump offset. */
1038 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1039 }
1040 else
1041 memcpy (fragP->fr_literal + fragP->fr_fix,
1042 f16_patt[count - 1], count);
1043 }
1044 else
1045 {
1046 const char *const *patt = NULL;
1047
1048 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1049 {
1050 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1051 switch (cpu_arch_tune)
1052 {
1053 case PROCESSOR_UNKNOWN:
1054 /* We use cpu_arch_isa_flags to check if we SHOULD
1055 optimize with nops. */
1056 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1057 patt = alt_long_patt;
1058 else
1059 patt = f32_patt;
1060 break;
1061 case PROCESSOR_PENTIUMPRO:
1062 case PROCESSOR_PENTIUM4:
1063 case PROCESSOR_NOCONA:
1064 case PROCESSOR_CORE:
1065 case PROCESSOR_CORE2:
1066 case PROCESSOR_COREI7:
1067 case PROCESSOR_L1OM:
1068 case PROCESSOR_GENERIC64:
1069 patt = alt_long_patt;
1070 break;
1071 case PROCESSOR_K6:
1072 case PROCESSOR_ATHLON:
1073 case PROCESSOR_K8:
1074 case PROCESSOR_AMDFAM10:
1075 case PROCESSOR_BDVER1:
1076 patt = alt_short_patt;
1077 break;
1078 case PROCESSOR_I386:
1079 case PROCESSOR_I486:
1080 case PROCESSOR_PENTIUM:
1081 case PROCESSOR_GENERIC32:
1082 patt = f32_patt;
1083 break;
1084 }
1085 }
1086 else
1087 {
1088 switch (fragP->tc_frag_data.tune)
1089 {
1090 case PROCESSOR_UNKNOWN:
1091 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1092 PROCESSOR_UNKNOWN. */
1093 abort ();
1094 break;
1095
1096 case PROCESSOR_I386:
1097 case PROCESSOR_I486:
1098 case PROCESSOR_PENTIUM:
1099 case PROCESSOR_K6:
1100 case PROCESSOR_ATHLON:
1101 case PROCESSOR_K8:
1102 case PROCESSOR_AMDFAM10:
1103 case PROCESSOR_BDVER1:
1104 case PROCESSOR_GENERIC32:
1105 /* We use cpu_arch_isa_flags to check if we CAN optimize
1106 with nops. */
1107 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1108 patt = alt_short_patt;
1109 else
1110 patt = f32_patt;
1111 break;
1112 case PROCESSOR_PENTIUMPRO:
1113 case PROCESSOR_PENTIUM4:
1114 case PROCESSOR_NOCONA:
1115 case PROCESSOR_CORE:
1116 case PROCESSOR_CORE2:
1117 case PROCESSOR_COREI7:
1118 case PROCESSOR_L1OM:
1119 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1120 patt = alt_long_patt;
1121 else
1122 patt = f32_patt;
1123 break;
1124 case PROCESSOR_GENERIC64:
1125 patt = alt_long_patt;
1126 break;
1127 }
1128 }
1129
1130 if (patt == f32_patt)
1131 {
1132 /* If the padding is less than 15 bytes, we use the normal
1133 ones. Otherwise, we use a jump instruction and adjust
1134 its offset. */
1135 int limit;
1136
1137 /* For 64bit, the limit is 3 bytes. */
1138 if (flag_code == CODE_64BIT
1139 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1140 limit = 3;
1141 else
1142 limit = 15;
1143 if (count < limit)
1144 memcpy (fragP->fr_literal + fragP->fr_fix,
1145 patt[count - 1], count);
1146 else
1147 {
1148 memcpy (fragP->fr_literal + fragP->fr_fix,
1149 jump_31, count);
1150 /* Adjust jump offset. */
1151 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1152 }
1153 }
1154 else
1155 {
1156 /* Maximum length of an instruction is 15 byte. If the
1157 padding is greater than 15 bytes and we don't use jump,
1158 we have to break it into smaller pieces. */
1159 int padding = count;
1160 while (padding > 15)
1161 {
1162 padding -= 15;
1163 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1164 patt [14], 15);
1165 }
1166
1167 if (padding)
1168 memcpy (fragP->fr_literal + fragP->fr_fix,
1169 patt [padding - 1], padding);
1170 }
1171 }
1172 fragP->fr_var = count;
1173}
1174
1175static INLINE int
1176operand_type_all_zero (const union i386_operand_type *x)
1177{
1178 switch (ARRAY_SIZE(x->array))
1179 {
1180 case 3:
1181 if (x->array[2])
1182 return 0;
1183 case 2:
1184 if (x->array[1])
1185 return 0;
1186 case 1:
1187 return !x->array[0];
1188 default:
1189 abort ();
1190 }
1191}
1192
1193static INLINE void
1194operand_type_set (union i386_operand_type *x, unsigned int v)
1195{
1196 switch (ARRAY_SIZE(x->array))
1197 {
1198 case 3:
1199 x->array[2] = v;
1200 case 2:
1201 x->array[1] = v;
1202 case 1:
1203 x->array[0] = v;
1204 break;
1205 default:
1206 abort ();
1207 }
1208}
1209
1210static INLINE int
1211operand_type_equal (const union i386_operand_type *x,
1212 const union i386_operand_type *y)
1213{
1214 switch (ARRAY_SIZE(x->array))
1215 {
1216 case 3:
1217 if (x->array[2] != y->array[2])
1218 return 0;
1219 case 2:
1220 if (x->array[1] != y->array[1])
1221 return 0;
1222 case 1:
1223 return x->array[0] == y->array[0];
1224 break;
1225 default:
1226 abort ();
1227 }
1228}
1229
1230static INLINE int
1231cpu_flags_all_zero (const union i386_cpu_flags *x)
1232{
1233 switch (ARRAY_SIZE(x->array))
1234 {
1235 case 3:
1236 if (x->array[2])
1237 return 0;
1238 case 2:
1239 if (x->array[1])
1240 return 0;
1241 case 1:
1242 return !x->array[0];
1243 default:
1244 abort ();
1245 }
1246}
1247
1248static INLINE void
1249cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1250{
1251 switch (ARRAY_SIZE(x->array))
1252 {
1253 case 3:
1254 x->array[2] = v;
1255 case 2:
1256 x->array[1] = v;
1257 case 1:
1258 x->array[0] = v;
1259 break;
1260 default:
1261 abort ();
1262 }
1263}
1264
1265static INLINE int
1266cpu_flags_equal (const union i386_cpu_flags *x,
1267 const union i386_cpu_flags *y)
1268{
1269 switch (ARRAY_SIZE(x->array))
1270 {
1271 case 3:
1272 if (x->array[2] != y->array[2])
1273 return 0;
1274 case 2:
1275 if (x->array[1] != y->array[1])
1276 return 0;
1277 case 1:
1278 return x->array[0] == y->array[0];
1279 break;
1280 default:
1281 abort ();
1282 }
1283}
1284
1285static INLINE int
1286cpu_flags_check_cpu64 (i386_cpu_flags f)
1287{
1288 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1289 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1290}
1291
1292static INLINE i386_cpu_flags
1293cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1294{
1295 switch (ARRAY_SIZE (x.array))
1296 {
1297 case 3:
1298 x.array [2] &= y.array [2];
1299 case 2:
1300 x.array [1] &= y.array [1];
1301 case 1:
1302 x.array [0] &= y.array [0];
1303 break;
1304 default:
1305 abort ();
1306 }
1307 return x;
1308}
1309
1310static INLINE i386_cpu_flags
1311cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1312{
1313 switch (ARRAY_SIZE (x.array))
1314 {
1315 case 3:
1316 x.array [2] |= y.array [2];
1317 case 2:
1318 x.array [1] |= y.array [1];
1319 case 1:
1320 x.array [0] |= y.array [0];
1321 break;
1322 default:
1323 abort ();
1324 }
1325 return x;
1326}
1327
1328static INLINE i386_cpu_flags
1329cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1330{
1331 switch (ARRAY_SIZE (x.array))
1332 {
1333 case 3:
1334 x.array [2] &= ~y.array [2];
1335 case 2:
1336 x.array [1] &= ~y.array [1];
1337 case 1:
1338 x.array [0] &= ~y.array [0];
1339 break;
1340 default:
1341 abort ();
1342 }
1343 return x;
1344}
1345
1346#define CPU_FLAGS_ARCH_MATCH 0x1
1347#define CPU_FLAGS_64BIT_MATCH 0x2
1348#define CPU_FLAGS_AES_MATCH 0x4
1349#define CPU_FLAGS_PCLMUL_MATCH 0x8
1350#define CPU_FLAGS_AVX_MATCH 0x10
1351
1352#define CPU_FLAGS_32BIT_MATCH \
1353 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1354 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1355#define CPU_FLAGS_PERFECT_MATCH \
1356 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1357
1358/* Return CPU flags match bits. */
1359
1360static int
1361cpu_flags_match (const insn_template *t)
1362{
1363 i386_cpu_flags x = t->cpu_flags;
1364 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1365
1366 x.bitfield.cpu64 = 0;
1367 x.bitfield.cpuno64 = 0;
1368
1369 if (cpu_flags_all_zero (&x))
1370 {
1371 /* This instruction is available on all archs. */
1372 match |= CPU_FLAGS_32BIT_MATCH;
1373 }
1374 else
1375 {
1376 /* This instruction is available only on some archs. */
1377 i386_cpu_flags cpu = cpu_arch_flags;
1378
1379 cpu.bitfield.cpu64 = 0;
1380 cpu.bitfield.cpuno64 = 0;
1381 cpu = cpu_flags_and (x, cpu);
1382 if (!cpu_flags_all_zero (&cpu))
1383 {
1384 if (x.bitfield.cpuavx)
1385 {
1386 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1387 if (cpu.bitfield.cpuavx)
1388 {
1389 /* Check SSE2AVX. */
1390 if (!t->opcode_modifier.sse2avx|| sse2avx)
1391 {
1392 match |= (CPU_FLAGS_ARCH_MATCH
1393 | CPU_FLAGS_AVX_MATCH);
1394 /* Check AES. */
1395 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1396 match |= CPU_FLAGS_AES_MATCH;
1397 /* Check PCLMUL. */
1398 if (!x.bitfield.cpupclmul
1399 || cpu.bitfield.cpupclmul)
1400 match |= CPU_FLAGS_PCLMUL_MATCH;
1401 }
1402 }
1403 else
1404 match |= CPU_FLAGS_ARCH_MATCH;
1405 }
1406 else
1407 match |= CPU_FLAGS_32BIT_MATCH;
1408 }
1409 }
1410 return match;
1411}
1412
1413static INLINE i386_operand_type
1414operand_type_and (i386_operand_type x, i386_operand_type y)
1415{
1416 switch (ARRAY_SIZE (x.array))
1417 {
1418 case 3:
1419 x.array [2] &= y.array [2];
1420 case 2:
1421 x.array [1] &= y.array [1];
1422 case 1:
1423 x.array [0] &= y.array [0];
1424 break;
1425 default:
1426 abort ();
1427 }
1428 return x;
1429}
1430
1431static INLINE i386_operand_type
1432operand_type_or (i386_operand_type x, i386_operand_type y)
1433{
1434 switch (ARRAY_SIZE (x.array))
1435 {
1436 case 3:
1437 x.array [2] |= y.array [2];
1438 case 2:
1439 x.array [1] |= y.array [1];
1440 case 1:
1441 x.array [0] |= y.array [0];
1442 break;
1443 default:
1444 abort ();
1445 }
1446 return x;
1447}
1448
1449static INLINE i386_operand_type
1450operand_type_xor (i386_operand_type x, i386_operand_type y)
1451{
1452 switch (ARRAY_SIZE (x.array))
1453 {
1454 case 3:
1455 x.array [2] ^= y.array [2];
1456 case 2:
1457 x.array [1] ^= y.array [1];
1458 case 1:
1459 x.array [0] ^= y.array [0];
1460 break;
1461 default:
1462 abort ();
1463 }
1464 return x;
1465}
1466
1467static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1468static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1469static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1470static const i386_operand_type inoutportreg
1471 = OPERAND_TYPE_INOUTPORTREG;
1472static const i386_operand_type reg16_inoutportreg
1473 = OPERAND_TYPE_REG16_INOUTPORTREG;
1474static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1475static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1476static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1477static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1478static const i386_operand_type anydisp
1479 = OPERAND_TYPE_ANYDISP;
1480static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1481static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1482static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1483static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1484static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1485static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1486static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1487static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1488static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1489static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1490static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1491static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1492
1493enum operand_type
1494{
1495 reg,
1496 imm,
1497 disp,
1498 anymem
1499};
1500
1501static INLINE int
1502operand_type_check (i386_operand_type t, enum operand_type c)
1503{
1504 switch (c)
1505 {
1506 case reg:
1507 return (t.bitfield.reg8
1508 || t.bitfield.reg16
1509 || t.bitfield.reg32
1510 || t.bitfield.reg64);
1511
1512 case imm:
1513 return (t.bitfield.imm8
1514 || t.bitfield.imm8s
1515 || t.bitfield.imm16
1516 || t.bitfield.imm32
1517 || t.bitfield.imm32s
1518 || t.bitfield.imm64);
1519
1520 case disp:
1521 return (t.bitfield.disp8
1522 || t.bitfield.disp16
1523 || t.bitfield.disp32
1524 || t.bitfield.disp32s
1525 || t.bitfield.disp64);
1526
1527 case anymem:
1528 return (t.bitfield.disp8
1529 || t.bitfield.disp16
1530 || t.bitfield.disp32
1531 || t.bitfield.disp32s
1532 || t.bitfield.disp64
1533 || t.bitfield.baseindex);
1534
1535 default:
1536 abort ();
1537 }
1538
1539 return 0;
1540}
1541
1542/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1543 operand J for instruction template T. */
1544
1545static INLINE int
1546match_reg_size (const insn_template *t, unsigned int j)
1547{
1548 return !((i.types[j].bitfield.byte
1549 && !t->operand_types[j].bitfield.byte)
1550 || (i.types[j].bitfield.word
1551 && !t->operand_types[j].bitfield.word)
1552 || (i.types[j].bitfield.dword
1553 && !t->operand_types[j].bitfield.dword)
1554 || (i.types[j].bitfield.qword
1555 && !t->operand_types[j].bitfield.qword));
1556}
1557
1558/* Return 1 if there is no conflict in any size on operand J for
1559 instruction template T. */
1560
1561static INLINE int
1562match_mem_size (const insn_template *t, unsigned int j)
1563{
1564 return (match_reg_size (t, j)
1565 && !((i.types[j].bitfield.unspecified
1566 && !t->operand_types[j].bitfield.unspecified)
1567 || (i.types[j].bitfield.fword
1568 && !t->operand_types[j].bitfield.fword)
1569 || (i.types[j].bitfield.tbyte
1570 && !t->operand_types[j].bitfield.tbyte)
1571 || (i.types[j].bitfield.xmmword
1572 && !t->operand_types[j].bitfield.xmmword)
1573 || (i.types[j].bitfield.ymmword
1574 && !t->operand_types[j].bitfield.ymmword)));
1575}
1576
1577/* Return 1 if there is no size conflict on any operands for
1578 instruction template T. */
1579
1580static INLINE int
1581operand_size_match (const insn_template *t)
1582{
1583 unsigned int j;
1584 int match = 1;
1585
1586 /* Don't check jump instructions. */
1587 if (t->opcode_modifier.jump
1588 || t->opcode_modifier.jumpbyte
1589 || t->opcode_modifier.jumpdword
1590 || t->opcode_modifier.jumpintersegment)
1591 return match;
1592
1593 /* Check memory and accumulator operand size. */
1594 for (j = 0; j < i.operands; j++)
1595 {
1596 if (t->operand_types[j].bitfield.anysize)
1597 continue;
1598
1599 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1600 {
1601 match = 0;
1602 break;
1603 }
1604
1605 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1606 {
1607 match = 0;
1608 break;
1609 }
1610 }
1611
1612 if (match)
1613 return match;
1614 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1615 {
1616mismatch:
1617 i.error = operand_size_mismatch;
1618 return 0;
1619 }
1620
1621 /* Check reverse. */
1622 gas_assert (i.operands == 2);
1623
1624 match = 1;
1625 for (j = 0; j < 2; j++)
1626 {
1627 if (t->operand_types[j].bitfield.acc
1628 && !match_reg_size (t, j ? 0 : 1))
1629 goto mismatch;
1630
1631 if (i.types[j].bitfield.mem
1632 && !match_mem_size (t, j ? 0 : 1))
1633 goto mismatch;
1634 }
1635
1636 return match;
1637}
1638
1639static INLINE int
1640operand_type_match (i386_operand_type overlap,
1641 i386_operand_type given)
1642{
1643 i386_operand_type temp = overlap;
1644
1645 temp.bitfield.jumpabsolute = 0;
1646 temp.bitfield.unspecified = 0;
1647 temp.bitfield.byte = 0;
1648 temp.bitfield.word = 0;
1649 temp.bitfield.dword = 0;
1650 temp.bitfield.fword = 0;
1651 temp.bitfield.qword = 0;
1652 temp.bitfield.tbyte = 0;
1653 temp.bitfield.xmmword = 0;
1654 temp.bitfield.ymmword = 0;
1655 if (operand_type_all_zero (&temp))
1656 goto mismatch;
1657
1658 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1659 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1660 return 1;
1661
1662mismatch:
1663 i.error = operand_type_mismatch;
1664 return 0;
1665}
1666
1667/* If given types g0 and g1 are registers they must be of the same type
1668 unless the expected operand type register overlap is null.
1669 Note that Acc in a template matches every size of reg. */
1670
1671static INLINE int
1672operand_type_register_match (i386_operand_type m0,
1673 i386_operand_type g0,
1674 i386_operand_type t0,
1675 i386_operand_type m1,
1676 i386_operand_type g1,
1677 i386_operand_type t1)
1678{
1679 if (!operand_type_check (g0, reg))
1680 return 1;
1681
1682 if (!operand_type_check (g1, reg))
1683 return 1;
1684
1685 if (g0.bitfield.reg8 == g1.bitfield.reg8
1686 && g0.bitfield.reg16 == g1.bitfield.reg16
1687 && g0.bitfield.reg32 == g1.bitfield.reg32
1688 && g0.bitfield.reg64 == g1.bitfield.reg64)
1689 return 1;
1690
1691 if (m0.bitfield.acc)
1692 {
1693 t0.bitfield.reg8 = 1;
1694 t0.bitfield.reg16 = 1;
1695 t0.bitfield.reg32 = 1;
1696 t0.bitfield.reg64 = 1;
1697 }
1698
1699 if (m1.bitfield.acc)
1700 {
1701 t1.bitfield.reg8 = 1;
1702 t1.bitfield.reg16 = 1;
1703 t1.bitfield.reg32 = 1;
1704 t1.bitfield.reg64 = 1;
1705 }
1706
1707 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1708 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1709 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1710 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1711 return 1;
1712
1713 i.error = register_type_mismatch;
1714
1715 return 0;
1716}
1717
1718static INLINE unsigned int
1719mode_from_disp_size (i386_operand_type t)
1720{
1721 if (t.bitfield.disp8)
1722 return 1;
1723 else if (t.bitfield.disp16
1724 || t.bitfield.disp32
1725 || t.bitfield.disp32s)
1726 return 2;
1727 else
1728 return 0;
1729}
1730
1731static INLINE int
1732fits_in_signed_byte (offsetT num)
1733{
1734 return (num >= -128) && (num <= 127);
1735}
1736
1737static INLINE int
1738fits_in_unsigned_byte (offsetT num)
1739{
1740 return (num & 0xff) == num;
1741}
1742
1743static INLINE int
1744fits_in_unsigned_word (offsetT num)
1745{
1746 return (num & 0xffff) == num;
1747}
1748
1749static INLINE int
1750fits_in_signed_word (offsetT num)
1751{
1752 return (-32768 <= num) && (num <= 32767);
1753}
1754
1755static INLINE int
1756fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1757{
1758#ifndef BFD64
1759 return 1;
1760#else
1761 return (!(((offsetT) -1 << 31) & num)
1762 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1763#endif
1764} /* fits_in_signed_long() */
1765
1766static INLINE int
1767fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1768{
1769#ifndef BFD64
1770 return 1;
1771#else
1772 return (num & (((offsetT) 2 << 31) - 1)) == num;
1773#endif
1774} /* fits_in_unsigned_long() */
1775
1776static INLINE int
1777fits_in_imm4 (offsetT num)
1778{
1779 return (num & 0xf) == num;
1780}
1781
1782static i386_operand_type
1783smallest_imm_type (offsetT num)
1784{
1785 i386_operand_type t;
1786
1787 operand_type_set (&t, 0);
1788 t.bitfield.imm64 = 1;
1789
1790 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1791 {
1792 /* This code is disabled on the 486 because all the Imm1 forms
1793 in the opcode table are slower on the i486. They're the
1794 versions with the implicitly specified single-position
1795 displacement, which has another syntax if you really want to
1796 use that form. */
1797 t.bitfield.imm1 = 1;
1798 t.bitfield.imm8 = 1;
1799 t.bitfield.imm8s = 1;
1800 t.bitfield.imm16 = 1;
1801 t.bitfield.imm32 = 1;
1802 t.bitfield.imm32s = 1;
1803 }
1804 else if (fits_in_signed_byte (num))
1805 {
1806 t.bitfield.imm8 = 1;
1807 t.bitfield.imm8s = 1;
1808 t.bitfield.imm16 = 1;
1809 t.bitfield.imm32 = 1;
1810 t.bitfield.imm32s = 1;
1811 }
1812 else if (fits_in_unsigned_byte (num))
1813 {
1814 t.bitfield.imm8 = 1;
1815 t.bitfield.imm16 = 1;
1816 t.bitfield.imm32 = 1;
1817 t.bitfield.imm32s = 1;
1818 }
1819 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1820 {
1821 t.bitfield.imm16 = 1;
1822 t.bitfield.imm32 = 1;
1823 t.bitfield.imm32s = 1;
1824 }
1825 else if (fits_in_signed_long (num))
1826 {
1827 t.bitfield.imm32 = 1;
1828 t.bitfield.imm32s = 1;
1829 }
1830 else if (fits_in_unsigned_long (num))
1831 t.bitfield.imm32 = 1;
1832
1833 return t;
1834}
1835
1836static offsetT
1837offset_in_range (offsetT val, int size)
1838{
1839 addressT mask;
1840
1841 switch (size)
1842 {
1843 case 1: mask = ((addressT) 1 << 8) - 1; break;
1844 case 2: mask = ((addressT) 1 << 16) - 1; break;
1845 case 4: mask = ((addressT) 2 << 31) - 1; break;
1846#ifdef BFD64
1847 case 8: mask = ((addressT) 2 << 63) - 1; break;
1848#endif
1849 default: abort ();
1850 }
1851
1852#ifdef BFD64
1853 /* If BFD64, sign extend val for 32bit address mode. */
1854 if (flag_code != CODE_64BIT
1855 || i.prefix[ADDR_PREFIX])
1856 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1857 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1858#endif
1859
1860 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1861 {
1862 char buf1[40], buf2[40];
1863
1864 sprint_value (buf1, val);
1865 sprint_value (buf2, val & mask);
1866 as_warn (_("%s shortened to %s"), buf1, buf2);
1867 }
1868 return val & mask;
1869}
1870
1871enum PREFIX_GROUP
1872{
1873 PREFIX_EXIST = 0,
1874 PREFIX_LOCK,
1875 PREFIX_REP,
1876 PREFIX_OTHER
1877};
1878
1879/* Returns
1880 a. PREFIX_EXIST if attempting to add a prefix where one from the
1881 same class already exists.
1882 b. PREFIX_LOCK if lock prefix is added.
1883 c. PREFIX_REP if rep/repne prefix is added.
1884 d. PREFIX_OTHER if other prefix is added.
1885 */
1886
1887static enum PREFIX_GROUP
1888add_prefix (unsigned int prefix)
1889{
1890 enum PREFIX_GROUP ret = PREFIX_OTHER;
1891 unsigned int q;
1892
1893 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1894 && flag_code == CODE_64BIT)
1895 {
1896 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1897 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1898 && (prefix & (REX_R | REX_X | REX_B))))
1899 ret = PREFIX_EXIST;
1900 q = REX_PREFIX;
1901 }
1902 else
1903 {
1904 switch (prefix)
1905 {
1906 default:
1907 abort ();
1908
1909 case CS_PREFIX_OPCODE:
1910 case DS_PREFIX_OPCODE:
1911 case ES_PREFIX_OPCODE:
1912 case FS_PREFIX_OPCODE:
1913 case GS_PREFIX_OPCODE:
1914 case SS_PREFIX_OPCODE:
1915 q = SEG_PREFIX;
1916 break;
1917
1918 case REPNE_PREFIX_OPCODE:
1919 case REPE_PREFIX_OPCODE:
1920 q = REP_PREFIX;
1921 ret = PREFIX_REP;
1922 break;
1923
1924 case LOCK_PREFIX_OPCODE:
1925 q = LOCK_PREFIX;
1926 ret = PREFIX_LOCK;
1927 break;
1928
1929 case FWAIT_OPCODE:
1930 q = WAIT_PREFIX;
1931 break;
1932
1933 case ADDR_PREFIX_OPCODE:
1934 q = ADDR_PREFIX;
1935 break;
1936
1937 case DATA_PREFIX_OPCODE:
1938 q = DATA_PREFIX;
1939 break;
1940 }
1941 if (i.prefix[q] != 0)
1942 ret = PREFIX_EXIST;
1943 }
1944
1945 if (ret)
1946 {
1947 if (!i.prefix[q])
1948 ++i.prefixes;
1949 i.prefix[q] |= prefix;
1950 }
1951 else
1952 as_bad (_("same type of prefix used twice"));
1953
1954 return ret;
1955}
1956
1957static void
1958update_code_flag (int value, int check)
1959{
1960 PRINTF_LIKE ((*as_error));
1961
1962 flag_code = (enum flag_code) value;
1963 if (flag_code == CODE_64BIT)
1964 {
1965 cpu_arch_flags.bitfield.cpu64 = 1;
1966 cpu_arch_flags.bitfield.cpuno64 = 0;
1967 }
1968 else
1969 {
1970 cpu_arch_flags.bitfield.cpu64 = 0;
1971 cpu_arch_flags.bitfield.cpuno64 = 1;
1972 }
1973 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
1974 {
1975 if (check)
1976 as_error = as_fatal;
1977 else
1978 as_error = as_bad;
1979 (*as_error) (_("64bit mode not supported on `%s'."),
1980 cpu_arch_name ? cpu_arch_name : default_arch);
1981 }
1982 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1983 {
1984 if (check)
1985 as_error = as_fatal;
1986 else
1987 as_error = as_bad;
1988 (*as_error) (_("32bit mode not supported on `%s'."),
1989 cpu_arch_name ? cpu_arch_name : default_arch);
1990 }
1991 stackop_size = '\0';
1992}
1993
1994static void
1995set_code_flag (int value)
1996{
1997 update_code_flag (value, 0);
1998}
1999
2000static void
2001set_16bit_gcc_code_flag (int new_code_flag)
2002{
2003 flag_code = (enum flag_code) new_code_flag;
2004 if (flag_code != CODE_16BIT)
2005 abort ();
2006 cpu_arch_flags.bitfield.cpu64 = 0;
2007 cpu_arch_flags.bitfield.cpuno64 = 1;
2008 stackop_size = LONG_MNEM_SUFFIX;
2009}
2010
2011static void
2012set_intel_syntax (int syntax_flag)
2013{
2014 /* Find out if register prefixing is specified. */
2015 int ask_naked_reg = 0;
2016
2017 SKIP_WHITESPACE ();
2018 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2019 {
2020 char *string = input_line_pointer;
2021 int e = get_symbol_end ();
2022
2023 if (strcmp (string, "prefix") == 0)
2024 ask_naked_reg = 1;
2025 else if (strcmp (string, "noprefix") == 0)
2026 ask_naked_reg = -1;
2027 else
2028 as_bad (_("bad argument to syntax directive."));
2029 *input_line_pointer = e;
2030 }
2031 demand_empty_rest_of_line ();
2032
2033 intel_syntax = syntax_flag;
2034
2035 if (ask_naked_reg == 0)
2036 allow_naked_reg = (intel_syntax
2037 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2038 else
2039 allow_naked_reg = (ask_naked_reg < 0);
2040
2041 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2042
2043 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2044 identifier_chars['$'] = intel_syntax ? '$' : 0;
2045 register_prefix = allow_naked_reg ? "" : "%";
2046}
2047
2048static void
2049set_intel_mnemonic (int mnemonic_flag)
2050{
2051 intel_mnemonic = mnemonic_flag;
2052}
2053
2054static void
2055set_allow_index_reg (int flag)
2056{
2057 allow_index_reg = flag;
2058}
2059
2060static void
2061set_sse_check (int dummy ATTRIBUTE_UNUSED)
2062{
2063 SKIP_WHITESPACE ();
2064
2065 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2066 {
2067 char *string = input_line_pointer;
2068 int e = get_symbol_end ();
2069
2070 if (strcmp (string, "none") == 0)
2071 sse_check = sse_check_none;
2072 else if (strcmp (string, "warning") == 0)
2073 sse_check = sse_check_warning;
2074 else if (strcmp (string, "error") == 0)
2075 sse_check = sse_check_error;
2076 else
2077 as_bad (_("bad argument to sse_check directive."));
2078 *input_line_pointer = e;
2079 }
2080 else
2081 as_bad (_("missing argument for sse_check directive"));
2082
2083 demand_empty_rest_of_line ();
2084}
2085
2086static void
2087check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2088 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2089{
2090#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2091 static const char *arch;
2092
2093 /* Intel LIOM is only supported on ELF. */
2094 if (!IS_ELF)
2095 return;
2096
2097 if (!arch)
2098 {
2099 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2100 use default_arch. */
2101 arch = cpu_arch_name;
2102 if (!arch)
2103 arch = default_arch;
2104 }
2105
2106 /* If we are targeting Intel L1OM, we must enable it. */
2107 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2108 || new_flag.bitfield.cpul1om)
2109 return;
2110
2111 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2112#endif
2113}
2114
2115static void
2116set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2117{
2118 SKIP_WHITESPACE ();
2119
2120 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2121 {
2122 char *string = input_line_pointer;
2123 int e = get_symbol_end ();
2124 unsigned int j;
2125 i386_cpu_flags flags;
2126
2127 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2128 {
2129 if (strcmp (string, cpu_arch[j].name) == 0)
2130 {
2131 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2132
2133 if (*string != '.')
2134 {
2135 cpu_arch_name = cpu_arch[j].name;
2136 cpu_sub_arch_name = NULL;
2137 cpu_arch_flags = cpu_arch[j].flags;
2138 if (flag_code == CODE_64BIT)
2139 {
2140 cpu_arch_flags.bitfield.cpu64 = 1;
2141 cpu_arch_flags.bitfield.cpuno64 = 0;
2142 }
2143 else
2144 {
2145 cpu_arch_flags.bitfield.cpu64 = 0;
2146 cpu_arch_flags.bitfield.cpuno64 = 1;
2147 }
2148 cpu_arch_isa = cpu_arch[j].type;
2149 cpu_arch_isa_flags = cpu_arch[j].flags;
2150 if (!cpu_arch_tune_set)
2151 {
2152 cpu_arch_tune = cpu_arch_isa;
2153 cpu_arch_tune_flags = cpu_arch_isa_flags;
2154 }
2155 break;
2156 }
2157
2158 if (!cpu_arch[j].negated)
2159 flags = cpu_flags_or (cpu_arch_flags,
2160 cpu_arch[j].flags);
2161 else
2162 flags = cpu_flags_and_not (cpu_arch_flags,
2163 cpu_arch[j].flags);
2164 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2165 {
2166 if (cpu_sub_arch_name)
2167 {
2168 char *name = cpu_sub_arch_name;
2169 cpu_sub_arch_name = concat (name,
2170 cpu_arch[j].name,
2171 (const char *) NULL);
2172 free (name);
2173 }
2174 else
2175 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2176 cpu_arch_flags = flags;
2177 }
2178 *input_line_pointer = e;
2179 demand_empty_rest_of_line ();
2180 return;
2181 }
2182 }
2183 if (j >= ARRAY_SIZE (cpu_arch))
2184 as_bad (_("no such architecture: `%s'"), string);
2185
2186 *input_line_pointer = e;
2187 }
2188 else
2189 as_bad (_("missing cpu architecture"));
2190
2191 no_cond_jump_promotion = 0;
2192 if (*input_line_pointer == ','
2193 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2194 {
2195 char *string = ++input_line_pointer;
2196 int e = get_symbol_end ();
2197
2198 if (strcmp (string, "nojumps") == 0)
2199 no_cond_jump_promotion = 1;
2200 else if (strcmp (string, "jumps") == 0)
2201 ;
2202 else
2203 as_bad (_("no such architecture modifier: `%s'"), string);
2204
2205 *input_line_pointer = e;
2206 }
2207
2208 demand_empty_rest_of_line ();
2209}
2210
2211enum bfd_architecture
2212i386_arch (void)
2213{
2214 if (cpu_arch_isa == PROCESSOR_L1OM)
2215 {
2216 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2217 || flag_code != CODE_64BIT)
2218 as_fatal (_("Intel L1OM is 64bit ELF only"));
2219 return bfd_arch_l1om;
2220 }
2221 else
2222 return bfd_arch_i386;
2223}
2224
2225unsigned long
2226i386_mach ()
2227{
2228 if (!strncmp (default_arch, "x86_64", 6))
2229 {
2230 if (cpu_arch_isa == PROCESSOR_L1OM)
2231 {
2232 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2233 || default_arch[6] != '\0')
2234 as_fatal (_("Intel L1OM is 64bit ELF only"));
2235 return bfd_mach_l1om;
2236 }
2237 else if (default_arch[6] == '\0')
2238 return bfd_mach_x86_64;
2239 else
2240 return bfd_mach_x64_32;
2241 }
2242 else if (!strcmp (default_arch, "i386"))
2243 return bfd_mach_i386_i386;
2244 else
2245 as_fatal (_("Unknown architecture"));
2246}
2247\f
2248void
2249md_begin ()
2250{
2251 const char *hash_err;
2252
2253 /* Initialize op_hash hash table. */
2254 op_hash = hash_new ();
2255
2256 {
2257 const insn_template *optab;
2258 templates *core_optab;
2259
2260 /* Setup for loop. */
2261 optab = i386_optab;
2262 core_optab = (templates *) xmalloc (sizeof (templates));
2263 core_optab->start = optab;
2264
2265 while (1)
2266 {
2267 ++optab;
2268 if (optab->name == NULL
2269 || strcmp (optab->name, (optab - 1)->name) != 0)
2270 {
2271 /* different name --> ship out current template list;
2272 add to hash table; & begin anew. */
2273 core_optab->end = optab;
2274 hash_err = hash_insert (op_hash,
2275 (optab - 1)->name,
2276 (void *) core_optab);
2277 if (hash_err)
2278 {
2279 as_fatal (_("Internal Error: Can't hash %s: %s"),
2280 (optab - 1)->name,
2281 hash_err);
2282 }
2283 if (optab->name == NULL)
2284 break;
2285 core_optab = (templates *) xmalloc (sizeof (templates));
2286 core_optab->start = optab;
2287 }
2288 }
2289 }
2290
2291 /* Initialize reg_hash hash table. */
2292 reg_hash = hash_new ();
2293 {
2294 const reg_entry *regtab;
2295 unsigned int regtab_size = i386_regtab_size;
2296
2297 for (regtab = i386_regtab; regtab_size--; regtab++)
2298 {
2299 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2300 if (hash_err)
2301 as_fatal (_("Internal Error: Can't hash %s: %s"),
2302 regtab->reg_name,
2303 hash_err);
2304 }
2305 }
2306
2307 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2308 {
2309 int c;
2310 char *p;
2311
2312 for (c = 0; c < 256; c++)
2313 {
2314 if (ISDIGIT (c))
2315 {
2316 digit_chars[c] = c;
2317 mnemonic_chars[c] = c;
2318 register_chars[c] = c;
2319 operand_chars[c] = c;
2320 }
2321 else if (ISLOWER (c))
2322 {
2323 mnemonic_chars[c] = c;
2324 register_chars[c] = c;
2325 operand_chars[c] = c;
2326 }
2327 else if (ISUPPER (c))
2328 {
2329 mnemonic_chars[c] = TOLOWER (c);
2330 register_chars[c] = mnemonic_chars[c];
2331 operand_chars[c] = c;
2332 }
2333
2334 if (ISALPHA (c) || ISDIGIT (c))
2335 identifier_chars[c] = c;
2336 else if (c >= 128)
2337 {
2338 identifier_chars[c] = c;
2339 operand_chars[c] = c;
2340 }
2341 }
2342
2343#ifdef LEX_AT
2344 identifier_chars['@'] = '@';
2345#endif
2346#ifdef LEX_QM
2347 identifier_chars['?'] = '?';
2348 operand_chars['?'] = '?';
2349#endif
2350 digit_chars['-'] = '-';
2351 mnemonic_chars['_'] = '_';
2352 mnemonic_chars['-'] = '-';
2353 mnemonic_chars['.'] = '.';
2354 identifier_chars['_'] = '_';
2355 identifier_chars['.'] = '.';
2356
2357 for (p = operand_special_chars; *p != '\0'; p++)
2358 operand_chars[(unsigned char) *p] = *p;
2359 }
2360
2361#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2362 if (IS_ELF)
2363 {
2364 record_alignment (text_section, 2);
2365 record_alignment (data_section, 2);
2366 record_alignment (bss_section, 2);
2367 }
2368#endif
2369
2370 if (flag_code == CODE_64BIT)
2371 {
2372 x86_dwarf2_return_column = 16;
2373 x86_cie_data_alignment = -8;
2374 }
2375 else
2376 {
2377 x86_dwarf2_return_column = 8;
2378 x86_cie_data_alignment = -4;
2379 }
2380}
2381
2382void
2383i386_print_statistics (FILE *file)
2384{
2385 hash_print_statistics (file, "i386 opcode", op_hash);
2386 hash_print_statistics (file, "i386 register", reg_hash);
2387}
2388\f
2389#ifdef DEBUG386
2390
2391/* Debugging routines for md_assemble. */
2392static void pte (insn_template *);
2393static void pt (i386_operand_type);
2394static void pe (expressionS *);
2395static void ps (symbolS *);
2396
2397static void
2398pi (char *line, i386_insn *x)
2399{
2400 unsigned int j;
2401
2402 fprintf (stdout, "%s: template ", line);
2403 pte (&x->tm);
2404 fprintf (stdout, " address: base %s index %s scale %x\n",
2405 x->base_reg ? x->base_reg->reg_name : "none",
2406 x->index_reg ? x->index_reg->reg_name : "none",
2407 x->log2_scale_factor);
2408 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2409 x->rm.mode, x->rm.reg, x->rm.regmem);
2410 fprintf (stdout, " sib: base %x index %x scale %x\n",
2411 x->sib.base, x->sib.index, x->sib.scale);
2412 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2413 (x->rex & REX_W) != 0,
2414 (x->rex & REX_R) != 0,
2415 (x->rex & REX_X) != 0,
2416 (x->rex & REX_B) != 0);
2417 for (j = 0; j < x->operands; j++)
2418 {
2419 fprintf (stdout, " #%d: ", j + 1);
2420 pt (x->types[j]);
2421 fprintf (stdout, "\n");
2422 if (x->types[j].bitfield.reg8
2423 || x->types[j].bitfield.reg16
2424 || x->types[j].bitfield.reg32
2425 || x->types[j].bitfield.reg64
2426 || x->types[j].bitfield.regmmx
2427 || x->types[j].bitfield.regxmm
2428 || x->types[j].bitfield.regymm
2429 || x->types[j].bitfield.sreg2
2430 || x->types[j].bitfield.sreg3
2431 || x->types[j].bitfield.control
2432 || x->types[j].bitfield.debug
2433 || x->types[j].bitfield.test)
2434 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2435 if (operand_type_check (x->types[j], imm))
2436 pe (x->op[j].imms);
2437 if (operand_type_check (x->types[j], disp))
2438 pe (x->op[j].disps);
2439 }
2440}
2441
2442static void
2443pte (insn_template *t)
2444{
2445 unsigned int j;
2446 fprintf (stdout, " %d operands ", t->operands);
2447 fprintf (stdout, "opcode %x ", t->base_opcode);
2448 if (t->extension_opcode != None)
2449 fprintf (stdout, "ext %x ", t->extension_opcode);
2450 if (t->opcode_modifier.d)
2451 fprintf (stdout, "D");
2452 if (t->opcode_modifier.w)
2453 fprintf (stdout, "W");
2454 fprintf (stdout, "\n");
2455 for (j = 0; j < t->operands; j++)
2456 {
2457 fprintf (stdout, " #%d type ", j + 1);
2458 pt (t->operand_types[j]);
2459 fprintf (stdout, "\n");
2460 }
2461}
2462
2463static void
2464pe (expressionS *e)
2465{
2466 fprintf (stdout, " operation %d\n", e->X_op);
2467 fprintf (stdout, " add_number %ld (%lx)\n",
2468 (long) e->X_add_number, (long) e->X_add_number);
2469 if (e->X_add_symbol)
2470 {
2471 fprintf (stdout, " add_symbol ");
2472 ps (e->X_add_symbol);
2473 fprintf (stdout, "\n");
2474 }
2475 if (e->X_op_symbol)
2476 {
2477 fprintf (stdout, " op_symbol ");
2478 ps (e->X_op_symbol);
2479 fprintf (stdout, "\n");
2480 }
2481}
2482
2483static void
2484ps (symbolS *s)
2485{
2486 fprintf (stdout, "%s type %s%s",
2487 S_GET_NAME (s),
2488 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2489 segment_name (S_GET_SEGMENT (s)));
2490}
2491
2492static struct type_name
2493 {
2494 i386_operand_type mask;
2495 const char *name;
2496 }
2497const type_names[] =
2498{
2499 { OPERAND_TYPE_REG8, "r8" },
2500 { OPERAND_TYPE_REG16, "r16" },
2501 { OPERAND_TYPE_REG32, "r32" },
2502 { OPERAND_TYPE_REG64, "r64" },
2503 { OPERAND_TYPE_IMM8, "i8" },
2504 { OPERAND_TYPE_IMM8, "i8s" },
2505 { OPERAND_TYPE_IMM16, "i16" },
2506 { OPERAND_TYPE_IMM32, "i32" },
2507 { OPERAND_TYPE_IMM32S, "i32s" },
2508 { OPERAND_TYPE_IMM64, "i64" },
2509 { OPERAND_TYPE_IMM1, "i1" },
2510 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2511 { OPERAND_TYPE_DISP8, "d8" },
2512 { OPERAND_TYPE_DISP16, "d16" },
2513 { OPERAND_TYPE_DISP32, "d32" },
2514 { OPERAND_TYPE_DISP32S, "d32s" },
2515 { OPERAND_TYPE_DISP64, "d64" },
2516 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2517 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2518 { OPERAND_TYPE_CONTROL, "control reg" },
2519 { OPERAND_TYPE_TEST, "test reg" },
2520 { OPERAND_TYPE_DEBUG, "debug reg" },
2521 { OPERAND_TYPE_FLOATREG, "FReg" },
2522 { OPERAND_TYPE_FLOATACC, "FAcc" },
2523 { OPERAND_TYPE_SREG2, "SReg2" },
2524 { OPERAND_TYPE_SREG3, "SReg3" },
2525 { OPERAND_TYPE_ACC, "Acc" },
2526 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2527 { OPERAND_TYPE_REGMMX, "rMMX" },
2528 { OPERAND_TYPE_REGXMM, "rXMM" },
2529 { OPERAND_TYPE_REGYMM, "rYMM" },
2530 { OPERAND_TYPE_ESSEG, "es" },
2531};
2532
2533static void
2534pt (i386_operand_type t)
2535{
2536 unsigned int j;
2537 i386_operand_type a;
2538
2539 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2540 {
2541 a = operand_type_and (t, type_names[j].mask);
2542 if (!operand_type_all_zero (&a))
2543 fprintf (stdout, "%s, ", type_names[j].name);
2544 }
2545 fflush (stdout);
2546}
2547
2548#endif /* DEBUG386 */
2549\f
2550static bfd_reloc_code_real_type
2551reloc (unsigned int size,
2552 int pcrel,
2553 int sign,
2554 bfd_reloc_code_real_type other)
2555{
2556 if (other != NO_RELOC)
2557 {
2558 reloc_howto_type *rel;
2559
2560 if (size == 8)
2561 switch (other)
2562 {
2563 case BFD_RELOC_X86_64_GOT32:
2564 return BFD_RELOC_X86_64_GOT64;
2565 break;
2566 case BFD_RELOC_X86_64_PLTOFF64:
2567 return BFD_RELOC_X86_64_PLTOFF64;
2568 break;
2569 case BFD_RELOC_X86_64_GOTPC32:
2570 other = BFD_RELOC_X86_64_GOTPC64;
2571 break;
2572 case BFD_RELOC_X86_64_GOTPCREL:
2573 other = BFD_RELOC_X86_64_GOTPCREL64;
2574 break;
2575 case BFD_RELOC_X86_64_TPOFF32:
2576 other = BFD_RELOC_X86_64_TPOFF64;
2577 break;
2578 case BFD_RELOC_X86_64_DTPOFF32:
2579 other = BFD_RELOC_X86_64_DTPOFF64;
2580 break;
2581 default:
2582 break;
2583 }
2584
2585 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2586 if (size == 4 && flag_code != CODE_64BIT)
2587 sign = -1;
2588
2589 rel = bfd_reloc_type_lookup (stdoutput, other);
2590 if (!rel)
2591 as_bad (_("unknown relocation (%u)"), other);
2592 else if (size != bfd_get_reloc_size (rel))
2593 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2594 bfd_get_reloc_size (rel),
2595 size);
2596 else if (pcrel && !rel->pc_relative)
2597 as_bad (_("non-pc-relative relocation for pc-relative field"));
2598 else if ((rel->complain_on_overflow == complain_overflow_signed
2599 && !sign)
2600 || (rel->complain_on_overflow == complain_overflow_unsigned
2601 && sign > 0))
2602 as_bad (_("relocated field and relocation type differ in signedness"));
2603 else
2604 return other;
2605 return NO_RELOC;
2606 }
2607
2608 if (pcrel)
2609 {
2610 if (!sign)
2611 as_bad (_("there are no unsigned pc-relative relocations"));
2612 switch (size)
2613 {
2614 case 1: return BFD_RELOC_8_PCREL;
2615 case 2: return BFD_RELOC_16_PCREL;
2616 case 4: return BFD_RELOC_32_PCREL;
2617 case 8: return BFD_RELOC_64_PCREL;
2618 }
2619 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2620 }
2621 else
2622 {
2623 if (sign > 0)
2624 switch (size)
2625 {
2626 case 4: return BFD_RELOC_X86_64_32S;
2627 }
2628 else
2629 switch (size)
2630 {
2631 case 1: return BFD_RELOC_8;
2632 case 2: return BFD_RELOC_16;
2633 case 4: return BFD_RELOC_32;
2634 case 8: return BFD_RELOC_64;
2635 }
2636 as_bad (_("cannot do %s %u byte relocation"),
2637 sign > 0 ? "signed" : "unsigned", size);
2638 }
2639
2640 return NO_RELOC;
2641}
2642
2643/* Here we decide which fixups can be adjusted to make them relative to
2644 the beginning of the section instead of the symbol. Basically we need
2645 to make sure that the dynamic relocations are done correctly, so in
2646 some cases we force the original symbol to be used. */
2647
2648int
2649tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2650{
2651#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2652 if (!IS_ELF)
2653 return 1;
2654
2655 /* Don't adjust pc-relative references to merge sections in 64-bit
2656 mode. */
2657 if (use_rela_relocations
2658 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2659 && fixP->fx_pcrel)
2660 return 0;
2661
2662 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2663 and changed later by validate_fix. */
2664 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2665 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2666 return 0;
2667
2668 /* adjust_reloc_syms doesn't know about the GOT. */
2669 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2670 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2671 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2672 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2673 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2674 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2675 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2676 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2677 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2678 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2679 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2680 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2681 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2682 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2683 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2684 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2685 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2686 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2687 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2688 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2689 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2690 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2691 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2692 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2693 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2694 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2695 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2696 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2697 return 0;
2698#endif
2699 return 1;
2700}
2701
2702static int
2703intel_float_operand (const char *mnemonic)
2704{
2705 /* Note that the value returned is meaningful only for opcodes with (memory)
2706 operands, hence the code here is free to improperly handle opcodes that
2707 have no operands (for better performance and smaller code). */
2708
2709 if (mnemonic[0] != 'f')
2710 return 0; /* non-math */
2711
2712 switch (mnemonic[1])
2713 {
2714 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2715 the fs segment override prefix not currently handled because no
2716 call path can make opcodes without operands get here */
2717 case 'i':
2718 return 2 /* integer op */;
2719 case 'l':
2720 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2721 return 3; /* fldcw/fldenv */
2722 break;
2723 case 'n':
2724 if (mnemonic[2] != 'o' /* fnop */)
2725 return 3; /* non-waiting control op */
2726 break;
2727 case 'r':
2728 if (mnemonic[2] == 's')
2729 return 3; /* frstor/frstpm */
2730 break;
2731 case 's':
2732 if (mnemonic[2] == 'a')
2733 return 3; /* fsave */
2734 if (mnemonic[2] == 't')
2735 {
2736 switch (mnemonic[3])
2737 {
2738 case 'c': /* fstcw */
2739 case 'd': /* fstdw */
2740 case 'e': /* fstenv */
2741 case 's': /* fsts[gw] */
2742 return 3;
2743 }
2744 }
2745 break;
2746 case 'x':
2747 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2748 return 0; /* fxsave/fxrstor are not really math ops */
2749 break;
2750 }
2751
2752 return 1;
2753}
2754
2755/* Build the VEX prefix. */
2756
2757static void
2758build_vex_prefix (const insn_template *t)
2759{
2760 unsigned int register_specifier;
2761 unsigned int implied_prefix;
2762 unsigned int vector_length;
2763
2764 /* Check register specifier. */
2765 if (i.vex.register_specifier)
2766 {
2767 register_specifier = i.vex.register_specifier->reg_num;
2768 if ((i.vex.register_specifier->reg_flags & RegRex))
2769 register_specifier += 8;
2770 register_specifier = ~register_specifier & 0xf;
2771 }
2772 else
2773 register_specifier = 0xf;
2774
2775 /* Use 2-byte VEX prefix by swappping destination and source
2776 operand. */
2777 if (!i.swap_operand
2778 && i.operands == i.reg_operands
2779 && i.tm.opcode_modifier.vexopcode == VEX0F
2780 && i.tm.opcode_modifier.s
2781 && i.rex == REX_B)
2782 {
2783 unsigned int xchg = i.operands - 1;
2784 union i386_op temp_op;
2785 i386_operand_type temp_type;
2786
2787 temp_type = i.types[xchg];
2788 i.types[xchg] = i.types[0];
2789 i.types[0] = temp_type;
2790 temp_op = i.op[xchg];
2791 i.op[xchg] = i.op[0];
2792 i.op[0] = temp_op;
2793
2794 gas_assert (i.rm.mode == 3);
2795
2796 i.rex = REX_R;
2797 xchg = i.rm.regmem;
2798 i.rm.regmem = i.rm.reg;
2799 i.rm.reg = xchg;
2800
2801 /* Use the next insn. */
2802 i.tm = t[1];
2803 }
2804
2805 if (i.tm.opcode_modifier.vex == VEXScalar)
2806 vector_length = avxscalar;
2807 else
2808 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2809
2810 switch ((i.tm.base_opcode >> 8) & 0xff)
2811 {
2812 case 0:
2813 implied_prefix = 0;
2814 break;
2815 case DATA_PREFIX_OPCODE:
2816 implied_prefix = 1;
2817 break;
2818 case REPE_PREFIX_OPCODE:
2819 implied_prefix = 2;
2820 break;
2821 case REPNE_PREFIX_OPCODE:
2822 implied_prefix = 3;
2823 break;
2824 default:
2825 abort ();
2826 }
2827
2828 /* Use 2-byte VEX prefix if possible. */
2829 if (i.tm.opcode_modifier.vexopcode == VEX0F
2830 && i.tm.opcode_modifier.vexw != VEXW1
2831 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2832 {
2833 /* 2-byte VEX prefix. */
2834 unsigned int r;
2835
2836 i.vex.length = 2;
2837 i.vex.bytes[0] = 0xc5;
2838
2839 /* Check the REX.R bit. */
2840 r = (i.rex & REX_R) ? 0 : 1;
2841 i.vex.bytes[1] = (r << 7
2842 | register_specifier << 3
2843 | vector_length << 2
2844 | implied_prefix);
2845 }
2846 else
2847 {
2848 /* 3-byte VEX prefix. */
2849 unsigned int m, w;
2850
2851 i.vex.length = 3;
2852
2853 switch (i.tm.opcode_modifier.vexopcode)
2854 {
2855 case VEX0F:
2856 m = 0x1;
2857 i.vex.bytes[0] = 0xc4;
2858 break;
2859 case VEX0F38:
2860 m = 0x2;
2861 i.vex.bytes[0] = 0xc4;
2862 break;
2863 case VEX0F3A:
2864 m = 0x3;
2865 i.vex.bytes[0] = 0xc4;
2866 break;
2867 case XOP08:
2868 m = 0x8;
2869 i.vex.bytes[0] = 0x8f;
2870 break;
2871 case XOP09:
2872 m = 0x9;
2873 i.vex.bytes[0] = 0x8f;
2874 break;
2875 case XOP0A:
2876 m = 0xa;
2877 i.vex.bytes[0] = 0x8f;
2878 break;
2879 default:
2880 abort ();
2881 }
2882
2883 /* The high 3 bits of the second VEX byte are 1's compliment
2884 of RXB bits from REX. */
2885 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2886
2887 /* Check the REX.W bit. */
2888 w = (i.rex & REX_W) ? 1 : 0;
2889 if (i.tm.opcode_modifier.vexw)
2890 {
2891 if (w)
2892 abort ();
2893
2894 if (i.tm.opcode_modifier.vexw == VEXW1)
2895 w = 1;
2896 }
2897
2898 i.vex.bytes[2] = (w << 7
2899 | register_specifier << 3
2900 | vector_length << 2
2901 | implied_prefix);
2902 }
2903}
2904
2905static void
2906process_immext (void)
2907{
2908 expressionS *exp;
2909
2910 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2911 {
2912 /* SSE3 Instructions have the fixed operands with an opcode
2913 suffix which is coded in the same place as an 8-bit immediate
2914 field would be. Here we check those operands and remove them
2915 afterwards. */
2916 unsigned int x;
2917
2918 for (x = 0; x < i.operands; x++)
2919 if (i.op[x].regs->reg_num != x)
2920 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2921 register_prefix, i.op[x].regs->reg_name, x + 1,
2922 i.tm.name);
2923
2924 i.operands = 0;
2925 }
2926
2927 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2928 which is coded in the same place as an 8-bit immediate field
2929 would be. Here we fake an 8-bit immediate operand from the
2930 opcode suffix stored in tm.extension_opcode.
2931
2932 AVX instructions also use this encoding, for some of
2933 3 argument instructions. */
2934
2935 gas_assert (i.imm_operands == 0
2936 && (i.operands <= 2
2937 || (i.tm.opcode_modifier.vex
2938 && i.operands <= 4)));
2939
2940 exp = &im_expressions[i.imm_operands++];
2941 i.op[i.operands].imms = exp;
2942 i.types[i.operands] = imm8;
2943 i.operands++;
2944 exp->X_op = O_constant;
2945 exp->X_add_number = i.tm.extension_opcode;
2946 i.tm.extension_opcode = None;
2947}
2948
2949/* This is the guts of the machine-dependent assembler. LINE points to a
2950 machine dependent instruction. This function is supposed to emit
2951 the frags/bytes it assembles to. */
2952
2953void
2954md_assemble (char *line)
2955{
2956 unsigned int j;
2957 char mnemonic[MAX_MNEM_SIZE];
2958 const insn_template *t;
2959
2960 /* Initialize globals. */
2961 memset (&i, '\0', sizeof (i));
2962 for (j = 0; j < MAX_OPERANDS; j++)
2963 i.reloc[j] = NO_RELOC;
2964 memset (disp_expressions, '\0', sizeof (disp_expressions));
2965 memset (im_expressions, '\0', sizeof (im_expressions));
2966 save_stack_p = save_stack;
2967
2968 /* First parse an instruction mnemonic & call i386_operand for the operands.
2969 We assume that the scrubber has arranged it so that line[0] is the valid
2970 start of a (possibly prefixed) mnemonic. */
2971
2972 line = parse_insn (line, mnemonic);
2973 if (line == NULL)
2974 return;
2975
2976 line = parse_operands (line, mnemonic);
2977 this_operand = -1;
2978 if (line == NULL)
2979 return;
2980
2981 /* Now we've parsed the mnemonic into a set of templates, and have the
2982 operands at hand. */
2983
2984 /* All intel opcodes have reversed operands except for "bound" and
2985 "enter". We also don't reverse intersegment "jmp" and "call"
2986 instructions with 2 immediate operands so that the immediate segment
2987 precedes the offset, as it does when in AT&T mode. */
2988 if (intel_syntax
2989 && i.operands > 1
2990 && (strcmp (mnemonic, "bound") != 0)
2991 && (strcmp (mnemonic, "invlpga") != 0)
2992 && !(operand_type_check (i.types[0], imm)
2993 && operand_type_check (i.types[1], imm)))
2994 swap_operands ();
2995
2996 /* The order of the immediates should be reversed
2997 for 2 immediates extrq and insertq instructions */
2998 if (i.imm_operands == 2
2999 && (strcmp (mnemonic, "extrq") == 0
3000 || strcmp (mnemonic, "insertq") == 0))
3001 swap_2_operands (0, 1);
3002
3003 if (i.imm_operands)
3004 optimize_imm ();
3005
3006 /* Don't optimize displacement for movabs since it only takes 64bit
3007 displacement. */
3008 if (i.disp_operands
3009 && !i.disp32_encoding)
3010 {
3011 if (flag_code == CODE_64BIT)
3012 {
3013 if (strcmp (mnemonic, "movabs") == 0)
3014 {
3015 if (disallow_64bit_disp)
3016 as_bad (_("'movabs' isn't supported in x32 mode"));
3017 }
3018 else
3019 optimize_disp ();
3020 }
3021 else
3022 optimize_disp ();
3023 }
3024
3025 /* Next, we find a template that matches the given insn,
3026 making sure the overlap of the given operands types is consistent
3027 with the template operand types. */
3028
3029 if (!(t = match_template ()))
3030 return;
3031
3032 if (sse_check != sse_check_none
3033 && !i.tm.opcode_modifier.noavx
3034 && (i.tm.cpu_flags.bitfield.cpusse
3035 || i.tm.cpu_flags.bitfield.cpusse2
3036 || i.tm.cpu_flags.bitfield.cpusse3
3037 || i.tm.cpu_flags.bitfield.cpussse3
3038 || i.tm.cpu_flags.bitfield.cpusse4_1
3039 || i.tm.cpu_flags.bitfield.cpusse4_2))
3040 {
3041 (sse_check == sse_check_warning
3042 ? as_warn
3043 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3044 }
3045
3046 /* Zap movzx and movsx suffix. The suffix has been set from
3047 "word ptr" or "byte ptr" on the source operand in Intel syntax
3048 or extracted from mnemonic in AT&T syntax. But we'll use
3049 the destination register to choose the suffix for encoding. */
3050 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3051 {
3052 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3053 there is no suffix, the default will be byte extension. */
3054 if (i.reg_operands != 2
3055 && !i.suffix
3056 && intel_syntax)
3057 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3058
3059 i.suffix = 0;
3060 }
3061
3062 if (i.tm.opcode_modifier.fwait)
3063 if (!add_prefix (FWAIT_OPCODE))
3064 return;
3065
3066 /* Check for lock without a lockable instruction. Destination operand
3067 must be memory unless it is xchg (0x86). */
3068 if (i.prefix[LOCK_PREFIX]
3069 && (!i.tm.opcode_modifier.islockable
3070 || i.mem_operands == 0
3071 || (i.tm.base_opcode != 0x86
3072 && !operand_type_check (i.types[i.operands - 1], anymem))))
3073 {
3074 as_bad (_("expecting lockable instruction after `lock'"));
3075 return;
3076 }
3077
3078 /* Check string instruction segment overrides. */
3079 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3080 {
3081 if (!check_string ())
3082 return;
3083 i.disp_operands = 0;
3084 }
3085
3086 if (!process_suffix ())
3087 return;
3088
3089 /* Update operand types. */
3090 for (j = 0; j < i.operands; j++)
3091 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3092
3093 /* Make still unresolved immediate matches conform to size of immediate
3094 given in i.suffix. */
3095 if (!finalize_imm ())
3096 return;
3097
3098 if (i.types[0].bitfield.imm1)
3099 i.imm_operands = 0; /* kludge for shift insns. */
3100
3101 /* We only need to check those implicit registers for instructions
3102 with 3 operands or less. */
3103 if (i.operands <= 3)
3104 for (j = 0; j < i.operands; j++)
3105 if (i.types[j].bitfield.inoutportreg
3106 || i.types[j].bitfield.shiftcount
3107 || i.types[j].bitfield.acc
3108 || i.types[j].bitfield.floatacc)
3109 i.reg_operands--;
3110
3111 /* ImmExt should be processed after SSE2AVX. */
3112 if (!i.tm.opcode_modifier.sse2avx
3113 && i.tm.opcode_modifier.immext)
3114 process_immext ();
3115
3116 /* For insns with operands there are more diddles to do to the opcode. */
3117 if (i.operands)
3118 {
3119 if (!process_operands ())
3120 return;
3121 }
3122 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3123 {
3124 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3125 as_warn (_("translating to `%sp'"), i.tm.name);
3126 }
3127
3128 if (i.tm.opcode_modifier.vex)
3129 build_vex_prefix (t);
3130
3131 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3132 instructions may define INT_OPCODE as well, so avoid this corner
3133 case for those instructions that use MODRM. */
3134 if (i.tm.base_opcode == INT_OPCODE
3135 && !i.tm.opcode_modifier.modrm
3136 && i.op[0].imms->X_add_number == 3)
3137 {
3138 i.tm.base_opcode = INT3_OPCODE;
3139 i.imm_operands = 0;
3140 }
3141
3142 if ((i.tm.opcode_modifier.jump
3143 || i.tm.opcode_modifier.jumpbyte
3144 || i.tm.opcode_modifier.jumpdword)
3145 && i.op[0].disps->X_op == O_constant)
3146 {
3147 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3148 the absolute address given by the constant. Since ix86 jumps and
3149 calls are pc relative, we need to generate a reloc. */
3150 i.op[0].disps->X_add_symbol = &abs_symbol;
3151 i.op[0].disps->X_op = O_symbol;
3152 }
3153
3154 if (i.tm.opcode_modifier.rex64)
3155 i.rex |= REX_W;
3156
3157 /* For 8 bit registers we need an empty rex prefix. Also if the
3158 instruction already has a prefix, we need to convert old
3159 registers to new ones. */
3160
3161 if ((i.types[0].bitfield.reg8
3162 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3163 || (i.types[1].bitfield.reg8
3164 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3165 || ((i.types[0].bitfield.reg8
3166 || i.types[1].bitfield.reg8)
3167 && i.rex != 0))
3168 {
3169 int x;
3170
3171 i.rex |= REX_OPCODE;
3172 for (x = 0; x < 2; x++)
3173 {
3174 /* Look for 8 bit operand that uses old registers. */
3175 if (i.types[x].bitfield.reg8
3176 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3177 {
3178 /* In case it is "hi" register, give up. */
3179 if (i.op[x].regs->reg_num > 3)
3180 as_bad (_("can't encode register '%s%s' in an "
3181 "instruction requiring REX prefix."),
3182 register_prefix, i.op[x].regs->reg_name);
3183
3184 /* Otherwise it is equivalent to the extended register.
3185 Since the encoding doesn't change this is merely
3186 cosmetic cleanup for debug output. */
3187
3188 i.op[x].regs = i.op[x].regs + 8;
3189 }
3190 }
3191 }
3192
3193 if (i.rex != 0)
3194 add_prefix (REX_OPCODE | i.rex);
3195
3196 /* We are ready to output the insn. */
3197 output_insn ();
3198}
3199
3200static char *
3201parse_insn (char *line, char *mnemonic)
3202{
3203 char *l = line;
3204 char *token_start = l;
3205 char *mnem_p;
3206 int supported;
3207 const insn_template *t;
3208 char *dot_p = NULL;
3209
3210 /* Non-zero if we found a prefix only acceptable with string insns. */
3211 const char *expecting_string_instruction = NULL;
3212
3213 while (1)
3214 {
3215 mnem_p = mnemonic;
3216 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3217 {
3218 if (*mnem_p == '.')
3219 dot_p = mnem_p;
3220 mnem_p++;
3221 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3222 {
3223 as_bad (_("no such instruction: `%s'"), token_start);
3224 return NULL;
3225 }
3226 l++;
3227 }
3228 if (!is_space_char (*l)
3229 && *l != END_OF_INSN
3230 && (intel_syntax
3231 || (*l != PREFIX_SEPARATOR
3232 && *l != ',')))
3233 {
3234 as_bad (_("invalid character %s in mnemonic"),
3235 output_invalid (*l));
3236 return NULL;
3237 }
3238 if (token_start == l)
3239 {
3240 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3241 as_bad (_("expecting prefix; got nothing"));
3242 else
3243 as_bad (_("expecting mnemonic; got nothing"));
3244 return NULL;
3245 }
3246
3247 /* Look up instruction (or prefix) via hash table. */
3248 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3249
3250 if (*l != END_OF_INSN
3251 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3252 && current_templates
3253 && current_templates->start->opcode_modifier.isprefix)
3254 {
3255 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3256 {
3257 as_bad ((flag_code != CODE_64BIT
3258 ? _("`%s' is only supported in 64-bit mode")
3259 : _("`%s' is not supported in 64-bit mode")),
3260 current_templates->start->name);
3261 return NULL;
3262 }
3263 /* If we are in 16-bit mode, do not allow addr16 or data16.
3264 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3265 if ((current_templates->start->opcode_modifier.size16
3266 || current_templates->start->opcode_modifier.size32)
3267 && flag_code != CODE_64BIT
3268 && (current_templates->start->opcode_modifier.size32
3269 ^ (flag_code == CODE_16BIT)))
3270 {
3271 as_bad (_("redundant %s prefix"),
3272 current_templates->start->name);
3273 return NULL;
3274 }
3275 /* Add prefix, checking for repeated prefixes. */
3276 switch (add_prefix (current_templates->start->base_opcode))
3277 {
3278 case PREFIX_EXIST:
3279 return NULL;
3280 case PREFIX_REP:
3281 expecting_string_instruction = current_templates->start->name;
3282 break;
3283 default:
3284 break;
3285 }
3286 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3287 token_start = ++l;
3288 }
3289 else
3290 break;
3291 }
3292
3293 if (!current_templates)
3294 {
3295 /* Check if we should swap operand or force 32bit displacement in
3296 encoding. */
3297 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3298 i.swap_operand = 1;
3299 else if (mnem_p - 4 == dot_p
3300 && dot_p[1] == 'd'
3301 && dot_p[2] == '3'
3302 && dot_p[3] == '2')
3303 i.disp32_encoding = 1;
3304 else
3305 goto check_suffix;
3306 mnem_p = dot_p;
3307 *dot_p = '\0';
3308 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3309 }
3310
3311 if (!current_templates)
3312 {
3313check_suffix:
3314 /* See if we can get a match by trimming off a suffix. */
3315 switch (mnem_p[-1])
3316 {
3317 case WORD_MNEM_SUFFIX:
3318 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3319 i.suffix = SHORT_MNEM_SUFFIX;
3320 else
3321 case BYTE_MNEM_SUFFIX:
3322 case QWORD_MNEM_SUFFIX:
3323 i.suffix = mnem_p[-1];
3324 mnem_p[-1] = '\0';
3325 current_templates = (const templates *) hash_find (op_hash,
3326 mnemonic);
3327 break;
3328 case SHORT_MNEM_SUFFIX:
3329 case LONG_MNEM_SUFFIX:
3330 if (!intel_syntax)
3331 {
3332 i.suffix = mnem_p[-1];
3333 mnem_p[-1] = '\0';
3334 current_templates = (const templates *) hash_find (op_hash,
3335 mnemonic);
3336 }
3337 break;
3338
3339 /* Intel Syntax. */
3340 case 'd':
3341 if (intel_syntax)
3342 {
3343 if (intel_float_operand (mnemonic) == 1)
3344 i.suffix = SHORT_MNEM_SUFFIX;
3345 else
3346 i.suffix = LONG_MNEM_SUFFIX;
3347 mnem_p[-1] = '\0';
3348 current_templates = (const templates *) hash_find (op_hash,
3349 mnemonic);
3350 }
3351 break;
3352 }
3353 if (!current_templates)
3354 {
3355 as_bad (_("no such instruction: `%s'"), token_start);
3356 return NULL;
3357 }
3358 }
3359
3360 if (current_templates->start->opcode_modifier.jump
3361 || current_templates->start->opcode_modifier.jumpbyte)
3362 {
3363 /* Check for a branch hint. We allow ",pt" and ",pn" for
3364 predict taken and predict not taken respectively.
3365 I'm not sure that branch hints actually do anything on loop
3366 and jcxz insns (JumpByte) for current Pentium4 chips. They
3367 may work in the future and it doesn't hurt to accept them
3368 now. */
3369 if (l[0] == ',' && l[1] == 'p')
3370 {
3371 if (l[2] == 't')
3372 {
3373 if (!add_prefix (DS_PREFIX_OPCODE))
3374 return NULL;
3375 l += 3;
3376 }
3377 else if (l[2] == 'n')
3378 {
3379 if (!add_prefix (CS_PREFIX_OPCODE))
3380 return NULL;
3381 l += 3;
3382 }
3383 }
3384 }
3385 /* Any other comma loses. */
3386 if (*l == ',')
3387 {
3388 as_bad (_("invalid character %s in mnemonic"),
3389 output_invalid (*l));
3390 return NULL;
3391 }
3392
3393 /* Check if instruction is supported on specified architecture. */
3394 supported = 0;
3395 for (t = current_templates->start; t < current_templates->end; ++t)
3396 {
3397 supported |= cpu_flags_match (t);
3398 if (supported == CPU_FLAGS_PERFECT_MATCH)
3399 goto skip;
3400 }
3401
3402 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3403 {
3404 as_bad (flag_code == CODE_64BIT
3405 ? _("`%s' is not supported in 64-bit mode")
3406 : _("`%s' is only supported in 64-bit mode"),
3407 current_templates->start->name);
3408 return NULL;
3409 }
3410 if (supported != CPU_FLAGS_PERFECT_MATCH)
3411 {
3412 as_bad (_("`%s' is not supported on `%s%s'"),
3413 current_templates->start->name,
3414 cpu_arch_name ? cpu_arch_name : default_arch,
3415 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3416 return NULL;
3417 }
3418
3419skip:
3420 if (!cpu_arch_flags.bitfield.cpui386
3421 && (flag_code != CODE_16BIT))
3422 {
3423 as_warn (_("use .code16 to ensure correct addressing mode"));
3424 }
3425
3426 /* Check for rep/repne without a string instruction. */
3427 if (expecting_string_instruction)
3428 {
3429 static templates override;
3430
3431 for (t = current_templates->start; t < current_templates->end; ++t)
3432 if (t->opcode_modifier.isstring)
3433 break;
3434 if (t >= current_templates->end)
3435 {
3436 as_bad (_("expecting string instruction after `%s'"),
3437 expecting_string_instruction);
3438 return NULL;
3439 }
3440 for (override.start = t; t < current_templates->end; ++t)
3441 if (!t->opcode_modifier.isstring)
3442 break;
3443 override.end = t;
3444 current_templates = &override;
3445 }
3446
3447 return l;
3448}
3449
3450static char *
3451parse_operands (char *l, const char *mnemonic)
3452{
3453 char *token_start;
3454
3455 /* 1 if operand is pending after ','. */
3456 unsigned int expecting_operand = 0;
3457
3458 /* Non-zero if operand parens not balanced. */
3459 unsigned int paren_not_balanced;
3460
3461 while (*l != END_OF_INSN)
3462 {
3463 /* Skip optional white space before operand. */
3464 if (is_space_char (*l))
3465 ++l;
3466 if (!is_operand_char (*l) && *l != END_OF_INSN)
3467 {
3468 as_bad (_("invalid character %s before operand %d"),
3469 output_invalid (*l),
3470 i.operands + 1);
3471 return NULL;
3472 }
3473 token_start = l; /* after white space */
3474 paren_not_balanced = 0;
3475 while (paren_not_balanced || *l != ',')
3476 {
3477 if (*l == END_OF_INSN)
3478 {
3479 if (paren_not_balanced)
3480 {
3481 if (!intel_syntax)
3482 as_bad (_("unbalanced parenthesis in operand %d."),
3483 i.operands + 1);
3484 else
3485 as_bad (_("unbalanced brackets in operand %d."),
3486 i.operands + 1);
3487 return NULL;
3488 }
3489 else
3490 break; /* we are done */
3491 }
3492 else if (!is_operand_char (*l) && !is_space_char (*l))
3493 {
3494 as_bad (_("invalid character %s in operand %d"),
3495 output_invalid (*l),
3496 i.operands + 1);
3497 return NULL;
3498 }
3499 if (!intel_syntax)
3500 {
3501 if (*l == '(')
3502 ++paren_not_balanced;
3503 if (*l == ')')
3504 --paren_not_balanced;
3505 }
3506 else
3507 {
3508 if (*l == '[')
3509 ++paren_not_balanced;
3510 if (*l == ']')
3511 --paren_not_balanced;
3512 }
3513 l++;
3514 }
3515 if (l != token_start)
3516 { /* Yes, we've read in another operand. */
3517 unsigned int operand_ok;
3518 this_operand = i.operands++;
3519 i.types[this_operand].bitfield.unspecified = 1;
3520 if (i.operands > MAX_OPERANDS)
3521 {
3522 as_bad (_("spurious operands; (%d operands/instruction max)"),
3523 MAX_OPERANDS);
3524 return NULL;
3525 }
3526 /* Now parse operand adding info to 'i' as we go along. */
3527 END_STRING_AND_SAVE (l);
3528
3529 if (intel_syntax)
3530 operand_ok =
3531 i386_intel_operand (token_start,
3532 intel_float_operand (mnemonic));
3533 else
3534 operand_ok = i386_att_operand (token_start);
3535
3536 RESTORE_END_STRING (l);
3537 if (!operand_ok)
3538 return NULL;
3539 }
3540 else
3541 {
3542 if (expecting_operand)
3543 {
3544 expecting_operand_after_comma:
3545 as_bad (_("expecting operand after ','; got nothing"));
3546 return NULL;
3547 }
3548 if (*l == ',')
3549 {
3550 as_bad (_("expecting operand before ','; got nothing"));
3551 return NULL;
3552 }
3553 }
3554
3555 /* Now *l must be either ',' or END_OF_INSN. */
3556 if (*l == ',')
3557 {
3558 if (*++l == END_OF_INSN)
3559 {
3560 /* Just skip it, if it's \n complain. */
3561 goto expecting_operand_after_comma;
3562 }
3563 expecting_operand = 1;
3564 }
3565 }
3566 return l;
3567}
3568
3569static void
3570swap_2_operands (int xchg1, int xchg2)
3571{
3572 union i386_op temp_op;
3573 i386_operand_type temp_type;
3574 enum bfd_reloc_code_real temp_reloc;
3575
3576 temp_type = i.types[xchg2];
3577 i.types[xchg2] = i.types[xchg1];
3578 i.types[xchg1] = temp_type;
3579 temp_op = i.op[xchg2];
3580 i.op[xchg2] = i.op[xchg1];
3581 i.op[xchg1] = temp_op;
3582 temp_reloc = i.reloc[xchg2];
3583 i.reloc[xchg2] = i.reloc[xchg1];
3584 i.reloc[xchg1] = temp_reloc;
3585}
3586
3587static void
3588swap_operands (void)
3589{
3590 switch (i.operands)
3591 {
3592 case 5:
3593 case 4:
3594 swap_2_operands (1, i.operands - 2);
3595 case 3:
3596 case 2:
3597 swap_2_operands (0, i.operands - 1);
3598 break;
3599 default:
3600 abort ();
3601 }
3602
3603 if (i.mem_operands == 2)
3604 {
3605 const seg_entry *temp_seg;
3606 temp_seg = i.seg[0];
3607 i.seg[0] = i.seg[1];
3608 i.seg[1] = temp_seg;
3609 }
3610}
3611
3612/* Try to ensure constant immediates are represented in the smallest
3613 opcode possible. */
3614static void
3615optimize_imm (void)
3616{
3617 char guess_suffix = 0;
3618 int op;
3619
3620 if (i.suffix)
3621 guess_suffix = i.suffix;
3622 else if (i.reg_operands)
3623 {
3624 /* Figure out a suffix from the last register operand specified.
3625 We can't do this properly yet, ie. excluding InOutPortReg,
3626 but the following works for instructions with immediates.
3627 In any case, we can't set i.suffix yet. */
3628 for (op = i.operands; --op >= 0;)
3629 if (i.types[op].bitfield.reg8)
3630 {
3631 guess_suffix = BYTE_MNEM_SUFFIX;
3632 break;
3633 }
3634 else if (i.types[op].bitfield.reg16)
3635 {
3636 guess_suffix = WORD_MNEM_SUFFIX;
3637 break;
3638 }
3639 else if (i.types[op].bitfield.reg32)
3640 {
3641 guess_suffix = LONG_MNEM_SUFFIX;
3642 break;
3643 }
3644 else if (i.types[op].bitfield.reg64)
3645 {
3646 guess_suffix = QWORD_MNEM_SUFFIX;
3647 break;
3648 }
3649 }
3650 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3651 guess_suffix = WORD_MNEM_SUFFIX;
3652
3653 for (op = i.operands; --op >= 0;)
3654 if (operand_type_check (i.types[op], imm))
3655 {
3656 switch (i.op[op].imms->X_op)
3657 {
3658 case O_constant:
3659 /* If a suffix is given, this operand may be shortened. */
3660 switch (guess_suffix)
3661 {
3662 case LONG_MNEM_SUFFIX:
3663 i.types[op].bitfield.imm32 = 1;
3664 i.types[op].bitfield.imm64 = 1;
3665 break;
3666 case WORD_MNEM_SUFFIX:
3667 i.types[op].bitfield.imm16 = 1;
3668 i.types[op].bitfield.imm32 = 1;
3669 i.types[op].bitfield.imm32s = 1;
3670 i.types[op].bitfield.imm64 = 1;
3671 break;
3672 case BYTE_MNEM_SUFFIX:
3673 i.types[op].bitfield.imm8 = 1;
3674 i.types[op].bitfield.imm8s = 1;
3675 i.types[op].bitfield.imm16 = 1;
3676 i.types[op].bitfield.imm32 = 1;
3677 i.types[op].bitfield.imm32s = 1;
3678 i.types[op].bitfield.imm64 = 1;
3679 break;
3680 }
3681
3682 /* If this operand is at most 16 bits, convert it
3683 to a signed 16 bit number before trying to see
3684 whether it will fit in an even smaller size.
3685 This allows a 16-bit operand such as $0xffe0 to
3686 be recognised as within Imm8S range. */
3687 if ((i.types[op].bitfield.imm16)
3688 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3689 {
3690 i.op[op].imms->X_add_number =
3691 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3692 }
3693 if ((i.types[op].bitfield.imm32)
3694 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3695 == 0))
3696 {
3697 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3698 ^ ((offsetT) 1 << 31))
3699 - ((offsetT) 1 << 31));
3700 }
3701 i.types[op]
3702 = operand_type_or (i.types[op],
3703 smallest_imm_type (i.op[op].imms->X_add_number));
3704
3705 /* We must avoid matching of Imm32 templates when 64bit
3706 only immediate is available. */
3707 if (guess_suffix == QWORD_MNEM_SUFFIX)
3708 i.types[op].bitfield.imm32 = 0;
3709 break;
3710
3711 case O_absent:
3712 case O_register:
3713 abort ();
3714
3715 /* Symbols and expressions. */
3716 default:
3717 /* Convert symbolic operand to proper sizes for matching, but don't
3718 prevent matching a set of insns that only supports sizes other
3719 than those matching the insn suffix. */
3720 {
3721 i386_operand_type mask, allowed;
3722 const insn_template *t;
3723
3724 operand_type_set (&mask, 0);
3725 operand_type_set (&allowed, 0);
3726
3727 for (t = current_templates->start;
3728 t < current_templates->end;
3729 ++t)
3730 allowed = operand_type_or (allowed,
3731 t->operand_types[op]);
3732 switch (guess_suffix)
3733 {
3734 case QWORD_MNEM_SUFFIX:
3735 mask.bitfield.imm64 = 1;
3736 mask.bitfield.imm32s = 1;
3737 break;
3738 case LONG_MNEM_SUFFIX:
3739 mask.bitfield.imm32 = 1;
3740 break;
3741 case WORD_MNEM_SUFFIX:
3742 mask.bitfield.imm16 = 1;
3743 break;
3744 case BYTE_MNEM_SUFFIX:
3745 mask.bitfield.imm8 = 1;
3746 break;
3747 default:
3748 break;
3749 }
3750 allowed = operand_type_and (mask, allowed);
3751 if (!operand_type_all_zero (&allowed))
3752 i.types[op] = operand_type_and (i.types[op], mask);
3753 }
3754 break;
3755 }
3756 }
3757}
3758
3759/* Try to use the smallest displacement type too. */
3760static void
3761optimize_disp (void)
3762{
3763 int op;
3764
3765 for (op = i.operands; --op >= 0;)
3766 if (operand_type_check (i.types[op], disp))
3767 {
3768 if (i.op[op].disps->X_op == O_constant)
3769 {
3770 offsetT op_disp = i.op[op].disps->X_add_number;
3771
3772 if (i.types[op].bitfield.disp16
3773 && (op_disp & ~(offsetT) 0xffff) == 0)
3774 {
3775 /* If this operand is at most 16 bits, convert
3776 to a signed 16 bit number and don't use 64bit
3777 displacement. */
3778 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3779 i.types[op].bitfield.disp64 = 0;
3780 }
3781 if (i.types[op].bitfield.disp32
3782 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3783 {
3784 /* If this operand is at most 32 bits, convert
3785 to a signed 32 bit number and don't use 64bit
3786 displacement. */
3787 op_disp &= (((offsetT) 2 << 31) - 1);
3788 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3789 i.types[op].bitfield.disp64 = 0;
3790 }
3791 if (!op_disp && i.types[op].bitfield.baseindex)
3792 {
3793 i.types[op].bitfield.disp8 = 0;
3794 i.types[op].bitfield.disp16 = 0;
3795 i.types[op].bitfield.disp32 = 0;
3796 i.types[op].bitfield.disp32s = 0;
3797 i.types[op].bitfield.disp64 = 0;
3798 i.op[op].disps = 0;
3799 i.disp_operands--;
3800 }
3801 else if (flag_code == CODE_64BIT)
3802 {
3803 if (fits_in_signed_long (op_disp))
3804 {
3805 i.types[op].bitfield.disp64 = 0;
3806 i.types[op].bitfield.disp32s = 1;
3807 }
3808 if (i.prefix[ADDR_PREFIX]
3809 && fits_in_unsigned_long (op_disp))
3810 i.types[op].bitfield.disp32 = 1;
3811 }
3812 if ((i.types[op].bitfield.disp32
3813 || i.types[op].bitfield.disp32s
3814 || i.types[op].bitfield.disp16)
3815 && fits_in_signed_byte (op_disp))
3816 i.types[op].bitfield.disp8 = 1;
3817 }
3818 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3819 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3820 {
3821 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3822 i.op[op].disps, 0, i.reloc[op]);
3823 i.types[op].bitfield.disp8 = 0;
3824 i.types[op].bitfield.disp16 = 0;
3825 i.types[op].bitfield.disp32 = 0;
3826 i.types[op].bitfield.disp32s = 0;
3827 i.types[op].bitfield.disp64 = 0;
3828 }
3829 else
3830 /* We only support 64bit displacement on constants. */
3831 i.types[op].bitfield.disp64 = 0;
3832 }
3833}
3834
3835/* Check if operands are valid for the instruction. Update VEX
3836 operand types. */
3837
3838static int
3839VEX_check_operands (const insn_template *t)
3840{
3841 if (!t->opcode_modifier.vex)
3842 return 0;
3843
3844 /* Only check VEX_Imm4, which must be the first operand. */
3845 if (t->operand_types[0].bitfield.vec_imm4)
3846 {
3847 if (i.op[0].imms->X_op != O_constant
3848 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3849 {
3850 i.error = bad_imm4;
3851 return 1;
3852 }
3853
3854 /* Turn off Imm8 so that update_imm won't complain. */
3855 i.types[0] = vec_imm4;
3856 }
3857
3858 return 0;
3859}
3860
3861static const insn_template *
3862match_template (void)
3863{
3864 /* Points to template once we've found it. */
3865 const insn_template *t;
3866 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3867 i386_operand_type overlap4;
3868 unsigned int found_reverse_match;
3869 i386_opcode_modifier suffix_check;
3870 i386_operand_type operand_types [MAX_OPERANDS];
3871 int addr_prefix_disp;
3872 unsigned int j;
3873 unsigned int found_cpu_match;
3874 unsigned int check_register;
3875
3876#if MAX_OPERANDS != 5
3877# error "MAX_OPERANDS must be 5."
3878#endif
3879
3880 found_reverse_match = 0;
3881 addr_prefix_disp = -1;
3882
3883 memset (&suffix_check, 0, sizeof (suffix_check));
3884 if (i.suffix == BYTE_MNEM_SUFFIX)
3885 suffix_check.no_bsuf = 1;
3886 else if (i.suffix == WORD_MNEM_SUFFIX)
3887 suffix_check.no_wsuf = 1;
3888 else if (i.suffix == SHORT_MNEM_SUFFIX)
3889 suffix_check.no_ssuf = 1;
3890 else if (i.suffix == LONG_MNEM_SUFFIX)
3891 suffix_check.no_lsuf = 1;
3892 else if (i.suffix == QWORD_MNEM_SUFFIX)
3893 suffix_check.no_qsuf = 1;
3894 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3895 suffix_check.no_ldsuf = 1;
3896
3897 /* Must have right number of operands. */
3898 i.error = number_of_operands_mismatch;
3899
3900 for (t = current_templates->start; t < current_templates->end; t++)
3901 {
3902 addr_prefix_disp = -1;
3903
3904 if (i.operands != t->operands)
3905 continue;
3906
3907 /* Check processor support. */
3908 i.error = unsupported;
3909 found_cpu_match = (cpu_flags_match (t)
3910 == CPU_FLAGS_PERFECT_MATCH);
3911 if (!found_cpu_match)
3912 continue;
3913
3914 /* Check old gcc support. */
3915 i.error = old_gcc_only;
3916 if (!old_gcc && t->opcode_modifier.oldgcc)
3917 continue;
3918
3919 /* Check AT&T mnemonic. */
3920 i.error = unsupported_with_intel_mnemonic;
3921 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3922 continue;
3923
3924 /* Check AT&T/Intel syntax. */
3925 i.error = unsupported_syntax;
3926 if ((intel_syntax && t->opcode_modifier.attsyntax)
3927 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3928 continue;
3929
3930 /* Check the suffix, except for some instructions in intel mode. */
3931 i.error = invalid_instruction_suffix;
3932 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3933 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3934 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3935 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3936 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3937 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3938 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3939 continue;
3940
3941 if (!operand_size_match (t))
3942 continue;
3943
3944 for (j = 0; j < MAX_OPERANDS; j++)
3945 operand_types[j] = t->operand_types[j];
3946
3947 /* In general, don't allow 64-bit operands in 32-bit mode. */
3948 if (i.suffix == QWORD_MNEM_SUFFIX
3949 && flag_code != CODE_64BIT
3950 && (intel_syntax
3951 ? (!t->opcode_modifier.ignoresize
3952 && !intel_float_operand (t->name))
3953 : intel_float_operand (t->name) != 2)
3954 && ((!operand_types[0].bitfield.regmmx
3955 && !operand_types[0].bitfield.regxmm
3956 && !operand_types[0].bitfield.regymm)
3957 || (!operand_types[t->operands > 1].bitfield.regmmx
3958 && !!operand_types[t->operands > 1].bitfield.regxmm
3959 && !!operand_types[t->operands > 1].bitfield.regymm))
3960 && (t->base_opcode != 0x0fc7
3961 || t->extension_opcode != 1 /* cmpxchg8b */))
3962 continue;
3963
3964 /* In general, don't allow 32-bit operands on pre-386. */
3965 else if (i.suffix == LONG_MNEM_SUFFIX
3966 && !cpu_arch_flags.bitfield.cpui386
3967 && (intel_syntax
3968 ? (!t->opcode_modifier.ignoresize
3969 && !intel_float_operand (t->name))
3970 : intel_float_operand (t->name) != 2)
3971 && ((!operand_types[0].bitfield.regmmx
3972 && !operand_types[0].bitfield.regxmm)
3973 || (!operand_types[t->operands > 1].bitfield.regmmx
3974 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3975 continue;
3976
3977 /* Do not verify operands when there are none. */
3978 else
3979 {
3980 if (!t->operands)
3981 /* We've found a match; break out of loop. */
3982 break;
3983 }
3984
3985 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3986 into Disp32/Disp16/Disp32 operand. */
3987 if (i.prefix[ADDR_PREFIX] != 0)
3988 {
3989 /* There should be only one Disp operand. */
3990 switch (flag_code)
3991 {
3992 case CODE_16BIT:
3993 for (j = 0; j < MAX_OPERANDS; j++)
3994 {
3995 if (operand_types[j].bitfield.disp16)
3996 {
3997 addr_prefix_disp = j;
3998 operand_types[j].bitfield.disp32 = 1;
3999 operand_types[j].bitfield.disp16 = 0;
4000 break;
4001 }
4002 }
4003 break;
4004 case CODE_32BIT:
4005 for (j = 0; j < MAX_OPERANDS; j++)
4006 {
4007 if (operand_types[j].bitfield.disp32)
4008 {
4009 addr_prefix_disp = j;
4010 operand_types[j].bitfield.disp32 = 0;
4011 operand_types[j].bitfield.disp16 = 1;
4012 break;
4013 }
4014 }
4015 break;
4016 case CODE_64BIT:
4017 for (j = 0; j < MAX_OPERANDS; j++)
4018 {
4019 if (operand_types[j].bitfield.disp64)
4020 {
4021 addr_prefix_disp = j;
4022 operand_types[j].bitfield.disp64 = 0;
4023 operand_types[j].bitfield.disp32 = 1;
4024 break;
4025 }
4026 }
4027 break;
4028 }
4029 }
4030
4031 /* We check register size if needed. */
4032 check_register = t->opcode_modifier.checkregsize;
4033 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4034 switch (t->operands)
4035 {
4036 case 1:
4037 if (!operand_type_match (overlap0, i.types[0]))
4038 continue;
4039 break;
4040 case 2:
4041 /* xchg %eax, %eax is a special case. It is an aliase for nop
4042 only in 32bit mode and we can use opcode 0x90. In 64bit
4043 mode, we can't use 0x90 for xchg %eax, %eax since it should
4044 zero-extend %eax to %rax. */
4045 if (flag_code == CODE_64BIT
4046 && t->base_opcode == 0x90
4047 && operand_type_equal (&i.types [0], &acc32)
4048 && operand_type_equal (&i.types [1], &acc32))
4049 continue;
4050 if (i.swap_operand)
4051 {
4052 /* If we swap operand in encoding, we either match
4053 the next one or reverse direction of operands. */
4054 if (t->opcode_modifier.s)
4055 continue;
4056 else if (t->opcode_modifier.d)
4057 goto check_reverse;
4058 }
4059
4060 case 3:
4061 /* If we swap operand in encoding, we match the next one. */
4062 if (i.swap_operand && t->opcode_modifier.s)
4063 continue;
4064 case 4:
4065 case 5:
4066 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4067 if (!operand_type_match (overlap0, i.types[0])
4068 || !operand_type_match (overlap1, i.types[1])
4069 || (check_register
4070 && !operand_type_register_match (overlap0, i.types[0],
4071 operand_types[0],
4072 overlap1, i.types[1],
4073 operand_types[1])))
4074 {
4075 /* Check if other direction is valid ... */
4076 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4077 continue;
4078
4079check_reverse:
4080 /* Try reversing direction of operands. */
4081 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4082 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4083 if (!operand_type_match (overlap0, i.types[0])
4084 || !operand_type_match (overlap1, i.types[1])
4085 || (check_register
4086 && !operand_type_register_match (overlap0,
4087 i.types[0],
4088 operand_types[1],
4089 overlap1,
4090 i.types[1],
4091 operand_types[0])))
4092 {
4093 /* Does not match either direction. */
4094 continue;
4095 }
4096 /* found_reverse_match holds which of D or FloatDR
4097 we've found. */
4098 if (t->opcode_modifier.d)
4099 found_reverse_match = Opcode_D;
4100 else if (t->opcode_modifier.floatd)
4101 found_reverse_match = Opcode_FloatD;
4102 else
4103 found_reverse_match = 0;
4104 if (t->opcode_modifier.floatr)
4105 found_reverse_match |= Opcode_FloatR;
4106 }
4107 else
4108 {
4109 /* Found a forward 2 operand match here. */
4110 switch (t->operands)
4111 {
4112 case 5:
4113 overlap4 = operand_type_and (i.types[4],
4114 operand_types[4]);
4115 case 4:
4116 overlap3 = operand_type_and (i.types[3],
4117 operand_types[3]);
4118 case 3:
4119 overlap2 = operand_type_and (i.types[2],
4120 operand_types[2]);
4121 break;
4122 }
4123
4124 switch (t->operands)
4125 {
4126 case 5:
4127 if (!operand_type_match (overlap4, i.types[4])
4128 || !operand_type_register_match (overlap3,
4129 i.types[3],
4130 operand_types[3],
4131 overlap4,
4132 i.types[4],
4133 operand_types[4]))
4134 continue;
4135 case 4:
4136 if (!operand_type_match (overlap3, i.types[3])
4137 || (check_register
4138 && !operand_type_register_match (overlap2,
4139 i.types[2],
4140 operand_types[2],
4141 overlap3,
4142 i.types[3],
4143 operand_types[3])))
4144 continue;
4145 case 3:
4146 /* Here we make use of the fact that there are no
4147 reverse match 3 operand instructions, and all 3
4148 operand instructions only need to be checked for
4149 register consistency between operands 2 and 3. */
4150 if (!operand_type_match (overlap2, i.types[2])
4151 || (check_register
4152 && !operand_type_register_match (overlap1,
4153 i.types[1],
4154 operand_types[1],
4155 overlap2,
4156 i.types[2],
4157 operand_types[2])))
4158 continue;
4159 break;
4160 }
4161 }
4162 /* Found either forward/reverse 2, 3 or 4 operand match here:
4163 slip through to break. */
4164 }
4165 if (!found_cpu_match)
4166 {
4167 found_reverse_match = 0;
4168 continue;
4169 }
4170
4171 /* Check if VEX operands are valid. */
4172 if (VEX_check_operands (t))
4173 continue;
4174
4175 /* We've found a match; break out of loop. */
4176 break;
4177 }
4178
4179 if (t == current_templates->end)
4180 {
4181 /* We found no match. */
4182 const char *err_msg;
4183 switch (i.error)
4184 {
4185 default:
4186 abort ();
4187 case operand_size_mismatch:
4188 err_msg = _("operand size mismatch");
4189 break;
4190 case operand_type_mismatch:
4191 err_msg = _("operand type mismatch");
4192 break;
4193 case register_type_mismatch:
4194 err_msg = _("register type mismatch");
4195 break;
4196 case number_of_operands_mismatch:
4197 err_msg = _("number of operands mismatch");
4198 break;
4199 case invalid_instruction_suffix:
4200 err_msg = _("invalid instruction suffix");
4201 break;
4202 case bad_imm4:
4203 err_msg = _("Imm4 isn't the first operand");
4204 break;
4205 case old_gcc_only:
4206 err_msg = _("only supported with old gcc");
4207 break;
4208 case unsupported_with_intel_mnemonic:
4209 err_msg = _("unsupported with Intel mnemonic");
4210 break;
4211 case unsupported_syntax:
4212 err_msg = _("unsupported syntax");
4213 break;
4214 case unsupported:
4215 err_msg = _("unsupported");
4216 break;
4217 }
4218 as_bad (_("%s for `%s'"), err_msg,
4219 current_templates->start->name);
4220 return NULL;
4221 }
4222
4223 if (!quiet_warnings)
4224 {
4225 if (!intel_syntax
4226 && (i.types[0].bitfield.jumpabsolute
4227 != operand_types[0].bitfield.jumpabsolute))
4228 {
4229 as_warn (_("indirect %s without `*'"), t->name);
4230 }
4231
4232 if (t->opcode_modifier.isprefix
4233 && t->opcode_modifier.ignoresize)
4234 {
4235 /* Warn them that a data or address size prefix doesn't
4236 affect assembly of the next line of code. */
4237 as_warn (_("stand-alone `%s' prefix"), t->name);
4238 }
4239 }
4240
4241 /* Copy the template we found. */
4242 i.tm = *t;
4243
4244 if (addr_prefix_disp != -1)
4245 i.tm.operand_types[addr_prefix_disp]
4246 = operand_types[addr_prefix_disp];
4247
4248 if (found_reverse_match)
4249 {
4250 /* If we found a reverse match we must alter the opcode
4251 direction bit. found_reverse_match holds bits to change
4252 (different for int & float insns). */
4253
4254 i.tm.base_opcode ^= found_reverse_match;
4255
4256 i.tm.operand_types[0] = operand_types[1];
4257 i.tm.operand_types[1] = operand_types[0];
4258 }
4259
4260 return t;
4261}
4262
4263static int
4264check_string (void)
4265{
4266 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4267 if (i.tm.operand_types[mem_op].bitfield.esseg)
4268 {
4269 if (i.seg[0] != NULL && i.seg[0] != &es)
4270 {
4271 as_bad (_("`%s' operand %d must use `%ses' segment"),
4272 i.tm.name,
4273 mem_op + 1,
4274 register_prefix);
4275 return 0;
4276 }
4277 /* There's only ever one segment override allowed per instruction.
4278 This instruction possibly has a legal segment override on the
4279 second operand, so copy the segment to where non-string
4280 instructions store it, allowing common code. */
4281 i.seg[0] = i.seg[1];
4282 }
4283 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4284 {
4285 if (i.seg[1] != NULL && i.seg[1] != &es)
4286 {
4287 as_bad (_("`%s' operand %d must use `%ses' segment"),
4288 i.tm.name,
4289 mem_op + 2,
4290 register_prefix);
4291 return 0;
4292 }
4293 }
4294 return 1;
4295}
4296
4297static int
4298process_suffix (void)
4299{
4300 /* If matched instruction specifies an explicit instruction mnemonic
4301 suffix, use it. */
4302 if (i.tm.opcode_modifier.size16)
4303 i.suffix = WORD_MNEM_SUFFIX;
4304 else if (i.tm.opcode_modifier.size32)
4305 i.suffix = LONG_MNEM_SUFFIX;
4306 else if (i.tm.opcode_modifier.size64)
4307 i.suffix = QWORD_MNEM_SUFFIX;
4308 else if (i.reg_operands)
4309 {
4310 /* If there's no instruction mnemonic suffix we try to invent one
4311 based on register operands. */
4312 if (!i.suffix)
4313 {
4314 /* We take i.suffix from the last register operand specified,
4315 Destination register type is more significant than source
4316 register type. crc32 in SSE4.2 prefers source register
4317 type. */
4318 if (i.tm.base_opcode == 0xf20f38f1)
4319 {
4320 if (i.types[0].bitfield.reg16)
4321 i.suffix = WORD_MNEM_SUFFIX;
4322 else if (i.types[0].bitfield.reg32)
4323 i.suffix = LONG_MNEM_SUFFIX;
4324 else if (i.types[0].bitfield.reg64)
4325 i.suffix = QWORD_MNEM_SUFFIX;
4326 }
4327 else if (i.tm.base_opcode == 0xf20f38f0)
4328 {
4329 if (i.types[0].bitfield.reg8)
4330 i.suffix = BYTE_MNEM_SUFFIX;
4331 }
4332
4333 if (!i.suffix)
4334 {
4335 int op;
4336
4337 if (i.tm.base_opcode == 0xf20f38f1
4338 || i.tm.base_opcode == 0xf20f38f0)
4339 {
4340 /* We have to know the operand size for crc32. */
4341 as_bad (_("ambiguous memory operand size for `%s`"),
4342 i.tm.name);
4343 return 0;
4344 }
4345
4346 for (op = i.operands; --op >= 0;)
4347 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4348 {
4349 if (i.types[op].bitfield.reg8)
4350 {
4351 i.suffix = BYTE_MNEM_SUFFIX;
4352 break;
4353 }
4354 else if (i.types[op].bitfield.reg16)
4355 {
4356 i.suffix = WORD_MNEM_SUFFIX;
4357 break;
4358 }
4359 else if (i.types[op].bitfield.reg32)
4360 {
4361 i.suffix = LONG_MNEM_SUFFIX;
4362 break;
4363 }
4364 else if (i.types[op].bitfield.reg64)
4365 {
4366 i.suffix = QWORD_MNEM_SUFFIX;
4367 break;
4368 }
4369 }
4370 }
4371 }
4372 else if (i.suffix == BYTE_MNEM_SUFFIX)
4373 {
4374 if (intel_syntax
4375 && i.tm.opcode_modifier.ignoresize
4376 && i.tm.opcode_modifier.no_bsuf)
4377 i.suffix = 0;
4378 else if (!check_byte_reg ())
4379 return 0;
4380 }
4381 else if (i.suffix == LONG_MNEM_SUFFIX)
4382 {
4383 if (intel_syntax
4384 && i.tm.opcode_modifier.ignoresize
4385 && i.tm.opcode_modifier.no_lsuf)
4386 i.suffix = 0;
4387 else if (!check_long_reg ())
4388 return 0;
4389 }
4390 else if (i.suffix == QWORD_MNEM_SUFFIX)
4391 {
4392 if (intel_syntax
4393 && i.tm.opcode_modifier.ignoresize
4394 && i.tm.opcode_modifier.no_qsuf)
4395 i.suffix = 0;
4396 else if (!check_qword_reg ())
4397 return 0;
4398 }
4399 else if (i.suffix == WORD_MNEM_SUFFIX)
4400 {
4401 if (intel_syntax
4402 && i.tm.opcode_modifier.ignoresize
4403 && i.tm.opcode_modifier.no_wsuf)
4404 i.suffix = 0;
4405 else if (!check_word_reg ())
4406 return 0;
4407 }
4408 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4409 || i.suffix == YMMWORD_MNEM_SUFFIX)
4410 {
4411 /* Skip if the instruction has x/y suffix. match_template
4412 should check if it is a valid suffix. */
4413 }
4414 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4415 /* Do nothing if the instruction is going to ignore the prefix. */
4416 ;
4417 else
4418 abort ();
4419 }
4420 else if (i.tm.opcode_modifier.defaultsize
4421 && !i.suffix
4422 /* exclude fldenv/frstor/fsave/fstenv */
4423 && i.tm.opcode_modifier.no_ssuf)
4424 {
4425 i.suffix = stackop_size;
4426 }
4427 else if (intel_syntax
4428 && !i.suffix
4429 && (i.tm.operand_types[0].bitfield.jumpabsolute
4430 || i.tm.opcode_modifier.jumpbyte
4431 || i.tm.opcode_modifier.jumpintersegment
4432 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4433 && i.tm.extension_opcode <= 3)))
4434 {
4435 switch (flag_code)
4436 {
4437 case CODE_64BIT:
4438 if (!i.tm.opcode_modifier.no_qsuf)
4439 {
4440 i.suffix = QWORD_MNEM_SUFFIX;
4441 break;
4442 }
4443 case CODE_32BIT:
4444 if (!i.tm.opcode_modifier.no_lsuf)
4445 i.suffix = LONG_MNEM_SUFFIX;
4446 break;
4447 case CODE_16BIT:
4448 if (!i.tm.opcode_modifier.no_wsuf)
4449 i.suffix = WORD_MNEM_SUFFIX;
4450 break;
4451 }
4452 }
4453
4454 if (!i.suffix)
4455 {
4456 if (!intel_syntax)
4457 {
4458 if (i.tm.opcode_modifier.w)
4459 {
4460 as_bad (_("no instruction mnemonic suffix given and "
4461 "no register operands; can't size instruction"));
4462 return 0;
4463 }
4464 }
4465 else
4466 {
4467 unsigned int suffixes;
4468
4469 suffixes = !i.tm.opcode_modifier.no_bsuf;
4470 if (!i.tm.opcode_modifier.no_wsuf)
4471 suffixes |= 1 << 1;
4472 if (!i.tm.opcode_modifier.no_lsuf)
4473 suffixes |= 1 << 2;
4474 if (!i.tm.opcode_modifier.no_ldsuf)
4475 suffixes |= 1 << 3;
4476 if (!i.tm.opcode_modifier.no_ssuf)
4477 suffixes |= 1 << 4;
4478 if (!i.tm.opcode_modifier.no_qsuf)
4479 suffixes |= 1 << 5;
4480
4481 /* There are more than suffix matches. */
4482 if (i.tm.opcode_modifier.w
4483 || ((suffixes & (suffixes - 1))
4484 && !i.tm.opcode_modifier.defaultsize
4485 && !i.tm.opcode_modifier.ignoresize))
4486 {
4487 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4488 return 0;
4489 }
4490 }
4491 }
4492
4493 /* Change the opcode based on the operand size given by i.suffix;
4494 We don't need to change things for byte insns. */
4495
4496 if (i.suffix
4497 && i.suffix != BYTE_MNEM_SUFFIX
4498 && i.suffix != XMMWORD_MNEM_SUFFIX
4499 && i.suffix != YMMWORD_MNEM_SUFFIX)
4500 {
4501 /* It's not a byte, select word/dword operation. */
4502 if (i.tm.opcode_modifier.w)
4503 {
4504 if (i.tm.opcode_modifier.shortform)
4505 i.tm.base_opcode |= 8;
4506 else
4507 i.tm.base_opcode |= 1;
4508 }
4509
4510 /* Now select between word & dword operations via the operand
4511 size prefix, except for instructions that will ignore this
4512 prefix anyway. */
4513 if (i.tm.opcode_modifier.addrprefixop0)
4514 {
4515 /* The address size override prefix changes the size of the
4516 first operand. */
4517 if ((flag_code == CODE_32BIT
4518 && i.op->regs[0].reg_type.bitfield.reg16)
4519 || (flag_code != CODE_32BIT
4520 && i.op->regs[0].reg_type.bitfield.reg32))
4521 if (!add_prefix (ADDR_PREFIX_OPCODE))
4522 return 0;
4523 }
4524 else if (i.suffix != QWORD_MNEM_SUFFIX
4525 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4526 && !i.tm.opcode_modifier.ignoresize
4527 && !i.tm.opcode_modifier.floatmf
4528 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4529 || (flag_code == CODE_64BIT
4530 && i.tm.opcode_modifier.jumpbyte)))
4531 {
4532 unsigned int prefix = DATA_PREFIX_OPCODE;
4533
4534 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4535 prefix = ADDR_PREFIX_OPCODE;
4536
4537 if (!add_prefix (prefix))
4538 return 0;
4539 }
4540
4541 /* Set mode64 for an operand. */
4542 if (i.suffix == QWORD_MNEM_SUFFIX
4543 && flag_code == CODE_64BIT
4544 && !i.tm.opcode_modifier.norex64)
4545 {
4546 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4547 need rex64. cmpxchg8b is also a special case. */
4548 if (! (i.operands == 2
4549 && i.tm.base_opcode == 0x90
4550 && i.tm.extension_opcode == None
4551 && operand_type_equal (&i.types [0], &acc64)
4552 && operand_type_equal (&i.types [1], &acc64))
4553 && ! (i.operands == 1
4554 && i.tm.base_opcode == 0xfc7
4555 && i.tm.extension_opcode == 1
4556 && !operand_type_check (i.types [0], reg)
4557 && operand_type_check (i.types [0], anymem)))
4558 i.rex |= REX_W;
4559 }
4560
4561 /* Size floating point instruction. */
4562 if (i.suffix == LONG_MNEM_SUFFIX)
4563 if (i.tm.opcode_modifier.floatmf)
4564 i.tm.base_opcode ^= 4;
4565 }
4566
4567 return 1;
4568}
4569
4570static int
4571check_byte_reg (void)
4572{
4573 int op;
4574
4575 for (op = i.operands; --op >= 0;)
4576 {
4577 /* If this is an eight bit register, it's OK. If it's the 16 or
4578 32 bit version of an eight bit register, we will just use the
4579 low portion, and that's OK too. */
4580 if (i.types[op].bitfield.reg8)
4581 continue;
4582
4583 /* crc32 doesn't generate this warning. */
4584 if (i.tm.base_opcode == 0xf20f38f0)
4585 continue;
4586
4587 if ((i.types[op].bitfield.reg16
4588 || i.types[op].bitfield.reg32
4589 || i.types[op].bitfield.reg64)
4590 && i.op[op].regs->reg_num < 4)
4591 {
4592 /* Prohibit these changes in the 64bit mode, since the
4593 lowering is more complicated. */
4594 if (flag_code == CODE_64BIT
4595 && !i.tm.operand_types[op].bitfield.inoutportreg)
4596 {
4597 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4598 register_prefix, i.op[op].regs->reg_name,
4599 i.suffix);
4600 return 0;
4601 }
4602#if REGISTER_WARNINGS
4603 if (!quiet_warnings
4604 && !i.tm.operand_types[op].bitfield.inoutportreg)
4605 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4606 register_prefix,
4607 (i.op[op].regs + (i.types[op].bitfield.reg16
4608 ? REGNAM_AL - REGNAM_AX
4609 : REGNAM_AL - REGNAM_EAX))->reg_name,
4610 register_prefix,
4611 i.op[op].regs->reg_name,
4612 i.suffix);
4613#endif
4614 continue;
4615 }
4616 /* Any other register is bad. */
4617 if (i.types[op].bitfield.reg16
4618 || i.types[op].bitfield.reg32
4619 || i.types[op].bitfield.reg64
4620 || i.types[op].bitfield.regmmx
4621 || i.types[op].bitfield.regxmm
4622 || i.types[op].bitfield.regymm
4623 || i.types[op].bitfield.sreg2
4624 || i.types[op].bitfield.sreg3
4625 || i.types[op].bitfield.control
4626 || i.types[op].bitfield.debug
4627 || i.types[op].bitfield.test
4628 || i.types[op].bitfield.floatreg
4629 || i.types[op].bitfield.floatacc)
4630 {
4631 as_bad (_("`%s%s' not allowed with `%s%c'"),
4632 register_prefix,
4633 i.op[op].regs->reg_name,
4634 i.tm.name,
4635 i.suffix);
4636 return 0;
4637 }
4638 }
4639 return 1;
4640}
4641
4642static int
4643check_long_reg (void)
4644{
4645 int op;
4646
4647 for (op = i.operands; --op >= 0;)
4648 /* Reject eight bit registers, except where the template requires
4649 them. (eg. movzb) */
4650 if (i.types[op].bitfield.reg8
4651 && (i.tm.operand_types[op].bitfield.reg16
4652 || i.tm.operand_types[op].bitfield.reg32
4653 || i.tm.operand_types[op].bitfield.acc))
4654 {
4655 as_bad (_("`%s%s' not allowed with `%s%c'"),
4656 register_prefix,
4657 i.op[op].regs->reg_name,
4658 i.tm.name,
4659 i.suffix);
4660 return 0;
4661 }
4662 /* Warn if the e prefix on a general reg is missing. */
4663 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4664 && i.types[op].bitfield.reg16
4665 && (i.tm.operand_types[op].bitfield.reg32
4666 || i.tm.operand_types[op].bitfield.acc))
4667 {
4668 /* Prohibit these changes in the 64bit mode, since the
4669 lowering is more complicated. */
4670 if (flag_code == CODE_64BIT)
4671 {
4672 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4673 register_prefix, i.op[op].regs->reg_name,
4674 i.suffix);
4675 return 0;
4676 }
4677#if REGISTER_WARNINGS
4678 else
4679 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4680 register_prefix,
4681 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4682 register_prefix,
4683 i.op[op].regs->reg_name,
4684 i.suffix);
4685#endif
4686 }
4687 /* Warn if the r prefix on a general reg is missing. */
4688 else if (i.types[op].bitfield.reg64
4689 && (i.tm.operand_types[op].bitfield.reg32
4690 || i.tm.operand_types[op].bitfield.acc))
4691 {
4692 if (intel_syntax
4693 && i.tm.opcode_modifier.toqword
4694 && !i.types[0].bitfield.regxmm)
4695 {
4696 /* Convert to QWORD. We want REX byte. */
4697 i.suffix = QWORD_MNEM_SUFFIX;
4698 }
4699 else
4700 {
4701 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4702 register_prefix, i.op[op].regs->reg_name,
4703 i.suffix);
4704 return 0;
4705 }
4706 }
4707 return 1;
4708}
4709
4710static int
4711check_qword_reg (void)
4712{
4713 int op;
4714
4715 for (op = i.operands; --op >= 0; )
4716 /* Reject eight bit registers, except where the template requires
4717 them. (eg. movzb) */
4718 if (i.types[op].bitfield.reg8
4719 && (i.tm.operand_types[op].bitfield.reg16
4720 || i.tm.operand_types[op].bitfield.reg32
4721 || i.tm.operand_types[op].bitfield.acc))
4722 {
4723 as_bad (_("`%s%s' not allowed with `%s%c'"),
4724 register_prefix,
4725 i.op[op].regs->reg_name,
4726 i.tm.name,
4727 i.suffix);
4728 return 0;
4729 }
4730 /* Warn if the e prefix on a general reg is missing. */
4731 else if ((i.types[op].bitfield.reg16
4732 || i.types[op].bitfield.reg32)
4733 && (i.tm.operand_types[op].bitfield.reg32
4734 || i.tm.operand_types[op].bitfield.acc))
4735 {
4736 /* Prohibit these changes in the 64bit mode, since the
4737 lowering is more complicated. */
4738 if (intel_syntax
4739 && i.tm.opcode_modifier.todword
4740 && !i.types[0].bitfield.regxmm)
4741 {
4742 /* Convert to DWORD. We don't want REX byte. */
4743 i.suffix = LONG_MNEM_SUFFIX;
4744 }
4745 else
4746 {
4747 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4748 register_prefix, i.op[op].regs->reg_name,
4749 i.suffix);
4750 return 0;
4751 }
4752 }
4753 return 1;
4754}
4755
4756static int
4757check_word_reg (void)
4758{
4759 int op;
4760 for (op = i.operands; --op >= 0;)
4761 /* Reject eight bit registers, except where the template requires
4762 them. (eg. movzb) */
4763 if (i.types[op].bitfield.reg8
4764 && (i.tm.operand_types[op].bitfield.reg16
4765 || i.tm.operand_types[op].bitfield.reg32
4766 || i.tm.operand_types[op].bitfield.acc))
4767 {
4768 as_bad (_("`%s%s' not allowed with `%s%c'"),
4769 register_prefix,
4770 i.op[op].regs->reg_name,
4771 i.tm.name,
4772 i.suffix);
4773 return 0;
4774 }
4775 /* Warn if the e prefix on a general reg is present. */
4776 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4777 && i.types[op].bitfield.reg32
4778 && (i.tm.operand_types[op].bitfield.reg16
4779 || i.tm.operand_types[op].bitfield.acc))
4780 {
4781 /* Prohibit these changes in the 64bit mode, since the
4782 lowering is more complicated. */
4783 if (flag_code == CODE_64BIT)
4784 {
4785 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4786 register_prefix, i.op[op].regs->reg_name,
4787 i.suffix);
4788 return 0;
4789 }
4790 else
4791#if REGISTER_WARNINGS
4792 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4793 register_prefix,
4794 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4795 register_prefix,
4796 i.op[op].regs->reg_name,
4797 i.suffix);
4798#endif
4799 }
4800 return 1;
4801}
4802
4803static int
4804update_imm (unsigned int j)
4805{
4806 i386_operand_type overlap = i.types[j];
4807 if ((overlap.bitfield.imm8
4808 || overlap.bitfield.imm8s
4809 || overlap.bitfield.imm16
4810 || overlap.bitfield.imm32
4811 || overlap.bitfield.imm32s
4812 || overlap.bitfield.imm64)
4813 && !operand_type_equal (&overlap, &imm8)
4814 && !operand_type_equal (&overlap, &imm8s)
4815 && !operand_type_equal (&overlap, &imm16)
4816 && !operand_type_equal (&overlap, &imm32)
4817 && !operand_type_equal (&overlap, &imm32s)
4818 && !operand_type_equal (&overlap, &imm64))
4819 {
4820 if (i.suffix)
4821 {
4822 i386_operand_type temp;
4823
4824 operand_type_set (&temp, 0);
4825 if (i.suffix == BYTE_MNEM_SUFFIX)
4826 {
4827 temp.bitfield.imm8 = overlap.bitfield.imm8;
4828 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4829 }
4830 else if (i.suffix == WORD_MNEM_SUFFIX)
4831 temp.bitfield.imm16 = overlap.bitfield.imm16;
4832 else if (i.suffix == QWORD_MNEM_SUFFIX)
4833 {
4834 temp.bitfield.imm64 = overlap.bitfield.imm64;
4835 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4836 }
4837 else
4838 temp.bitfield.imm32 = overlap.bitfield.imm32;
4839 overlap = temp;
4840 }
4841 else if (operand_type_equal (&overlap, &imm16_32_32s)
4842 || operand_type_equal (&overlap, &imm16_32)
4843 || operand_type_equal (&overlap, &imm16_32s))
4844 {
4845 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4846 overlap = imm16;
4847 else
4848 overlap = imm32s;
4849 }
4850 if (!operand_type_equal (&overlap, &imm8)
4851 && !operand_type_equal (&overlap, &imm8s)
4852 && !operand_type_equal (&overlap, &imm16)
4853 && !operand_type_equal (&overlap, &imm32)
4854 && !operand_type_equal (&overlap, &imm32s)
4855 && !operand_type_equal (&overlap, &imm64))
4856 {
4857 as_bad (_("no instruction mnemonic suffix given; "
4858 "can't determine immediate size"));
4859 return 0;
4860 }
4861 }
4862 i.types[j] = overlap;
4863
4864 return 1;
4865}
4866
4867static int
4868finalize_imm (void)
4869{
4870 unsigned int j, n;
4871
4872 /* Update the first 2 immediate operands. */
4873 n = i.operands > 2 ? 2 : i.operands;
4874 if (n)
4875 {
4876 for (j = 0; j < n; j++)
4877 if (update_imm (j) == 0)
4878 return 0;
4879
4880 /* The 3rd operand can't be immediate operand. */
4881 gas_assert (operand_type_check (i.types[2], imm) == 0);
4882 }
4883
4884 return 1;
4885}
4886
4887static int
4888bad_implicit_operand (int xmm)
4889{
4890 const char *ireg = xmm ? "xmm0" : "ymm0";
4891
4892 if (intel_syntax)
4893 as_bad (_("the last operand of `%s' must be `%s%s'"),
4894 i.tm.name, register_prefix, ireg);
4895 else
4896 as_bad (_("the first operand of `%s' must be `%s%s'"),
4897 i.tm.name, register_prefix, ireg);
4898 return 0;
4899}
4900
4901static int
4902process_operands (void)
4903{
4904 /* Default segment register this instruction will use for memory
4905 accesses. 0 means unknown. This is only for optimizing out
4906 unnecessary segment overrides. */
4907 const seg_entry *default_seg = 0;
4908
4909 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4910 {
4911 unsigned int dupl = i.operands;
4912 unsigned int dest = dupl - 1;
4913 unsigned int j;
4914
4915 /* The destination must be an xmm register. */
4916 gas_assert (i.reg_operands
4917 && MAX_OPERANDS > dupl
4918 && operand_type_equal (&i.types[dest], &regxmm));
4919
4920 if (i.tm.opcode_modifier.firstxmm0)
4921 {
4922 /* The first operand is implicit and must be xmm0. */
4923 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4924 if (i.op[0].regs->reg_num != 0)
4925 return bad_implicit_operand (1);
4926
4927 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4928 {
4929 /* Keep xmm0 for instructions with VEX prefix and 3
4930 sources. */
4931 goto duplicate;
4932 }
4933 else
4934 {
4935 /* We remove the first xmm0 and keep the number of
4936 operands unchanged, which in fact duplicates the
4937 destination. */
4938 for (j = 1; j < i.operands; j++)
4939 {
4940 i.op[j - 1] = i.op[j];
4941 i.types[j - 1] = i.types[j];
4942 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4943 }
4944 }
4945 }
4946 else if (i.tm.opcode_modifier.implicit1stxmm0)
4947 {
4948 gas_assert ((MAX_OPERANDS - 1) > dupl
4949 && (i.tm.opcode_modifier.vexsources
4950 == VEX3SOURCES));
4951
4952 /* Add the implicit xmm0 for instructions with VEX prefix
4953 and 3 sources. */
4954 for (j = i.operands; j > 0; j--)
4955 {
4956 i.op[j] = i.op[j - 1];
4957 i.types[j] = i.types[j - 1];
4958 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4959 }
4960 i.op[0].regs
4961 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4962 i.types[0] = regxmm;
4963 i.tm.operand_types[0] = regxmm;
4964
4965 i.operands += 2;
4966 i.reg_operands += 2;
4967 i.tm.operands += 2;
4968
4969 dupl++;
4970 dest++;
4971 i.op[dupl] = i.op[dest];
4972 i.types[dupl] = i.types[dest];
4973 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4974 }
4975 else
4976 {
4977duplicate:
4978 i.operands++;
4979 i.reg_operands++;
4980 i.tm.operands++;
4981
4982 i.op[dupl] = i.op[dest];
4983 i.types[dupl] = i.types[dest];
4984 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4985 }
4986
4987 if (i.tm.opcode_modifier.immext)
4988 process_immext ();
4989 }
4990 else if (i.tm.opcode_modifier.firstxmm0)
4991 {
4992 unsigned int j;
4993
4994 /* The first operand is implicit and must be xmm0/ymm0. */
4995 gas_assert (i.reg_operands
4996 && (operand_type_equal (&i.types[0], &regxmm)
4997 || operand_type_equal (&i.types[0], &regymm)));
4998 if (i.op[0].regs->reg_num != 0)
4999 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5000
5001 for (j = 1; j < i.operands; j++)
5002 {
5003 i.op[j - 1] = i.op[j];
5004 i.types[j - 1] = i.types[j];
5005
5006 /* We need to adjust fields in i.tm since they are used by
5007 build_modrm_byte. */
5008 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5009 }
5010
5011 i.operands--;
5012 i.reg_operands--;
5013 i.tm.operands--;
5014 }
5015 else if (i.tm.opcode_modifier.regkludge)
5016 {
5017 /* The imul $imm, %reg instruction is converted into
5018 imul $imm, %reg, %reg, and the clr %reg instruction
5019 is converted into xor %reg, %reg. */
5020
5021 unsigned int first_reg_op;
5022
5023 if (operand_type_check (i.types[0], reg))
5024 first_reg_op = 0;
5025 else
5026 first_reg_op = 1;
5027 /* Pretend we saw the extra register operand. */
5028 gas_assert (i.reg_operands == 1
5029 && i.op[first_reg_op + 1].regs == 0);
5030 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5031 i.types[first_reg_op + 1] = i.types[first_reg_op];
5032 i.operands++;
5033 i.reg_operands++;
5034 }
5035
5036 if (i.tm.opcode_modifier.shortform)
5037 {
5038 if (i.types[0].bitfield.sreg2
5039 || i.types[0].bitfield.sreg3)
5040 {
5041 if (i.tm.base_opcode == POP_SEG_SHORT
5042 && i.op[0].regs->reg_num == 1)
5043 {
5044 as_bad (_("you can't `pop %scs'"), register_prefix);
5045 return 0;
5046 }
5047 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5048 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5049 i.rex |= REX_B;
5050 }
5051 else
5052 {
5053 /* The register or float register operand is in operand
5054 0 or 1. */
5055 unsigned int op;
5056
5057 if (i.types[0].bitfield.floatreg
5058 || operand_type_check (i.types[0], reg))
5059 op = 0;
5060 else
5061 op = 1;
5062 /* Register goes in low 3 bits of opcode. */
5063 i.tm.base_opcode |= i.op[op].regs->reg_num;
5064 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5065 i.rex |= REX_B;
5066 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5067 {
5068 /* Warn about some common errors, but press on regardless.
5069 The first case can be generated by gcc (<= 2.8.1). */
5070 if (i.operands == 2)
5071 {
5072 /* Reversed arguments on faddp, fsubp, etc. */
5073 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5074 register_prefix, i.op[!intel_syntax].regs->reg_name,
5075 register_prefix, i.op[intel_syntax].regs->reg_name);
5076 }
5077 else
5078 {
5079 /* Extraneous `l' suffix on fp insn. */
5080 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5081 register_prefix, i.op[0].regs->reg_name);
5082 }
5083 }
5084 }
5085 }
5086 else if (i.tm.opcode_modifier.modrm)
5087 {
5088 /* The opcode is completed (modulo i.tm.extension_opcode which
5089 must be put into the modrm byte). Now, we make the modrm and
5090 index base bytes based on all the info we've collected. */
5091
5092 default_seg = build_modrm_byte ();
5093 }
5094 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5095 {
5096 default_seg = &ds;
5097 }
5098 else if (i.tm.opcode_modifier.isstring)
5099 {
5100 /* For the string instructions that allow a segment override
5101 on one of their operands, the default segment is ds. */
5102 default_seg = &ds;
5103 }
5104
5105 if (i.tm.base_opcode == 0x8d /* lea */
5106 && i.seg[0]
5107 && !quiet_warnings)
5108 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5109
5110 /* If a segment was explicitly specified, and the specified segment
5111 is not the default, use an opcode prefix to select it. If we
5112 never figured out what the default segment is, then default_seg
5113 will be zero at this point, and the specified segment prefix will
5114 always be used. */
5115 if ((i.seg[0]) && (i.seg[0] != default_seg))
5116 {
5117 if (!add_prefix (i.seg[0]->seg_prefix))
5118 return 0;
5119 }
5120 return 1;
5121}
5122
5123static const seg_entry *
5124build_modrm_byte (void)
5125{
5126 const seg_entry *default_seg = 0;
5127 unsigned int source, dest;
5128 int vex_3_sources;
5129
5130 /* The first operand of instructions with VEX prefix and 3 sources
5131 must be VEX_Imm4. */
5132 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5133 if (vex_3_sources)
5134 {
5135 unsigned int nds, reg_slot;
5136 expressionS *exp;
5137
5138 if (i.tm.opcode_modifier.veximmext
5139 && i.tm.opcode_modifier.immext)
5140 {
5141 dest = i.operands - 2;
5142 gas_assert (dest == 3);
5143 }
5144 else
5145 dest = i.operands - 1;
5146 nds = dest - 1;
5147
5148 /* There are 2 kinds of instructions:
5149 1. 5 operands: 4 register operands or 3 register operands
5150 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5151 VexW0 or VexW1. The destination must be either XMM or YMM
5152 register.
5153 2. 4 operands: 4 register operands or 3 register operands
5154 plus 1 memory operand, VexXDS, and VexImmExt */
5155 gas_assert ((i.reg_operands == 4
5156 || (i.reg_operands == 3 && i.mem_operands == 1))
5157 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5158 && (i.tm.opcode_modifier.veximmext
5159 || (i.imm_operands == 1
5160 && i.types[0].bitfield.vec_imm4
5161 && (i.tm.opcode_modifier.vexw == VEXW0
5162 || i.tm.opcode_modifier.vexw == VEXW1)
5163 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5164 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5165
5166 if (i.imm_operands == 0)
5167 {
5168 /* When there is no immediate operand, generate an 8bit
5169 immediate operand to encode the first operand. */
5170 exp = &im_expressions[i.imm_operands++];
5171 i.op[i.operands].imms = exp;
5172 i.types[i.operands] = imm8;
5173 i.operands++;
5174 /* If VexW1 is set, the first operand is the source and
5175 the second operand is encoded in the immediate operand. */
5176 if (i.tm.opcode_modifier.vexw == VEXW1)
5177 {
5178 source = 0;
5179 reg_slot = 1;
5180 }
5181 else
5182 {
5183 source = 1;
5184 reg_slot = 0;
5185 }
5186
5187 /* FMA swaps REG and NDS. */
5188 if (i.tm.cpu_flags.bitfield.cpufma)
5189 {
5190 unsigned int tmp;
5191 tmp = reg_slot;
5192 reg_slot = nds;
5193 nds = tmp;
5194 }
5195
5196 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5197 &regxmm)
5198 || operand_type_equal (&i.tm.operand_types[reg_slot],
5199 &regymm));
5200 exp->X_op = O_constant;
5201 exp->X_add_number
5202 = ((i.op[reg_slot].regs->reg_num
5203 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5204 << 4);
5205 }
5206 else
5207 {
5208 unsigned int imm_slot;
5209
5210 if (i.tm.opcode_modifier.vexw == VEXW0)
5211 {
5212 /* If VexW0 is set, the third operand is the source and
5213 the second operand is encoded in the immediate
5214 operand. */
5215 source = 2;
5216 reg_slot = 1;
5217 }
5218 else
5219 {
5220 /* VexW1 is set, the second operand is the source and
5221 the third operand is encoded in the immediate
5222 operand. */
5223 source = 1;
5224 reg_slot = 2;
5225 }
5226
5227 if (i.tm.opcode_modifier.immext)
5228 {
5229 /* When ImmExt is set, the immdiate byte is the last
5230 operand. */
5231 imm_slot = i.operands - 1;
5232 source--;
5233 reg_slot--;
5234 }
5235 else
5236 {
5237 imm_slot = 0;
5238
5239 /* Turn on Imm8 so that output_imm will generate it. */
5240 i.types[imm_slot].bitfield.imm8 = 1;
5241 }
5242
5243 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5244 &regxmm)
5245 || operand_type_equal (&i.tm.operand_types[reg_slot],
5246 &regymm));
5247 i.op[imm_slot].imms->X_add_number
5248 |= ((i.op[reg_slot].regs->reg_num
5249 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5250 << 4);
5251 }
5252
5253 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5254 || operand_type_equal (&i.tm.operand_types[nds],
5255 &regymm));
5256 i.vex.register_specifier = i.op[nds].regs;
5257 }
5258 else
5259 source = dest = 0;
5260
5261 /* i.reg_operands MUST be the number of real register operands;
5262 implicit registers do not count. If there are 3 register
5263 operands, it must be a instruction with VexNDS. For a
5264 instruction with VexNDD, the destination register is encoded
5265 in VEX prefix. If there are 4 register operands, it must be
5266 a instruction with VEX prefix and 3 sources. */
5267 if (i.mem_operands == 0
5268 && ((i.reg_operands == 2
5269 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5270 || (i.reg_operands == 3
5271 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5272 || (i.reg_operands == 4 && vex_3_sources)))
5273 {
5274 switch (i.operands)
5275 {
5276 case 2:
5277 source = 0;
5278 break;
5279 case 3:
5280 /* When there are 3 operands, one of them may be immediate,
5281 which may be the first or the last operand. Otherwise,
5282 the first operand must be shift count register (cl) or it
5283 is an instruction with VexNDS. */
5284 gas_assert (i.imm_operands == 1
5285 || (i.imm_operands == 0
5286 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5287 || i.types[0].bitfield.shiftcount)));
5288 if (operand_type_check (i.types[0], imm)
5289 || i.types[0].bitfield.shiftcount)
5290 source = 1;
5291 else
5292 source = 0;
5293 break;
5294 case 4:
5295 /* When there are 4 operands, the first two must be 8bit
5296 immediate operands. The source operand will be the 3rd
5297 one.
5298
5299 For instructions with VexNDS, if the first operand
5300 an imm8, the source operand is the 2nd one. If the last
5301 operand is imm8, the source operand is the first one. */
5302 gas_assert ((i.imm_operands == 2
5303 && i.types[0].bitfield.imm8
5304 && i.types[1].bitfield.imm8)
5305 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5306 && i.imm_operands == 1
5307 && (i.types[0].bitfield.imm8
5308 || i.types[i.operands - 1].bitfield.imm8)));
5309 if (i.imm_operands == 2)
5310 source = 2;
5311 else
5312 {
5313 if (i.types[0].bitfield.imm8)
5314 source = 1;
5315 else
5316 source = 0;
5317 }
5318 break;
5319 case 5:
5320 break;
5321 default:
5322 abort ();
5323 }
5324
5325 if (!vex_3_sources)
5326 {
5327 dest = source + 1;
5328
5329 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5330 {
5331 /* For instructions with VexNDS, the register-only
5332 source operand must be 32/64bit integer, XMM or
5333 YMM register. It is encoded in VEX prefix. We
5334 need to clear RegMem bit before calling
5335 operand_type_equal. */
5336
5337 i386_operand_type op;
5338 unsigned int vvvv;
5339
5340 /* Check register-only source operand when two source
5341 operands are swapped. */
5342 if (!i.tm.operand_types[source].bitfield.baseindex
5343 && i.tm.operand_types[dest].bitfield.baseindex)
5344 {
5345 vvvv = source;
5346 source = dest;
5347 }
5348 else
5349 vvvv = dest;
5350
5351 op = i.tm.operand_types[vvvv];
5352 op.bitfield.regmem = 0;
5353 if ((dest + 1) >= i.operands
5354 || (op.bitfield.reg32 != 1
5355 && !op.bitfield.reg64 != 1
5356 && !operand_type_equal (&op, &regxmm)
5357 && !operand_type_equal (&op, &regymm)))
5358 abort ();
5359 i.vex.register_specifier = i.op[vvvv].regs;
5360 dest++;
5361 }
5362 }
5363
5364 i.rm.mode = 3;
5365 /* One of the register operands will be encoded in the i.tm.reg
5366 field, the other in the combined i.tm.mode and i.tm.regmem
5367 fields. If no form of this instruction supports a memory
5368 destination operand, then we assume the source operand may
5369 sometimes be a memory operand and so we need to store the
5370 destination in the i.rm.reg field. */
5371 if (!i.tm.operand_types[dest].bitfield.regmem
5372 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5373 {
5374 i.rm.reg = i.op[dest].regs->reg_num;
5375 i.rm.regmem = i.op[source].regs->reg_num;
5376 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5377 i.rex |= REX_R;
5378 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5379 i.rex |= REX_B;
5380 }
5381 else
5382 {
5383 i.rm.reg = i.op[source].regs->reg_num;
5384 i.rm.regmem = i.op[dest].regs->reg_num;
5385 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5386 i.rex |= REX_B;
5387 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5388 i.rex |= REX_R;
5389 }
5390 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5391 {
5392 if (!i.types[0].bitfield.control
5393 && !i.types[1].bitfield.control)
5394 abort ();
5395 i.rex &= ~(REX_R | REX_B);
5396 add_prefix (LOCK_PREFIX_OPCODE);
5397 }
5398 }
5399 else
5400 { /* If it's not 2 reg operands... */
5401 unsigned int mem;
5402
5403 if (i.mem_operands)
5404 {
5405 unsigned int fake_zero_displacement = 0;
5406 unsigned int op;
5407
5408 for (op = 0; op < i.operands; op++)
5409 if (operand_type_check (i.types[op], anymem))
5410 break;
5411 gas_assert (op < i.operands);
5412
5413 default_seg = &ds;
5414
5415 if (i.base_reg == 0)
5416 {
5417 i.rm.mode = 0;
5418 if (!i.disp_operands)
5419 fake_zero_displacement = 1;
5420 if (i.index_reg == 0)
5421 {
5422 /* Operand is just <disp> */
5423 if (flag_code == CODE_64BIT)
5424 {
5425 /* 64bit mode overwrites the 32bit absolute
5426 addressing by RIP relative addressing and
5427 absolute addressing is encoded by one of the
5428 redundant SIB forms. */
5429 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5430 i.sib.base = NO_BASE_REGISTER;
5431 i.sib.index = NO_INDEX_REGISTER;
5432 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5433 ? disp32s : disp32);
5434 }
5435 else if ((flag_code == CODE_16BIT)
5436 ^ (i.prefix[ADDR_PREFIX] != 0))
5437 {
5438 i.rm.regmem = NO_BASE_REGISTER_16;
5439 i.types[op] = disp16;
5440 }
5441 else
5442 {
5443 i.rm.regmem = NO_BASE_REGISTER;
5444 i.types[op] = disp32;
5445 }
5446 }
5447 else /* !i.base_reg && i.index_reg */
5448 {
5449 if (i.index_reg->reg_num == RegEiz
5450 || i.index_reg->reg_num == RegRiz)
5451 i.sib.index = NO_INDEX_REGISTER;
5452 else
5453 i.sib.index = i.index_reg->reg_num;
5454 i.sib.base = NO_BASE_REGISTER;
5455 i.sib.scale = i.log2_scale_factor;
5456 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5457 i.types[op].bitfield.disp8 = 0;
5458 i.types[op].bitfield.disp16 = 0;
5459 i.types[op].bitfield.disp64 = 0;
5460 if (flag_code != CODE_64BIT)
5461 {
5462 /* Must be 32 bit */
5463 i.types[op].bitfield.disp32 = 1;
5464 i.types[op].bitfield.disp32s = 0;
5465 }
5466 else
5467 {
5468 i.types[op].bitfield.disp32 = 0;
5469 i.types[op].bitfield.disp32s = 1;
5470 }
5471 if ((i.index_reg->reg_flags & RegRex) != 0)
5472 i.rex |= REX_X;
5473 }
5474 }
5475 /* RIP addressing for 64bit mode. */
5476 else if (i.base_reg->reg_num == RegRip ||
5477 i.base_reg->reg_num == RegEip)
5478 {
5479 i.rm.regmem = NO_BASE_REGISTER;
5480 i.types[op].bitfield.disp8 = 0;
5481 i.types[op].bitfield.disp16 = 0;
5482 i.types[op].bitfield.disp32 = 0;
5483 i.types[op].bitfield.disp32s = 1;
5484 i.types[op].bitfield.disp64 = 0;
5485 i.flags[op] |= Operand_PCrel;
5486 if (! i.disp_operands)
5487 fake_zero_displacement = 1;
5488 }
5489 else if (i.base_reg->reg_type.bitfield.reg16)
5490 {
5491 switch (i.base_reg->reg_num)
5492 {
5493 case 3: /* (%bx) */
5494 if (i.index_reg == 0)
5495 i.rm.regmem = 7;
5496 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5497 i.rm.regmem = i.index_reg->reg_num - 6;
5498 break;
5499 case 5: /* (%bp) */
5500 default_seg = &ss;
5501 if (i.index_reg == 0)
5502 {
5503 i.rm.regmem = 6;
5504 if (operand_type_check (i.types[op], disp) == 0)
5505 {
5506 /* fake (%bp) into 0(%bp) */
5507 i.types[op].bitfield.disp8 = 1;
5508 fake_zero_displacement = 1;
5509 }
5510 }
5511 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5512 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5513 break;
5514 default: /* (%si) -> 4 or (%di) -> 5 */
5515 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5516 }
5517 i.rm.mode = mode_from_disp_size (i.types[op]);
5518 }
5519 else /* i.base_reg and 32/64 bit mode */
5520 {
5521 if (flag_code == CODE_64BIT
5522 && operand_type_check (i.types[op], disp))
5523 {
5524 i386_operand_type temp;
5525 operand_type_set (&temp, 0);
5526 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5527 i.types[op] = temp;
5528 if (i.prefix[ADDR_PREFIX] == 0)
5529 i.types[op].bitfield.disp32s = 1;
5530 else
5531 i.types[op].bitfield.disp32 = 1;
5532 }
5533
5534 i.rm.regmem = i.base_reg->reg_num;
5535 if ((i.base_reg->reg_flags & RegRex) != 0)
5536 i.rex |= REX_B;
5537 i.sib.base = i.base_reg->reg_num;
5538 /* x86-64 ignores REX prefix bit here to avoid decoder
5539 complications. */
5540 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5541 {
5542 default_seg = &ss;
5543 if (i.disp_operands == 0)
5544 {
5545 fake_zero_displacement = 1;
5546 i.types[op].bitfield.disp8 = 1;
5547 }
5548 }
5549 else if (i.base_reg->reg_num == ESP_REG_NUM)
5550 {
5551 default_seg = &ss;
5552 }
5553 i.sib.scale = i.log2_scale_factor;
5554 if (i.index_reg == 0)
5555 {
5556 /* <disp>(%esp) becomes two byte modrm with no index
5557 register. We've already stored the code for esp
5558 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5559 Any base register besides %esp will not use the
5560 extra modrm byte. */
5561 i.sib.index = NO_INDEX_REGISTER;
5562 }
5563 else
5564 {
5565 if (i.index_reg->reg_num == RegEiz
5566 || i.index_reg->reg_num == RegRiz)
5567 i.sib.index = NO_INDEX_REGISTER;
5568 else
5569 i.sib.index = i.index_reg->reg_num;
5570 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5571 if ((i.index_reg->reg_flags & RegRex) != 0)
5572 i.rex |= REX_X;
5573 }
5574
5575 if (i.disp_operands
5576 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5577 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5578 i.rm.mode = 0;
5579 else
5580 i.rm.mode = mode_from_disp_size (i.types[op]);
5581 }
5582
5583 if (fake_zero_displacement)
5584 {
5585 /* Fakes a zero displacement assuming that i.types[op]
5586 holds the correct displacement size. */
5587 expressionS *exp;
5588
5589 gas_assert (i.op[op].disps == 0);
5590 exp = &disp_expressions[i.disp_operands++];
5591 i.op[op].disps = exp;
5592 exp->X_op = O_constant;
5593 exp->X_add_number = 0;
5594 exp->X_add_symbol = (symbolS *) 0;
5595 exp->X_op_symbol = (symbolS *) 0;
5596 }
5597
5598 mem = op;
5599 }
5600 else
5601 mem = ~0;
5602
5603 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5604 {
5605 if (operand_type_check (i.types[0], imm))
5606 i.vex.register_specifier = NULL;
5607 else
5608 {
5609 /* VEX.vvvv encodes one of the sources when the first
5610 operand is not an immediate. */
5611 if (i.tm.opcode_modifier.vexw == VEXW0)
5612 i.vex.register_specifier = i.op[0].regs;
5613 else
5614 i.vex.register_specifier = i.op[1].regs;
5615 }
5616
5617 /* Destination is a XMM register encoded in the ModRM.reg
5618 and VEX.R bit. */
5619 i.rm.reg = i.op[2].regs->reg_num;
5620 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5621 i.rex |= REX_R;
5622
5623 /* ModRM.rm and VEX.B encodes the other source. */
5624 if (!i.mem_operands)
5625 {
5626 i.rm.mode = 3;
5627
5628 if (i.tm.opcode_modifier.vexw == VEXW0)
5629 i.rm.regmem = i.op[1].regs->reg_num;
5630 else
5631 i.rm.regmem = i.op[0].regs->reg_num;
5632
5633 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5634 i.rex |= REX_B;
5635 }
5636 }
5637 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5638 {
5639 i.vex.register_specifier = i.op[2].regs;
5640 if (!i.mem_operands)
5641 {
5642 i.rm.mode = 3;
5643 i.rm.regmem = i.op[1].regs->reg_num;
5644 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5645 i.rex |= REX_B;
5646 }
5647 }
5648 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5649 (if any) based on i.tm.extension_opcode. Again, we must be
5650 careful to make sure that segment/control/debug/test/MMX
5651 registers are coded into the i.rm.reg field. */
5652 else if (i.reg_operands)
5653 {
5654 unsigned int op;
5655 unsigned int vex_reg = ~0;
5656
5657 for (op = 0; op < i.operands; op++)
5658 if (i.types[op].bitfield.reg8
5659 || i.types[op].bitfield.reg16
5660 || i.types[op].bitfield.reg32
5661 || i.types[op].bitfield.reg64
5662 || i.types[op].bitfield.regmmx
5663 || i.types[op].bitfield.regxmm
5664 || i.types[op].bitfield.regymm
5665 || i.types[op].bitfield.sreg2
5666 || i.types[op].bitfield.sreg3
5667 || i.types[op].bitfield.control
5668 || i.types[op].bitfield.debug
5669 || i.types[op].bitfield.test)
5670 break;
5671
5672 if (vex_3_sources)
5673 op = dest;
5674 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5675 {
5676 /* For instructions with VexNDS, the register-only
5677 source operand is encoded in VEX prefix. */
5678 gas_assert (mem != (unsigned int) ~0);
5679
5680 if (op > mem)
5681 {
5682 vex_reg = op++;
5683 gas_assert (op < i.operands);
5684 }
5685 else
5686 {
5687 /* Check register-only source operand when two source
5688 operands are swapped. */
5689 if (!i.tm.operand_types[op].bitfield.baseindex
5690 && i.tm.operand_types[op + 1].bitfield.baseindex)
5691 {
5692 vex_reg = op;
5693 op += 2;
5694 gas_assert (mem == (vex_reg + 1)
5695 && op < i.operands);
5696 }
5697 else
5698 {
5699 vex_reg = op + 1;
5700 gas_assert (vex_reg < i.operands);
5701 }
5702 }
5703 }
5704 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5705 {
5706 /* For instructions with VexNDD, the register destination
5707 is encoded in VEX prefix. */
5708 if (i.mem_operands == 0)
5709 {
5710 /* There is no memory operand. */
5711 gas_assert ((op + 2) == i.operands);
5712 vex_reg = op + 1;
5713 }
5714 else
5715 {
5716 /* There are only 2 operands. */
5717 gas_assert (op < 2 && i.operands == 2);
5718 vex_reg = 1;
5719 }
5720 }
5721 else
5722 gas_assert (op < i.operands);
5723
5724 if (vex_reg != (unsigned int) ~0)
5725 {
5726 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5727
5728 if (type->bitfield.reg32 != 1
5729 && type->bitfield.reg64 != 1
5730 && !operand_type_equal (type, &regxmm)
5731 && !operand_type_equal (type, &regymm))
5732 abort ();
5733
5734 i.vex.register_specifier = i.op[vex_reg].regs;
5735 }
5736
5737 /* Don't set OP operand twice. */
5738 if (vex_reg != op)
5739 {
5740 /* If there is an extension opcode to put here, the
5741 register number must be put into the regmem field. */
5742 if (i.tm.extension_opcode != None)
5743 {
5744 i.rm.regmem = i.op[op].regs->reg_num;
5745 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5746 i.rex |= REX_B;
5747 }
5748 else
5749 {
5750 i.rm.reg = i.op[op].regs->reg_num;
5751 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5752 i.rex |= REX_R;
5753 }
5754 }
5755
5756 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5757 must set it to 3 to indicate this is a register operand
5758 in the regmem field. */
5759 if (!i.mem_operands)
5760 i.rm.mode = 3;
5761 }
5762
5763 /* Fill in i.rm.reg field with extension opcode (if any). */
5764 if (i.tm.extension_opcode != None)
5765 i.rm.reg = i.tm.extension_opcode;
5766 }
5767 return default_seg;
5768}
5769
5770static void
5771output_branch (void)
5772{
5773 char *p;
5774 int size;
5775 int code16;
5776 int prefix;
5777 relax_substateT subtype;
5778 symbolS *sym;
5779 offsetT off;
5780
5781 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5782 size = i.disp32_encoding ? BIG : SMALL;
5783
5784 prefix = 0;
5785 if (i.prefix[DATA_PREFIX] != 0)
5786 {
5787 prefix = 1;
5788 i.prefixes -= 1;
5789 code16 ^= CODE16;
5790 }
5791 /* Pentium4 branch hints. */
5792 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5793 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5794 {
5795 prefix++;
5796 i.prefixes--;
5797 }
5798 if (i.prefix[REX_PREFIX] != 0)
5799 {
5800 prefix++;
5801 i.prefixes--;
5802 }
5803
5804 if (i.prefixes != 0 && !intel_syntax)
5805 as_warn (_("skipping prefixes on this instruction"));
5806
5807 /* It's always a symbol; End frag & setup for relax.
5808 Make sure there is enough room in this frag for the largest
5809 instruction we may generate in md_convert_frag. This is 2
5810 bytes for the opcode and room for the prefix and largest
5811 displacement. */
5812 frag_grow (prefix + 2 + 4);
5813 /* Prefix and 1 opcode byte go in fr_fix. */
5814 p = frag_more (prefix + 1);
5815 if (i.prefix[DATA_PREFIX] != 0)
5816 *p++ = DATA_PREFIX_OPCODE;
5817 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5818 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5819 *p++ = i.prefix[SEG_PREFIX];
5820 if (i.prefix[REX_PREFIX] != 0)
5821 *p++ = i.prefix[REX_PREFIX];
5822 *p = i.tm.base_opcode;
5823
5824 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5825 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5826 else if (cpu_arch_flags.bitfield.cpui386)
5827 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5828 else
5829 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5830 subtype |= code16;
5831
5832 sym = i.op[0].disps->X_add_symbol;
5833 off = i.op[0].disps->X_add_number;
5834
5835 if (i.op[0].disps->X_op != O_constant
5836 && i.op[0].disps->X_op != O_symbol)
5837 {
5838 /* Handle complex expressions. */
5839 sym = make_expr_symbol (i.op[0].disps);
5840 off = 0;
5841 }
5842
5843 /* 1 possible extra opcode + 4 byte displacement go in var part.
5844 Pass reloc in fr_var. */
5845 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5846}
5847
5848static void
5849output_jump (void)
5850{
5851 char *p;
5852 int size;
5853 fixS *fixP;
5854
5855 if (i.tm.opcode_modifier.jumpbyte)
5856 {
5857 /* This is a loop or jecxz type instruction. */
5858 size = 1;
5859 if (i.prefix[ADDR_PREFIX] != 0)
5860 {
5861 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5862 i.prefixes -= 1;
5863 }
5864 /* Pentium4 branch hints. */
5865 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5866 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5867 {
5868 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5869 i.prefixes--;
5870 }
5871 }
5872 else
5873 {
5874 int code16;
5875
5876 code16 = 0;
5877 if (flag_code == CODE_16BIT)
5878 code16 = CODE16;
5879
5880 if (i.prefix[DATA_PREFIX] != 0)
5881 {
5882 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5883 i.prefixes -= 1;
5884 code16 ^= CODE16;
5885 }
5886
5887 size = 4;
5888 if (code16)
5889 size = 2;
5890 }
5891
5892 if (i.prefix[REX_PREFIX] != 0)
5893 {
5894 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5895 i.prefixes -= 1;
5896 }
5897
5898 if (i.prefixes != 0 && !intel_syntax)
5899 as_warn (_("skipping prefixes on this instruction"));
5900
5901 p = frag_more (1 + size);
5902 *p++ = i.tm.base_opcode;
5903
5904 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5905 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5906
5907 /* All jumps handled here are signed, but don't use a signed limit
5908 check for 32 and 16 bit jumps as we want to allow wrap around at
5909 4G and 64k respectively. */
5910 if (size == 1)
5911 fixP->fx_signed = 1;
5912}
5913
5914static void
5915output_interseg_jump (void)
5916{
5917 char *p;
5918 int size;
5919 int prefix;
5920 int code16;
5921
5922 code16 = 0;
5923 if (flag_code == CODE_16BIT)
5924 code16 = CODE16;
5925
5926 prefix = 0;
5927 if (i.prefix[DATA_PREFIX] != 0)
5928 {
5929 prefix = 1;
5930 i.prefixes -= 1;
5931 code16 ^= CODE16;
5932 }
5933 if (i.prefix[REX_PREFIX] != 0)
5934 {
5935 prefix++;
5936 i.prefixes -= 1;
5937 }
5938
5939 size = 4;
5940 if (code16)
5941 size = 2;
5942
5943 if (i.prefixes != 0 && !intel_syntax)
5944 as_warn (_("skipping prefixes on this instruction"));
5945
5946 /* 1 opcode; 2 segment; offset */
5947 p = frag_more (prefix + 1 + 2 + size);
5948
5949 if (i.prefix[DATA_PREFIX] != 0)
5950 *p++ = DATA_PREFIX_OPCODE;
5951
5952 if (i.prefix[REX_PREFIX] != 0)
5953 *p++ = i.prefix[REX_PREFIX];
5954
5955 *p++ = i.tm.base_opcode;
5956 if (i.op[1].imms->X_op == O_constant)
5957 {
5958 offsetT n = i.op[1].imms->X_add_number;
5959
5960 if (size == 2
5961 && !fits_in_unsigned_word (n)
5962 && !fits_in_signed_word (n))
5963 {
5964 as_bad (_("16-bit jump out of range"));
5965 return;
5966 }
5967 md_number_to_chars (p, n, size);
5968 }
5969 else
5970 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5971 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5972 if (i.op[0].imms->X_op != O_constant)
5973 as_bad (_("can't handle non absolute segment in `%s'"),
5974 i.tm.name);
5975 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5976}
5977
5978static void
5979output_insn (void)
5980{
5981 fragS *insn_start_frag;
5982 offsetT insn_start_off;
5983
5984 /* Tie dwarf2 debug info to the address at the start of the insn.
5985 We can't do this after the insn has been output as the current
5986 frag may have been closed off. eg. by frag_var. */
5987 dwarf2_emit_insn (0);
5988
5989 insn_start_frag = frag_now;
5990 insn_start_off = frag_now_fix ();
5991
5992 /* Output jumps. */
5993 if (i.tm.opcode_modifier.jump)
5994 output_branch ();
5995 else if (i.tm.opcode_modifier.jumpbyte
5996 || i.tm.opcode_modifier.jumpdword)
5997 output_jump ();
5998 else if (i.tm.opcode_modifier.jumpintersegment)
5999 output_interseg_jump ();
6000 else
6001 {
6002 /* Output normal instructions here. */
6003 char *p;
6004 unsigned char *q;
6005 unsigned int j;
6006 unsigned int prefix;
6007
6008 /* Since the VEX prefix contains the implicit prefix, we don't
6009 need the explicit prefix. */
6010 if (!i.tm.opcode_modifier.vex)
6011 {
6012 switch (i.tm.opcode_length)
6013 {
6014 case 3:
6015 if (i.tm.base_opcode & 0xff000000)
6016 {
6017 prefix = (i.tm.base_opcode >> 24) & 0xff;
6018 goto check_prefix;
6019 }
6020 break;
6021 case 2:
6022 if ((i.tm.base_opcode & 0xff0000) != 0)
6023 {
6024 prefix = (i.tm.base_opcode >> 16) & 0xff;
6025 if (i.tm.cpu_flags.bitfield.cpupadlock)
6026 {
6027check_prefix:
6028 if (prefix != REPE_PREFIX_OPCODE
6029 || (i.prefix[REP_PREFIX]
6030 != REPE_PREFIX_OPCODE))
6031 add_prefix (prefix);
6032 }
6033 else
6034 add_prefix (prefix);
6035 }
6036 break;
6037 case 1:
6038 break;
6039 default:
6040 abort ();
6041 }
6042
6043 /* The prefix bytes. */
6044 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6045 if (*q)
6046 FRAG_APPEND_1_CHAR (*q);
6047 }
6048
6049 if (i.tm.opcode_modifier.vex)
6050 {
6051 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6052 if (*q)
6053 switch (j)
6054 {
6055 case REX_PREFIX:
6056 /* REX byte is encoded in VEX prefix. */
6057 break;
6058 case SEG_PREFIX:
6059 case ADDR_PREFIX:
6060 FRAG_APPEND_1_CHAR (*q);
6061 break;
6062 default:
6063 /* There should be no other prefixes for instructions
6064 with VEX prefix. */
6065 abort ();
6066 }
6067
6068 /* Now the VEX prefix. */
6069 p = frag_more (i.vex.length);
6070 for (j = 0; j < i.vex.length; j++)
6071 p[j] = i.vex.bytes[j];
6072 }
6073
6074 /* Now the opcode; be careful about word order here! */
6075 if (i.tm.opcode_length == 1)
6076 {
6077 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6078 }
6079 else
6080 {
6081 switch (i.tm.opcode_length)
6082 {
6083 case 3:
6084 p = frag_more (3);
6085 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6086 break;
6087 case 2:
6088 p = frag_more (2);
6089 break;
6090 default:
6091 abort ();
6092 break;
6093 }
6094
6095 /* Put out high byte first: can't use md_number_to_chars! */
6096 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6097 *p = i.tm.base_opcode & 0xff;
6098 }
6099
6100 /* Now the modrm byte and sib byte (if present). */
6101 if (i.tm.opcode_modifier.modrm)
6102 {
6103 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6104 | i.rm.reg << 3
6105 | i.rm.mode << 6));
6106 /* If i.rm.regmem == ESP (4)
6107 && i.rm.mode != (Register mode)
6108 && not 16 bit
6109 ==> need second modrm byte. */
6110 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6111 && i.rm.mode != 3
6112 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6113 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6114 | i.sib.index << 3
6115 | i.sib.scale << 6));
6116 }
6117
6118 if (i.disp_operands)
6119 output_disp (insn_start_frag, insn_start_off);
6120
6121 if (i.imm_operands)
6122 output_imm (insn_start_frag, insn_start_off);
6123 }
6124
6125#ifdef DEBUG386
6126 if (flag_debug)
6127 {
6128 pi ("" /*line*/, &i);
6129 }
6130#endif /* DEBUG386 */
6131}
6132
6133/* Return the size of the displacement operand N. */
6134
6135static int
6136disp_size (unsigned int n)
6137{
6138 int size = 4;
6139 if (i.types[n].bitfield.disp64)
6140 size = 8;
6141 else if (i.types[n].bitfield.disp8)
6142 size = 1;
6143 else if (i.types[n].bitfield.disp16)
6144 size = 2;
6145 return size;
6146}
6147
6148/* Return the size of the immediate operand N. */
6149
6150static int
6151imm_size (unsigned int n)
6152{
6153 int size = 4;
6154 if (i.types[n].bitfield.imm64)
6155 size = 8;
6156 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6157 size = 1;
6158 else if (i.types[n].bitfield.imm16)
6159 size = 2;
6160 return size;
6161}
6162
6163static void
6164output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6165{
6166 char *p;
6167 unsigned int n;
6168
6169 for (n = 0; n < i.operands; n++)
6170 {
6171 if (operand_type_check (i.types[n], disp))
6172 {
6173 if (i.op[n].disps->X_op == O_constant)
6174 {
6175 int size = disp_size (n);
6176 offsetT val;
6177
6178 val = offset_in_range (i.op[n].disps->X_add_number,
6179 size);
6180 p = frag_more (size);
6181 md_number_to_chars (p, val, size);
6182 }
6183 else
6184 {
6185 enum bfd_reloc_code_real reloc_type;
6186 int size = disp_size (n);
6187 int sign = i.types[n].bitfield.disp32s;
6188 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6189
6190 /* We can't have 8 bit displacement here. */
6191 gas_assert (!i.types[n].bitfield.disp8);
6192
6193 /* The PC relative address is computed relative
6194 to the instruction boundary, so in case immediate
6195 fields follows, we need to adjust the value. */
6196 if (pcrel && i.imm_operands)
6197 {
6198 unsigned int n1;
6199 int sz = 0;
6200
6201 for (n1 = 0; n1 < i.operands; n1++)
6202 if (operand_type_check (i.types[n1], imm))
6203 {
6204 /* Only one immediate is allowed for PC
6205 relative address. */
6206 gas_assert (sz == 0);
6207 sz = imm_size (n1);
6208 i.op[n].disps->X_add_number -= sz;
6209 }
6210 /* We should find the immediate. */
6211 gas_assert (sz != 0);
6212 }
6213
6214 p = frag_more (size);
6215 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6216 if (GOT_symbol
6217 && GOT_symbol == i.op[n].disps->X_add_symbol
6218 && (((reloc_type == BFD_RELOC_32
6219 || reloc_type == BFD_RELOC_X86_64_32S
6220 || (reloc_type == BFD_RELOC_64
6221 && object_64bit))
6222 && (i.op[n].disps->X_op == O_symbol
6223 || (i.op[n].disps->X_op == O_add
6224 && ((symbol_get_value_expression
6225 (i.op[n].disps->X_op_symbol)->X_op)
6226 == O_subtract))))
6227 || reloc_type == BFD_RELOC_32_PCREL))
6228 {
6229 offsetT add;
6230
6231 if (insn_start_frag == frag_now)
6232 add = (p - frag_now->fr_literal) - insn_start_off;
6233 else
6234 {
6235 fragS *fr;
6236
6237 add = insn_start_frag->fr_fix - insn_start_off;
6238 for (fr = insn_start_frag->fr_next;
6239 fr && fr != frag_now; fr = fr->fr_next)
6240 add += fr->fr_fix;
6241 add += p - frag_now->fr_literal;
6242 }
6243
6244 if (!object_64bit)
6245 {
6246 reloc_type = BFD_RELOC_386_GOTPC;
6247 i.op[n].imms->X_add_number += add;
6248 }
6249 else if (reloc_type == BFD_RELOC_64)
6250 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6251 else
6252 /* Don't do the adjustment for x86-64, as there
6253 the pcrel addressing is relative to the _next_
6254 insn, and that is taken care of in other code. */
6255 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6256 }
6257 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6258 i.op[n].disps, pcrel, reloc_type);
6259 }
6260 }
6261 }
6262}
6263
6264static void
6265output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6266{
6267 char *p;
6268 unsigned int n;
6269
6270 for (n = 0; n < i.operands; n++)
6271 {
6272 if (operand_type_check (i.types[n], imm))
6273 {
6274 if (i.op[n].imms->X_op == O_constant)
6275 {
6276 int size = imm_size (n);
6277 offsetT val;
6278
6279 val = offset_in_range (i.op[n].imms->X_add_number,
6280 size);
6281 p = frag_more (size);
6282 md_number_to_chars (p, val, size);
6283 }
6284 else
6285 {
6286 /* Not absolute_section.
6287 Need a 32-bit fixup (don't support 8bit
6288 non-absolute imms). Try to support other
6289 sizes ... */
6290 enum bfd_reloc_code_real reloc_type;
6291 int size = imm_size (n);
6292 int sign;
6293
6294 if (i.types[n].bitfield.imm32s
6295 && (i.suffix == QWORD_MNEM_SUFFIX
6296 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6297 sign = 1;
6298 else
6299 sign = 0;
6300
6301 p = frag_more (size);
6302 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6303
6304 /* This is tough to explain. We end up with this one if we
6305 * have operands that look like
6306 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6307 * obtain the absolute address of the GOT, and it is strongly
6308 * preferable from a performance point of view to avoid using
6309 * a runtime relocation for this. The actual sequence of
6310 * instructions often look something like:
6311 *
6312 * call .L66
6313 * .L66:
6314 * popl %ebx
6315 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6316 *
6317 * The call and pop essentially return the absolute address
6318 * of the label .L66 and store it in %ebx. The linker itself
6319 * will ultimately change the first operand of the addl so
6320 * that %ebx points to the GOT, but to keep things simple, the
6321 * .o file must have this operand set so that it generates not
6322 * the absolute address of .L66, but the absolute address of
6323 * itself. This allows the linker itself simply treat a GOTPC
6324 * relocation as asking for a pcrel offset to the GOT to be
6325 * added in, and the addend of the relocation is stored in the
6326 * operand field for the instruction itself.
6327 *
6328 * Our job here is to fix the operand so that it would add
6329 * the correct offset so that %ebx would point to itself. The
6330 * thing that is tricky is that .-.L66 will point to the
6331 * beginning of the instruction, so we need to further modify
6332 * the operand so that it will point to itself. There are
6333 * other cases where you have something like:
6334 *
6335 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6336 *
6337 * and here no correction would be required. Internally in
6338 * the assembler we treat operands of this form as not being
6339 * pcrel since the '.' is explicitly mentioned, and I wonder
6340 * whether it would simplify matters to do it this way. Who
6341 * knows. In earlier versions of the PIC patches, the
6342 * pcrel_adjust field was used to store the correction, but
6343 * since the expression is not pcrel, I felt it would be
6344 * confusing to do it this way. */
6345
6346 if ((reloc_type == BFD_RELOC_32
6347 || reloc_type == BFD_RELOC_X86_64_32S
6348 || reloc_type == BFD_RELOC_64)
6349 && GOT_symbol
6350 && GOT_symbol == i.op[n].imms->X_add_symbol
6351 && (i.op[n].imms->X_op == O_symbol
6352 || (i.op[n].imms->X_op == O_add
6353 && ((symbol_get_value_expression
6354 (i.op[n].imms->X_op_symbol)->X_op)
6355 == O_subtract))))
6356 {
6357 offsetT add;
6358
6359 if (insn_start_frag == frag_now)
6360 add = (p - frag_now->fr_literal) - insn_start_off;
6361 else
6362 {
6363 fragS *fr;
6364
6365 add = insn_start_frag->fr_fix - insn_start_off;
6366 for (fr = insn_start_frag->fr_next;
6367 fr && fr != frag_now; fr = fr->fr_next)
6368 add += fr->fr_fix;
6369 add += p - frag_now->fr_literal;
6370 }
6371
6372 if (!object_64bit)
6373 reloc_type = BFD_RELOC_386_GOTPC;
6374 else if (size == 4)
6375 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6376 else if (size == 8)
6377 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6378 i.op[n].imms->X_add_number += add;
6379 }
6380 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6381 i.op[n].imms, 0, reloc_type);
6382 }
6383 }
6384 }
6385}
6386\f
6387/* x86_cons_fix_new is called via the expression parsing code when a
6388 reloc is needed. We use this hook to get the correct .got reloc. */
6389static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6390static int cons_sign = -1;
6391
6392void
6393x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6394 expressionS *exp)
6395{
6396 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6397
6398 got_reloc = NO_RELOC;
6399
6400#ifdef TE_PE
6401 if (exp->X_op == O_secrel)
6402 {
6403 exp->X_op = O_symbol;
6404 r = BFD_RELOC_32_SECREL;
6405 }
6406#endif
6407
6408 fix_new_exp (frag, off, len, exp, 0, r);
6409}
6410
6411#if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6412# define lex_got(reloc, adjust, types) NULL
6413#else
6414/* Parse operands of the form
6415 <symbol>@GOTOFF+<nnn>
6416 and similar .plt or .got references.
6417
6418 If we find one, set up the correct relocation in RELOC and copy the
6419 input string, minus the `@GOTOFF' into a malloc'd buffer for
6420 parsing by the calling routine. Return this buffer, and if ADJUST
6421 is non-null set it to the length of the string we removed from the
6422 input line. Otherwise return NULL. */
6423static char *
6424lex_got (enum bfd_reloc_code_real *rel,
6425 int *adjust,
6426 i386_operand_type *types)
6427{
6428 /* Some of the relocations depend on the size of what field is to
6429 be relocated. But in our callers i386_immediate and i386_displacement
6430 we don't yet know the operand size (this will be set by insn
6431 matching). Hence we record the word32 relocation here,
6432 and adjust the reloc according to the real size in reloc(). */
6433 static const struct {
6434 const char *str;
6435 int len;
6436 const enum bfd_reloc_code_real rel[2];
6437 const i386_operand_type types64;
6438 } gotrel[] = {
6439 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6440 BFD_RELOC_X86_64_PLTOFF64 },
6441 OPERAND_TYPE_IMM64 },
6442 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6443 BFD_RELOC_X86_64_PLT32 },
6444 OPERAND_TYPE_IMM32_32S_DISP32 },
6445 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6446 BFD_RELOC_X86_64_GOTPLT64 },
6447 OPERAND_TYPE_IMM64_DISP64 },
6448 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6449 BFD_RELOC_X86_64_GOTOFF64 },
6450 OPERAND_TYPE_IMM64_DISP64 },
6451 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6452 BFD_RELOC_X86_64_GOTPCREL },
6453 OPERAND_TYPE_IMM32_32S_DISP32 },
6454 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6455 BFD_RELOC_X86_64_TLSGD },
6456 OPERAND_TYPE_IMM32_32S_DISP32 },
6457 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6458 _dummy_first_bfd_reloc_code_real },
6459 OPERAND_TYPE_NONE },
6460 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6461 BFD_RELOC_X86_64_TLSLD },
6462 OPERAND_TYPE_IMM32_32S_DISP32 },
6463 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6464 BFD_RELOC_X86_64_GOTTPOFF },
6465 OPERAND_TYPE_IMM32_32S_DISP32 },
6466 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6467 BFD_RELOC_X86_64_TPOFF32 },
6468 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6469 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6470 _dummy_first_bfd_reloc_code_real },
6471 OPERAND_TYPE_NONE },
6472 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6473 BFD_RELOC_X86_64_DTPOFF32 },
6474 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6475 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6476 _dummy_first_bfd_reloc_code_real },
6477 OPERAND_TYPE_NONE },
6478 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6479 _dummy_first_bfd_reloc_code_real },
6480 OPERAND_TYPE_NONE },
6481 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6482 BFD_RELOC_X86_64_GOT32 },
6483 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6484 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6485 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6486 OPERAND_TYPE_IMM32_32S_DISP32 },
6487 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6488 BFD_RELOC_X86_64_TLSDESC_CALL },
6489 OPERAND_TYPE_IMM32_32S_DISP32 },
6490 };
6491 char *cp;
6492 unsigned int j;
6493
6494 if (!IS_ELF)
6495 return NULL;
6496
6497 for (cp = input_line_pointer; *cp != '@'; cp++)
6498 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6499 return NULL;
6500
6501 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6502 {
6503 int len = gotrel[j].len;
6504 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6505 {
6506 if (gotrel[j].rel[object_64bit] != 0)
6507 {
6508 int first, second;
6509 char *tmpbuf, *past_reloc;
6510
6511 *rel = gotrel[j].rel[object_64bit];
6512 if (adjust)
6513 *adjust = len;
6514
6515 if (types)
6516 {
6517 if (flag_code != CODE_64BIT)
6518 {
6519 types->bitfield.imm32 = 1;
6520 types->bitfield.disp32 = 1;
6521 }
6522 else
6523 *types = gotrel[j].types64;
6524 }
6525
6526 if (GOT_symbol == NULL)
6527 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6528
6529 /* The length of the first part of our input line. */
6530 first = cp - input_line_pointer;
6531
6532 /* The second part goes from after the reloc token until
6533 (and including) an end_of_line char or comma. */
6534 past_reloc = cp + 1 + len;
6535 cp = past_reloc;
6536 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6537 ++cp;
6538 second = cp + 1 - past_reloc;
6539
6540 /* Allocate and copy string. The trailing NUL shouldn't
6541 be necessary, but be safe. */
6542 tmpbuf = (char *) xmalloc (first + second + 2);
6543 memcpy (tmpbuf, input_line_pointer, first);
6544 if (second != 0 && *past_reloc != ' ')
6545 /* Replace the relocation token with ' ', so that
6546 errors like foo@GOTOFF1 will be detected. */
6547 tmpbuf[first++] = ' ';
6548 memcpy (tmpbuf + first, past_reloc, second);
6549 tmpbuf[first + second] = '\0';
6550 return tmpbuf;
6551 }
6552
6553 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6554 gotrel[j].str, 1 << (5 + object_64bit));
6555 return NULL;
6556 }
6557 }
6558
6559 /* Might be a symbol version string. Don't as_bad here. */
6560 return NULL;
6561}
6562
6563void
6564x86_cons (expressionS *exp, int size)
6565{
6566 intel_syntax = -intel_syntax;
6567
6568 exp->X_md = 0;
6569 if (size == 4 || (object_64bit && size == 8))
6570 {
6571 /* Handle @GOTOFF and the like in an expression. */
6572 char *save;
6573 char *gotfree_input_line;
6574 int adjust;
6575
6576 save = input_line_pointer;
6577 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6578 if (gotfree_input_line)
6579 input_line_pointer = gotfree_input_line;
6580
6581 expression (exp);
6582
6583 if (gotfree_input_line)
6584 {
6585 /* expression () has merrily parsed up to the end of line,
6586 or a comma - in the wrong buffer. Transfer how far
6587 input_line_pointer has moved to the right buffer. */
6588 input_line_pointer = (save
6589 + (input_line_pointer - gotfree_input_line)
6590 + adjust);
6591 free (gotfree_input_line);
6592 if (exp->X_op == O_constant
6593 || exp->X_op == O_absent
6594 || exp->X_op == O_illegal
6595 || exp->X_op == O_register
6596 || exp->X_op == O_big)
6597 {
6598 char c = *input_line_pointer;
6599 *input_line_pointer = 0;
6600 as_bad (_("missing or invalid expression `%s'"), save);
6601 *input_line_pointer = c;
6602 }
6603 }
6604 }
6605 else
6606 expression (exp);
6607
6608 intel_syntax = -intel_syntax;
6609
6610 if (intel_syntax)
6611 i386_intel_simplify (exp);
6612}
6613#endif
6614
6615static void
6616signed_cons (int size)
6617{
6618 if (flag_code == CODE_64BIT)
6619 cons_sign = 1;
6620 cons (size);
6621 cons_sign = -1;
6622}
6623
6624#ifdef TE_PE
6625static void
6626pe_directive_secrel (dummy)
6627 int dummy ATTRIBUTE_UNUSED;
6628{
6629 expressionS exp;
6630
6631 do
6632 {
6633 expression (&exp);
6634 if (exp.X_op == O_symbol)
6635 exp.X_op = O_secrel;
6636
6637 emit_expr (&exp, 4);
6638 }
6639 while (*input_line_pointer++ == ',');
6640
6641 input_line_pointer--;
6642 demand_empty_rest_of_line ();
6643}
6644#endif
6645
6646static int
6647i386_immediate (char *imm_start)
6648{
6649 char *save_input_line_pointer;
6650 char *gotfree_input_line;
6651 segT exp_seg = 0;
6652 expressionS *exp;
6653 i386_operand_type types;
6654
6655 operand_type_set (&types, ~0);
6656
6657 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6658 {
6659 as_bad (_("at most %d immediate operands are allowed"),
6660 MAX_IMMEDIATE_OPERANDS);
6661 return 0;
6662 }
6663
6664 exp = &im_expressions[i.imm_operands++];
6665 i.op[this_operand].imms = exp;
6666
6667 if (is_space_char (*imm_start))
6668 ++imm_start;
6669
6670 save_input_line_pointer = input_line_pointer;
6671 input_line_pointer = imm_start;
6672
6673 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6674 if (gotfree_input_line)
6675 input_line_pointer = gotfree_input_line;
6676
6677 exp_seg = expression (exp);
6678
6679 SKIP_WHITESPACE ();
6680 if (*input_line_pointer)
6681 as_bad (_("junk `%s' after expression"), input_line_pointer);
6682
6683 input_line_pointer = save_input_line_pointer;
6684 if (gotfree_input_line)
6685 {
6686 free (gotfree_input_line);
6687
6688 if (exp->X_op == O_constant || exp->X_op == O_register)
6689 exp->X_op = O_illegal;
6690 }
6691
6692 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6693}
6694
6695static int
6696i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6697 i386_operand_type types, const char *imm_start)
6698{
6699 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6700 {
6701 if (imm_start)
6702 as_bad (_("missing or invalid immediate expression `%s'"),
6703 imm_start);
6704 return 0;
6705 }
6706 else if (exp->X_op == O_constant)
6707 {
6708 /* Size it properly later. */
6709 i.types[this_operand].bitfield.imm64 = 1;
6710 /* If not 64bit, sign extend val. */
6711 if (flag_code != CODE_64BIT
6712 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6713 exp->X_add_number
6714 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6715 }
6716#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6717 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6718 && exp_seg != absolute_section
6719 && exp_seg != text_section
6720 && exp_seg != data_section
6721 && exp_seg != bss_section
6722 && exp_seg != undefined_section
6723 && !bfd_is_com_section (exp_seg))
6724 {
6725 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6726 return 0;
6727 }
6728#endif
6729 else if (!intel_syntax && exp->X_op == O_register)
6730 {
6731 if (imm_start)
6732 as_bad (_("illegal immediate register operand %s"), imm_start);
6733 return 0;
6734 }
6735 else
6736 {
6737 /* This is an address. The size of the address will be
6738 determined later, depending on destination register,
6739 suffix, or the default for the section. */
6740 i.types[this_operand].bitfield.imm8 = 1;
6741 i.types[this_operand].bitfield.imm16 = 1;
6742 i.types[this_operand].bitfield.imm32 = 1;
6743 i.types[this_operand].bitfield.imm32s = 1;
6744 i.types[this_operand].bitfield.imm64 = 1;
6745 i.types[this_operand] = operand_type_and (i.types[this_operand],
6746 types);
6747 }
6748
6749 return 1;
6750}
6751
6752static char *
6753i386_scale (char *scale)
6754{
6755 offsetT val;
6756 char *save = input_line_pointer;
6757
6758 input_line_pointer = scale;
6759 val = get_absolute_expression ();
6760
6761 switch (val)
6762 {
6763 case 1:
6764 i.log2_scale_factor = 0;
6765 break;
6766 case 2:
6767 i.log2_scale_factor = 1;
6768 break;
6769 case 4:
6770 i.log2_scale_factor = 2;
6771 break;
6772 case 8:
6773 i.log2_scale_factor = 3;
6774 break;
6775 default:
6776 {
6777 char sep = *input_line_pointer;
6778
6779 *input_line_pointer = '\0';
6780 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6781 scale);
6782 *input_line_pointer = sep;
6783 input_line_pointer = save;
6784 return NULL;
6785 }
6786 }
6787 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6788 {
6789 as_warn (_("scale factor of %d without an index register"),
6790 1 << i.log2_scale_factor);
6791 i.log2_scale_factor = 0;
6792 }
6793 scale = input_line_pointer;
6794 input_line_pointer = save;
6795 return scale;
6796}
6797
6798static int
6799i386_displacement (char *disp_start, char *disp_end)
6800{
6801 expressionS *exp;
6802 segT exp_seg = 0;
6803 char *save_input_line_pointer;
6804 char *gotfree_input_line;
6805 int override;
6806 i386_operand_type bigdisp, types = anydisp;
6807 int ret;
6808
6809 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6810 {
6811 as_bad (_("at most %d displacement operands are allowed"),
6812 MAX_MEMORY_OPERANDS);
6813 return 0;
6814 }
6815
6816 operand_type_set (&bigdisp, 0);
6817 if ((i.types[this_operand].bitfield.jumpabsolute)
6818 || (!current_templates->start->opcode_modifier.jump
6819 && !current_templates->start->opcode_modifier.jumpdword))
6820 {
6821 bigdisp.bitfield.disp32 = 1;
6822 override = (i.prefix[ADDR_PREFIX] != 0);
6823 if (flag_code == CODE_64BIT)
6824 {
6825 if (!override)
6826 {
6827 bigdisp.bitfield.disp32s = 1;
6828 bigdisp.bitfield.disp64 = 1;
6829 }
6830 }
6831 else if ((flag_code == CODE_16BIT) ^ override)
6832 {
6833 bigdisp.bitfield.disp32 = 0;
6834 bigdisp.bitfield.disp16 = 1;
6835 }
6836 }
6837 else
6838 {
6839 /* For PC-relative branches, the width of the displacement
6840 is dependent upon data size, not address size. */
6841 override = (i.prefix[DATA_PREFIX] != 0);
6842 if (flag_code == CODE_64BIT)
6843 {
6844 if (override || i.suffix == WORD_MNEM_SUFFIX)
6845 bigdisp.bitfield.disp16 = 1;
6846 else
6847 {
6848 bigdisp.bitfield.disp32 = 1;
6849 bigdisp.bitfield.disp32s = 1;
6850 }
6851 }
6852 else
6853 {
6854 if (!override)
6855 override = (i.suffix == (flag_code != CODE_16BIT
6856 ? WORD_MNEM_SUFFIX
6857 : LONG_MNEM_SUFFIX));
6858 bigdisp.bitfield.disp32 = 1;
6859 if ((flag_code == CODE_16BIT) ^ override)
6860 {
6861 bigdisp.bitfield.disp32 = 0;
6862 bigdisp.bitfield.disp16 = 1;
6863 }
6864 }
6865 }
6866 i.types[this_operand] = operand_type_or (i.types[this_operand],
6867 bigdisp);
6868
6869 exp = &disp_expressions[i.disp_operands];
6870 i.op[this_operand].disps = exp;
6871 i.disp_operands++;
6872 save_input_line_pointer = input_line_pointer;
6873 input_line_pointer = disp_start;
6874 END_STRING_AND_SAVE (disp_end);
6875
6876#ifndef GCC_ASM_O_HACK
6877#define GCC_ASM_O_HACK 0
6878#endif
6879#if GCC_ASM_O_HACK
6880 END_STRING_AND_SAVE (disp_end + 1);
6881 if (i.types[this_operand].bitfield.baseIndex
6882 && displacement_string_end[-1] == '+')
6883 {
6884 /* This hack is to avoid a warning when using the "o"
6885 constraint within gcc asm statements.
6886 For instance:
6887
6888 #define _set_tssldt_desc(n,addr,limit,type) \
6889 __asm__ __volatile__ ( \
6890 "movw %w2,%0\n\t" \
6891 "movw %w1,2+%0\n\t" \
6892 "rorl $16,%1\n\t" \
6893 "movb %b1,4+%0\n\t" \
6894 "movb %4,5+%0\n\t" \
6895 "movb $0,6+%0\n\t" \
6896 "movb %h1,7+%0\n\t" \
6897 "rorl $16,%1" \
6898 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6899
6900 This works great except that the output assembler ends
6901 up looking a bit weird if it turns out that there is
6902 no offset. You end up producing code that looks like:
6903
6904 #APP
6905 movw $235,(%eax)
6906 movw %dx,2+(%eax)
6907 rorl $16,%edx
6908 movb %dl,4+(%eax)
6909 movb $137,5+(%eax)
6910 movb $0,6+(%eax)
6911 movb %dh,7+(%eax)
6912 rorl $16,%edx
6913 #NO_APP
6914
6915 So here we provide the missing zero. */
6916
6917 *displacement_string_end = '0';
6918 }
6919#endif
6920 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6921 if (gotfree_input_line)
6922 input_line_pointer = gotfree_input_line;
6923
6924 exp_seg = expression (exp);
6925
6926 SKIP_WHITESPACE ();
6927 if (*input_line_pointer)
6928 as_bad (_("junk `%s' after expression"), input_line_pointer);
6929#if GCC_ASM_O_HACK
6930 RESTORE_END_STRING (disp_end + 1);
6931#endif
6932 input_line_pointer = save_input_line_pointer;
6933 if (gotfree_input_line)
6934 {
6935 free (gotfree_input_line);
6936
6937 if (exp->X_op == O_constant || exp->X_op == O_register)
6938 exp->X_op = O_illegal;
6939 }
6940
6941 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6942
6943 RESTORE_END_STRING (disp_end);
6944
6945 return ret;
6946}
6947
6948static int
6949i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6950 i386_operand_type types, const char *disp_start)
6951{
6952 i386_operand_type bigdisp;
6953 int ret = 1;
6954
6955 /* We do this to make sure that the section symbol is in
6956 the symbol table. We will ultimately change the relocation
6957 to be relative to the beginning of the section. */
6958 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6959 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6960 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6961 {
6962 if (exp->X_op != O_symbol)
6963 goto inv_disp;
6964
6965 if (S_IS_LOCAL (exp->X_add_symbol)
6966 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
6967 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
6968 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6969 exp->X_op = O_subtract;
6970 exp->X_op_symbol = GOT_symbol;
6971 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6972 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6973 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6974 i.reloc[this_operand] = BFD_RELOC_64;
6975 else
6976 i.reloc[this_operand] = BFD_RELOC_32;
6977 }
6978
6979 else if (exp->X_op == O_absent
6980 || exp->X_op == O_illegal
6981 || exp->X_op == O_big)
6982 {
6983 inv_disp:
6984 as_bad (_("missing or invalid displacement expression `%s'"),
6985 disp_start);
6986 ret = 0;
6987 }
6988
6989 else if (flag_code == CODE_64BIT
6990 && !i.prefix[ADDR_PREFIX]
6991 && exp->X_op == O_constant)
6992 {
6993 /* Since displacement is signed extended to 64bit, don't allow
6994 disp32 and turn off disp32s if they are out of range. */
6995 i.types[this_operand].bitfield.disp32 = 0;
6996 if (!fits_in_signed_long (exp->X_add_number))
6997 {
6998 i.types[this_operand].bitfield.disp32s = 0;
6999 if (i.types[this_operand].bitfield.baseindex)
7000 {
7001 as_bad (_("0x%lx out range of signed 32bit displacement"),
7002 (long) exp->X_add_number);
7003 ret = 0;
7004 }
7005 }
7006 }
7007
7008#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7009 else if (exp->X_op != O_constant
7010 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7011 && exp_seg != absolute_section
7012 && exp_seg != text_section
7013 && exp_seg != data_section
7014 && exp_seg != bss_section
7015 && exp_seg != undefined_section
7016 && !bfd_is_com_section (exp_seg))
7017 {
7018 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7019 ret = 0;
7020 }
7021#endif
7022
7023 /* Check if this is a displacement only operand. */
7024 bigdisp = i.types[this_operand];
7025 bigdisp.bitfield.disp8 = 0;
7026 bigdisp.bitfield.disp16 = 0;
7027 bigdisp.bitfield.disp32 = 0;
7028 bigdisp.bitfield.disp32s = 0;
7029 bigdisp.bitfield.disp64 = 0;
7030 if (operand_type_all_zero (&bigdisp))
7031 i.types[this_operand] = operand_type_and (i.types[this_operand],
7032 types);
7033
7034 return ret;
7035}
7036
7037/* Make sure the memory operand we've been dealt is valid.
7038 Return 1 on success, 0 on a failure. */
7039
7040static int
7041i386_index_check (const char *operand_string)
7042{
7043 int ok;
7044 const char *kind = "base/index";
7045#if INFER_ADDR_PREFIX
7046 int fudged = 0;
7047
7048 tryprefix:
7049#endif
7050 ok = 1;
7051 if (current_templates->start->opcode_modifier.isstring
7052 && !current_templates->start->opcode_modifier.immext
7053 && (current_templates->end[-1].opcode_modifier.isstring
7054 || i.mem_operands))
7055 {
7056 /* Memory operands of string insns are special in that they only allow
7057 a single register (rDI, rSI, or rBX) as their memory address. */
7058 unsigned int expected;
7059
7060 kind = "string address";
7061
7062 if (current_templates->start->opcode_modifier.w)
7063 {
7064 i386_operand_type type = current_templates->end[-1].operand_types[0];
7065
7066 if (!type.bitfield.baseindex
7067 || ((!i.mem_operands != !intel_syntax)
7068 && current_templates->end[-1].operand_types[1]
7069 .bitfield.baseindex))
7070 type = current_templates->end[-1].operand_types[1];
7071 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7072 }
7073 else
7074 expected = 3 /* rBX */;
7075
7076 if (!i.base_reg || i.index_reg
7077 || operand_type_check (i.types[this_operand], disp))
7078 ok = -1;
7079 else if (!(flag_code == CODE_64BIT
7080 ? i.prefix[ADDR_PREFIX]
7081 ? i.base_reg->reg_type.bitfield.reg32
7082 : i.base_reg->reg_type.bitfield.reg64
7083 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7084 ? i.base_reg->reg_type.bitfield.reg32
7085 : i.base_reg->reg_type.bitfield.reg16))
7086 ok = 0;
7087 else if (i.base_reg->reg_num != expected)
7088 ok = -1;
7089
7090 if (ok < 0)
7091 {
7092 unsigned int j;
7093
7094 for (j = 0; j < i386_regtab_size; ++j)
7095 if ((flag_code == CODE_64BIT
7096 ? i.prefix[ADDR_PREFIX]
7097 ? i386_regtab[j].reg_type.bitfield.reg32
7098 : i386_regtab[j].reg_type.bitfield.reg64
7099 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7100 ? i386_regtab[j].reg_type.bitfield.reg32
7101 : i386_regtab[j].reg_type.bitfield.reg16)
7102 && i386_regtab[j].reg_num == expected)
7103 break;
7104 gas_assert (j < i386_regtab_size);
7105 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7106 operand_string,
7107 intel_syntax ? '[' : '(',
7108 register_prefix,
7109 i386_regtab[j].reg_name,
7110 intel_syntax ? ']' : ')');
7111 ok = 1;
7112 }
7113 }
7114 else if (flag_code == CODE_64BIT)
7115 {
7116 if ((i.base_reg
7117 && ((i.prefix[ADDR_PREFIX] == 0
7118 && !i.base_reg->reg_type.bitfield.reg64)
7119 || (i.prefix[ADDR_PREFIX]
7120 && !i.base_reg->reg_type.bitfield.reg32))
7121 && (i.index_reg
7122 || i.base_reg->reg_num !=
7123 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7124 || (i.index_reg
7125 && (!i.index_reg->reg_type.bitfield.baseindex
7126 || (i.prefix[ADDR_PREFIX] == 0
7127 && i.index_reg->reg_num != RegRiz
7128 && !i.index_reg->reg_type.bitfield.reg64
7129 )
7130 || (i.prefix[ADDR_PREFIX]
7131 && i.index_reg->reg_num != RegEiz
7132 && !i.index_reg->reg_type.bitfield.reg32))))
7133 ok = 0;
7134 }
7135 else
7136 {
7137 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7138 {
7139 /* 16bit checks. */
7140 if ((i.base_reg
7141 && (!i.base_reg->reg_type.bitfield.reg16
7142 || !i.base_reg->reg_type.bitfield.baseindex))
7143 || (i.index_reg
7144 && (!i.index_reg->reg_type.bitfield.reg16
7145 || !i.index_reg->reg_type.bitfield.baseindex
7146 || !(i.base_reg
7147 && i.base_reg->reg_num < 6
7148 && i.index_reg->reg_num >= 6
7149 && i.log2_scale_factor == 0))))
7150 ok = 0;
7151 }
7152 else
7153 {
7154 /* 32bit checks. */
7155 if ((i.base_reg
7156 && !i.base_reg->reg_type.bitfield.reg32)
7157 || (i.index_reg
7158 && ((!i.index_reg->reg_type.bitfield.reg32
7159 && i.index_reg->reg_num != RegEiz)
7160 || !i.index_reg->reg_type.bitfield.baseindex)))
7161 ok = 0;
7162 }
7163 }
7164 if (!ok)
7165 {
7166#if INFER_ADDR_PREFIX
7167 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7168 {
7169 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7170 i.prefixes += 1;
7171 /* Change the size of any displacement too. At most one of
7172 Disp16 or Disp32 is set.
7173 FIXME. There doesn't seem to be any real need for separate
7174 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7175 Removing them would probably clean up the code quite a lot. */
7176 if (flag_code != CODE_64BIT
7177 && (i.types[this_operand].bitfield.disp16
7178 || i.types[this_operand].bitfield.disp32))
7179 i.types[this_operand]
7180 = operand_type_xor (i.types[this_operand], disp16_32);
7181 fudged = 1;
7182 goto tryprefix;
7183 }
7184 if (fudged)
7185 as_bad (_("`%s' is not a valid %s expression"),
7186 operand_string,
7187 kind);
7188 else
7189#endif
7190 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7191 operand_string,
7192 flag_code_names[i.prefix[ADDR_PREFIX]
7193 ? flag_code == CODE_32BIT
7194 ? CODE_16BIT
7195 : CODE_32BIT
7196 : flag_code],
7197 kind);
7198 }
7199 return ok;
7200}
7201
7202/* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7203 on error. */
7204
7205static int
7206i386_att_operand (char *operand_string)
7207{
7208 const reg_entry *r;
7209 char *end_op;
7210 char *op_string = operand_string;
7211
7212 if (is_space_char (*op_string))
7213 ++op_string;
7214
7215 /* We check for an absolute prefix (differentiating,
7216 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
7217 if (*op_string == ABSOLUTE_PREFIX)
7218 {
7219 ++op_string;
7220 if (is_space_char (*op_string))
7221 ++op_string;
7222 i.types[this_operand].bitfield.jumpabsolute = 1;
7223 }
7224
7225 /* Check if operand is a register. */
7226 if ((r = parse_register (op_string, &end_op)) != NULL)
7227 {
7228 i386_operand_type temp;
7229
7230 /* Check for a segment override by searching for ':' after a
7231 segment register. */
7232 op_string = end_op;
7233 if (is_space_char (*op_string))
7234 ++op_string;
7235 if (*op_string == ':'
7236 && (r->reg_type.bitfield.sreg2
7237 || r->reg_type.bitfield.sreg3))
7238 {
7239 switch (r->reg_num)
7240 {
7241 case 0:
7242 i.seg[i.mem_operands] = &es;
7243 break;
7244 case 1:
7245 i.seg[i.mem_operands] = &cs;
7246 break;
7247 case 2:
7248 i.seg[i.mem_operands] = &ss;
7249 break;
7250 case 3:
7251 i.seg[i.mem_operands] = &ds;
7252 break;
7253 case 4:
7254 i.seg[i.mem_operands] = &fs;
7255 break;
7256 case 5:
7257 i.seg[i.mem_operands] = &gs;
7258 break;
7259 }
7260
7261 /* Skip the ':' and whitespace. */
7262 ++op_string;
7263 if (is_space_char (*op_string))
7264 ++op_string;
7265
7266 if (!is_digit_char (*op_string)
7267 && !is_identifier_char (*op_string)
7268 && *op_string != '('
7269 && *op_string != ABSOLUTE_PREFIX)
7270 {
7271 as_bad (_("bad memory operand `%s'"), op_string);
7272 return 0;
7273 }
7274 /* Handle case of %es:*foo. */
7275 if (*op_string == ABSOLUTE_PREFIX)
7276 {
7277 ++op_string;
7278 if (is_space_char (*op_string))
7279 ++op_string;
7280 i.types[this_operand].bitfield.jumpabsolute = 1;
7281 }
7282 goto do_memory_reference;
7283 }
7284 if (*op_string)
7285 {
7286 as_bad (_("junk `%s' after register"), op_string);
7287 return 0;
7288 }
7289 temp = r->reg_type;
7290 temp.bitfield.baseindex = 0;
7291 i.types[this_operand] = operand_type_or (i.types[this_operand],
7292 temp);
7293 i.types[this_operand].bitfield.unspecified = 0;
7294 i.op[this_operand].regs = r;
7295 i.reg_operands++;
7296 }
7297 else if (*op_string == REGISTER_PREFIX)
7298 {
7299 as_bad (_("bad register name `%s'"), op_string);
7300 return 0;
7301 }
7302 else if (*op_string == IMMEDIATE_PREFIX)
7303 {
7304 ++op_string;
7305 if (i.types[this_operand].bitfield.jumpabsolute)
7306 {
7307 as_bad (_("immediate operand illegal with absolute jump"));
7308 return 0;
7309 }
7310 if (!i386_immediate (op_string))
7311 return 0;
7312 }
7313 else if (is_digit_char (*op_string)
7314 || is_identifier_char (*op_string)
7315 || *op_string == '(')
7316 {
7317 /* This is a memory reference of some sort. */
7318 char *base_string;
7319
7320 /* Start and end of displacement string expression (if found). */
7321 char *displacement_string_start;
7322 char *displacement_string_end;
7323
7324 do_memory_reference:
7325 if ((i.mem_operands == 1
7326 && !current_templates->start->opcode_modifier.isstring)
7327 || i.mem_operands == 2)
7328 {
7329 as_bad (_("too many memory references for `%s'"),
7330 current_templates->start->name);
7331 return 0;
7332 }
7333
7334 /* Check for base index form. We detect the base index form by
7335 looking for an ')' at the end of the operand, searching
7336 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7337 after the '('. */
7338 base_string = op_string + strlen (op_string);
7339
7340 --base_string;
7341 if (is_space_char (*base_string))
7342 --base_string;
7343
7344 /* If we only have a displacement, set-up for it to be parsed later. */
7345 displacement_string_start = op_string;
7346 displacement_string_end = base_string + 1;
7347
7348 if (*base_string == ')')
7349 {
7350 char *temp_string;
7351 unsigned int parens_balanced = 1;
7352 /* We've already checked that the number of left & right ()'s are
7353 equal, so this loop will not be infinite. */
7354 do
7355 {
7356 base_string--;
7357 if (*base_string == ')')
7358 parens_balanced++;
7359 if (*base_string == '(')
7360 parens_balanced--;
7361 }
7362 while (parens_balanced);
7363
7364 temp_string = base_string;
7365
7366 /* Skip past '(' and whitespace. */
7367 ++base_string;
7368 if (is_space_char (*base_string))
7369 ++base_string;
7370
7371 if (*base_string == ','
7372 || ((i.base_reg = parse_register (base_string, &end_op))
7373 != NULL))
7374 {
7375 displacement_string_end = temp_string;
7376
7377 i.types[this_operand].bitfield.baseindex = 1;
7378
7379 if (i.base_reg)
7380 {
7381 base_string = end_op;
7382 if (is_space_char (*base_string))
7383 ++base_string;
7384 }
7385
7386 /* There may be an index reg or scale factor here. */
7387 if (*base_string == ',')
7388 {
7389 ++base_string;
7390 if (is_space_char (*base_string))
7391 ++base_string;
7392
7393 if ((i.index_reg = parse_register (base_string, &end_op))
7394 != NULL)
7395 {
7396 base_string = end_op;
7397 if (is_space_char (*base_string))
7398 ++base_string;
7399 if (*base_string == ',')
7400 {
7401 ++base_string;
7402 if (is_space_char (*base_string))
7403 ++base_string;
7404 }
7405 else if (*base_string != ')')
7406 {
7407 as_bad (_("expecting `,' or `)' "
7408 "after index register in `%s'"),
7409 operand_string);
7410 return 0;
7411 }
7412 }
7413 else if (*base_string == REGISTER_PREFIX)
7414 {
7415 as_bad (_("bad register name `%s'"), base_string);
7416 return 0;
7417 }
7418
7419 /* Check for scale factor. */
7420 if (*base_string != ')')
7421 {
7422 char *end_scale = i386_scale (base_string);
7423
7424 if (!end_scale)
7425 return 0;
7426
7427 base_string = end_scale;
7428 if (is_space_char (*base_string))
7429 ++base_string;
7430 if (*base_string != ')')
7431 {
7432 as_bad (_("expecting `)' "
7433 "after scale factor in `%s'"),
7434 operand_string);
7435 return 0;
7436 }
7437 }
7438 else if (!i.index_reg)
7439 {
7440 as_bad (_("expecting index register or scale factor "
7441 "after `,'; got '%c'"),
7442 *base_string);
7443 return 0;
7444 }
7445 }
7446 else if (*base_string != ')')
7447 {
7448 as_bad (_("expecting `,' or `)' "
7449 "after base register in `%s'"),
7450 operand_string);
7451 return 0;
7452 }
7453 }
7454 else if (*base_string == REGISTER_PREFIX)
7455 {
7456 as_bad (_("bad register name `%s'"), base_string);
7457 return 0;
7458 }
7459 }
7460
7461 /* If there's an expression beginning the operand, parse it,
7462 assuming displacement_string_start and
7463 displacement_string_end are meaningful. */
7464 if (displacement_string_start != displacement_string_end)
7465 {
7466 if (!i386_displacement (displacement_string_start,
7467 displacement_string_end))
7468 return 0;
7469 }
7470
7471 /* Special case for (%dx) while doing input/output op. */
7472 if (i.base_reg
7473 && operand_type_equal (&i.base_reg->reg_type,
7474 &reg16_inoutportreg)
7475 && i.index_reg == 0
7476 && i.log2_scale_factor == 0
7477 && i.seg[i.mem_operands] == 0
7478 && !operand_type_check (i.types[this_operand], disp))
7479 {
7480 i.types[this_operand] = inoutportreg;
7481 return 1;
7482 }
7483
7484 if (i386_index_check (operand_string) == 0)
7485 return 0;
7486 i.types[this_operand].bitfield.mem = 1;
7487 i.mem_operands++;
7488 }
7489 else
7490 {
7491 /* It's not a memory operand; argh! */
7492 as_bad (_("invalid char %s beginning operand %d `%s'"),
7493 output_invalid (*op_string),
7494 this_operand + 1,
7495 op_string);
7496 return 0;
7497 }
7498 return 1; /* Normal return. */
7499}
7500\f
7501/* md_estimate_size_before_relax()
7502
7503 Called just before relax() for rs_machine_dependent frags. The x86
7504 assembler uses these frags to handle variable size jump
7505 instructions.
7506
7507 Any symbol that is now undefined will not become defined.
7508 Return the correct fr_subtype in the frag.
7509 Return the initial "guess for variable size of frag" to caller.
7510 The guess is actually the growth beyond the fixed part. Whatever
7511 we do to grow the fixed or variable part contributes to our
7512 returned value. */
7513
7514int
7515md_estimate_size_before_relax (fragP, segment)
7516 fragS *fragP;
7517 segT segment;
7518{
7519 /* We've already got fragP->fr_subtype right; all we have to do is
7520 check for un-relaxable symbols. On an ELF system, we can't relax
7521 an externally visible symbol, because it may be overridden by a
7522 shared library. */
7523 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7524#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7525 || (IS_ELF
7526 && (S_IS_EXTERNAL (fragP->fr_symbol)
7527 || S_IS_WEAK (fragP->fr_symbol)
7528 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7529 & BSF_GNU_INDIRECT_FUNCTION))))
7530#endif
7531#if defined (OBJ_COFF) && defined (TE_PE)
7532 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7533 && S_IS_WEAK (fragP->fr_symbol))
7534#endif
7535 )
7536 {
7537 /* Symbol is undefined in this segment, or we need to keep a
7538 reloc so that weak symbols can be overridden. */
7539 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7540 enum bfd_reloc_code_real reloc_type;
7541 unsigned char *opcode;
7542 int old_fr_fix;
7543
7544 if (fragP->fr_var != NO_RELOC)
7545 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7546 else if (size == 2)
7547 reloc_type = BFD_RELOC_16_PCREL;
7548 else
7549 reloc_type = BFD_RELOC_32_PCREL;
7550
7551 old_fr_fix = fragP->fr_fix;
7552 opcode = (unsigned char *) fragP->fr_opcode;
7553
7554 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7555 {
7556 case UNCOND_JUMP:
7557 /* Make jmp (0xeb) a (d)word displacement jump. */
7558 opcode[0] = 0xe9;
7559 fragP->fr_fix += size;
7560 fix_new (fragP, old_fr_fix, size,
7561 fragP->fr_symbol,
7562 fragP->fr_offset, 1,
7563 reloc_type);
7564 break;
7565
7566 case COND_JUMP86:
7567 if (size == 2
7568 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7569 {
7570 /* Negate the condition, and branch past an
7571 unconditional jump. */
7572 opcode[0] ^= 1;
7573 opcode[1] = 3;
7574 /* Insert an unconditional jump. */
7575 opcode[2] = 0xe9;
7576 /* We added two extra opcode bytes, and have a two byte
7577 offset. */
7578 fragP->fr_fix += 2 + 2;
7579 fix_new (fragP, old_fr_fix + 2, 2,
7580 fragP->fr_symbol,
7581 fragP->fr_offset, 1,
7582 reloc_type);
7583 break;
7584 }
7585 /* Fall through. */
7586
7587 case COND_JUMP:
7588 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7589 {
7590 fixS *fixP;
7591
7592 fragP->fr_fix += 1;
7593 fixP = fix_new (fragP, old_fr_fix, 1,
7594 fragP->fr_symbol,
7595 fragP->fr_offset, 1,
7596 BFD_RELOC_8_PCREL);
7597 fixP->fx_signed = 1;
7598 break;
7599 }
7600
7601 /* This changes the byte-displacement jump 0x7N
7602 to the (d)word-displacement jump 0x0f,0x8N. */
7603 opcode[1] = opcode[0] + 0x10;
7604 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7605 /* We've added an opcode byte. */
7606 fragP->fr_fix += 1 + size;
7607 fix_new (fragP, old_fr_fix + 1, size,
7608 fragP->fr_symbol,
7609 fragP->fr_offset, 1,
7610 reloc_type);
7611 break;
7612
7613 default:
7614 BAD_CASE (fragP->fr_subtype);
7615 break;
7616 }
7617 frag_wane (fragP);
7618 return fragP->fr_fix - old_fr_fix;
7619 }
7620
7621 /* Guess size depending on current relax state. Initially the relax
7622 state will correspond to a short jump and we return 1, because
7623 the variable part of the frag (the branch offset) is one byte
7624 long. However, we can relax a section more than once and in that
7625 case we must either set fr_subtype back to the unrelaxed state,
7626 or return the value for the appropriate branch. */
7627 return md_relax_table[fragP->fr_subtype].rlx_length;
7628}
7629
7630/* Called after relax() is finished.
7631
7632 In: Address of frag.
7633 fr_type == rs_machine_dependent.
7634 fr_subtype is what the address relaxed to.
7635
7636 Out: Any fixSs and constants are set up.
7637 Caller will turn frag into a ".space 0". */
7638
7639void
7640md_convert_frag (abfd, sec, fragP)
7641 bfd *abfd ATTRIBUTE_UNUSED;
7642 segT sec ATTRIBUTE_UNUSED;
7643 fragS *fragP;
7644{
7645 unsigned char *opcode;
7646 unsigned char *where_to_put_displacement = NULL;
7647 offsetT target_address;
7648 offsetT opcode_address;
7649 unsigned int extension = 0;
7650 offsetT displacement_from_opcode_start;
7651
7652 opcode = (unsigned char *) fragP->fr_opcode;
7653
7654 /* Address we want to reach in file space. */
7655 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7656
7657 /* Address opcode resides at in file space. */
7658 opcode_address = fragP->fr_address + fragP->fr_fix;
7659
7660 /* Displacement from opcode start to fill into instruction. */
7661 displacement_from_opcode_start = target_address - opcode_address;
7662
7663 if ((fragP->fr_subtype & BIG) == 0)
7664 {
7665 /* Don't have to change opcode. */
7666 extension = 1; /* 1 opcode + 1 displacement */
7667 where_to_put_displacement = &opcode[1];
7668 }
7669 else
7670 {
7671 if (no_cond_jump_promotion
7672 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7673 as_warn_where (fragP->fr_file, fragP->fr_line,
7674 _("long jump required"));
7675
7676 switch (fragP->fr_subtype)
7677 {
7678 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7679 extension = 4; /* 1 opcode + 4 displacement */
7680 opcode[0] = 0xe9;
7681 where_to_put_displacement = &opcode[1];
7682 break;
7683
7684 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7685 extension = 2; /* 1 opcode + 2 displacement */
7686 opcode[0] = 0xe9;
7687 where_to_put_displacement = &opcode[1];
7688 break;
7689
7690 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7691 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7692 extension = 5; /* 2 opcode + 4 displacement */
7693 opcode[1] = opcode[0] + 0x10;
7694 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7695 where_to_put_displacement = &opcode[2];
7696 break;
7697
7698 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7699 extension = 3; /* 2 opcode + 2 displacement */
7700 opcode[1] = opcode[0] + 0x10;
7701 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7702 where_to_put_displacement = &opcode[2];
7703 break;
7704
7705 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7706 extension = 4;
7707 opcode[0] ^= 1;
7708 opcode[1] = 3;
7709 opcode[2] = 0xe9;
7710 where_to_put_displacement = &opcode[3];
7711 break;
7712
7713 default:
7714 BAD_CASE (fragP->fr_subtype);
7715 break;
7716 }
7717 }
7718
7719 /* If size if less then four we are sure that the operand fits,
7720 but if it's 4, then it could be that the displacement is larger
7721 then -/+ 2GB. */
7722 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7723 && object_64bit
7724 && ((addressT) (displacement_from_opcode_start - extension
7725 + ((addressT) 1 << 31))
7726 > (((addressT) 2 << 31) - 1)))
7727 {
7728 as_bad_where (fragP->fr_file, fragP->fr_line,
7729 _("jump target out of range"));
7730 /* Make us emit 0. */
7731 displacement_from_opcode_start = extension;
7732 }
7733 /* Now put displacement after opcode. */
7734 md_number_to_chars ((char *) where_to_put_displacement,
7735 (valueT) (displacement_from_opcode_start - extension),
7736 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7737 fragP->fr_fix += extension;
7738}
7739\f
7740/* Apply a fixup (fixS) to segment data, once it has been determined
7741 by our caller that we have all the info we need to fix it up.
7742
7743 On the 386, immediates, displacements, and data pointers are all in
7744 the same (little-endian) format, so we don't need to care about which
7745 we are handling. */
7746
7747void
7748md_apply_fix (fixP, valP, seg)
7749 /* The fix we're to put in. */
7750 fixS *fixP;
7751 /* Pointer to the value of the bits. */
7752 valueT *valP;
7753 /* Segment fix is from. */
7754 segT seg ATTRIBUTE_UNUSED;
7755{
7756 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7757 valueT value = *valP;
7758
7759#if !defined (TE_Mach)
7760 if (fixP->fx_pcrel)
7761 {
7762 switch (fixP->fx_r_type)
7763 {
7764 default:
7765 break;
7766
7767 case BFD_RELOC_64:
7768 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7769 break;
7770 case BFD_RELOC_32:
7771 case BFD_RELOC_X86_64_32S:
7772 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7773 break;
7774 case BFD_RELOC_16:
7775 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7776 break;
7777 case BFD_RELOC_8:
7778 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7779 break;
7780 }
7781 }
7782
7783 if (fixP->fx_addsy != NULL
7784 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7785 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7786 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7787 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7788 && !use_rela_relocations)
7789 {
7790 /* This is a hack. There should be a better way to handle this.
7791 This covers for the fact that bfd_install_relocation will
7792 subtract the current location (for partial_inplace, PC relative
7793 relocations); see more below. */
7794#ifndef OBJ_AOUT
7795 if (IS_ELF
7796#ifdef TE_PE
7797 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7798#endif
7799 )
7800 value += fixP->fx_where + fixP->fx_frag->fr_address;
7801#endif
7802#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7803 if (IS_ELF)
7804 {
7805 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7806
7807 if ((sym_seg == seg
7808 || (symbol_section_p (fixP->fx_addsy)
7809 && sym_seg != absolute_section))
7810 && !generic_force_reloc (fixP))
7811 {
7812 /* Yes, we add the values in twice. This is because
7813 bfd_install_relocation subtracts them out again. I think
7814 bfd_install_relocation is broken, but I don't dare change
7815 it. FIXME. */
7816 value += fixP->fx_where + fixP->fx_frag->fr_address;
7817 }
7818 }
7819#endif
7820#if defined (OBJ_COFF) && defined (TE_PE)
7821 /* For some reason, the PE format does not store a
7822 section address offset for a PC relative symbol. */
7823 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7824 || S_IS_WEAK (fixP->fx_addsy))
7825 value += md_pcrel_from (fixP);
7826#endif
7827 }
7828#if defined (OBJ_COFF) && defined (TE_PE)
7829 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7830 {
7831 value -= S_GET_VALUE (fixP->fx_addsy);
7832 }
7833#endif
7834
7835 /* Fix a few things - the dynamic linker expects certain values here,
7836 and we must not disappoint it. */
7837#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7838 if (IS_ELF && fixP->fx_addsy)
7839 switch (fixP->fx_r_type)
7840 {
7841 case BFD_RELOC_386_PLT32:
7842 case BFD_RELOC_X86_64_PLT32:
7843 /* Make the jump instruction point to the address of the operand. At
7844 runtime we merely add the offset to the actual PLT entry. */
7845 value = -4;
7846 break;
7847
7848 case BFD_RELOC_386_TLS_GD:
7849 case BFD_RELOC_386_TLS_LDM:
7850 case BFD_RELOC_386_TLS_IE_32:
7851 case BFD_RELOC_386_TLS_IE:
7852 case BFD_RELOC_386_TLS_GOTIE:
7853 case BFD_RELOC_386_TLS_GOTDESC:
7854 case BFD_RELOC_X86_64_TLSGD:
7855 case BFD_RELOC_X86_64_TLSLD:
7856 case BFD_RELOC_X86_64_GOTTPOFF:
7857 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7858 value = 0; /* Fully resolved at runtime. No addend. */
7859 /* Fallthrough */
7860 case BFD_RELOC_386_TLS_LE:
7861 case BFD_RELOC_386_TLS_LDO_32:
7862 case BFD_RELOC_386_TLS_LE_32:
7863 case BFD_RELOC_X86_64_DTPOFF32:
7864 case BFD_RELOC_X86_64_DTPOFF64:
7865 case BFD_RELOC_X86_64_TPOFF32:
7866 case BFD_RELOC_X86_64_TPOFF64:
7867 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7868 break;
7869
7870 case BFD_RELOC_386_TLS_DESC_CALL:
7871 case BFD_RELOC_X86_64_TLSDESC_CALL:
7872 value = 0; /* Fully resolved at runtime. No addend. */
7873 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7874 fixP->fx_done = 0;
7875 return;
7876
7877 case BFD_RELOC_386_GOT32:
7878 case BFD_RELOC_X86_64_GOT32:
7879 value = 0; /* Fully resolved at runtime. No addend. */
7880 break;
7881
7882 case BFD_RELOC_VTABLE_INHERIT:
7883 case BFD_RELOC_VTABLE_ENTRY:
7884 fixP->fx_done = 0;
7885 return;
7886
7887 default:
7888 break;
7889 }
7890#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7891 *valP = value;
7892#endif /* !defined (TE_Mach) */
7893
7894 /* Are we finished with this relocation now? */
7895 if (fixP->fx_addsy == NULL)
7896 fixP->fx_done = 1;
7897#if defined (OBJ_COFF) && defined (TE_PE)
7898 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7899 {
7900 fixP->fx_done = 0;
7901 /* Remember value for tc_gen_reloc. */
7902 fixP->fx_addnumber = value;
7903 /* Clear out the frag for now. */
7904 value = 0;
7905 }
7906#endif
7907 else if (use_rela_relocations)
7908 {
7909 fixP->fx_no_overflow = 1;
7910 /* Remember value for tc_gen_reloc. */
7911 fixP->fx_addnumber = value;
7912 value = 0;
7913 }
7914
7915 md_number_to_chars (p, value, fixP->fx_size);
7916}
7917\f
7918char *
7919md_atof (int type, char *litP, int *sizeP)
7920{
7921 /* This outputs the LITTLENUMs in REVERSE order;
7922 in accord with the bigendian 386. */
7923 return ieee_md_atof (type, litP, sizeP, FALSE);
7924}
7925\f
7926static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7927
7928static char *
7929output_invalid (int c)
7930{
7931 if (ISPRINT (c))
7932 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7933 "'%c'", c);
7934 else
7935 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7936 "(0x%x)", (unsigned char) c);
7937 return output_invalid_buf;
7938}
7939
7940/* REG_STRING starts *before* REGISTER_PREFIX. */
7941
7942static const reg_entry *
7943parse_real_register (char *reg_string, char **end_op)
7944{
7945 char *s = reg_string;
7946 char *p;
7947 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7948 const reg_entry *r;
7949
7950 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7951 if (*s == REGISTER_PREFIX)
7952 ++s;
7953
7954 if (is_space_char (*s))
7955 ++s;
7956
7957 p = reg_name_given;
7958 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7959 {
7960 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7961 return (const reg_entry *) NULL;
7962 s++;
7963 }
7964
7965 /* For naked regs, make sure that we are not dealing with an identifier.
7966 This prevents confusing an identifier like `eax_var' with register
7967 `eax'. */
7968 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7969 return (const reg_entry *) NULL;
7970
7971 *end_op = s;
7972
7973 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7974
7975 /* Handle floating point regs, allowing spaces in the (i) part. */
7976 if (r == i386_regtab /* %st is first entry of table */)
7977 {
7978 if (is_space_char (*s))
7979 ++s;
7980 if (*s == '(')
7981 {
7982 ++s;
7983 if (is_space_char (*s))
7984 ++s;
7985 if (*s >= '0' && *s <= '7')
7986 {
7987 int fpr = *s - '0';
7988 ++s;
7989 if (is_space_char (*s))
7990 ++s;
7991 if (*s == ')')
7992 {
7993 *end_op = s + 1;
7994 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7995 know (r);
7996 return r + fpr;
7997 }
7998 }
7999 /* We have "%st(" then garbage. */
8000 return (const reg_entry *) NULL;
8001 }
8002 }
8003
8004 if (r == NULL || allow_pseudo_reg)
8005 return r;
8006
8007 if (operand_type_all_zero (&r->reg_type))
8008 return (const reg_entry *) NULL;
8009
8010 if ((r->reg_type.bitfield.reg32
8011 || r->reg_type.bitfield.sreg3
8012 || r->reg_type.bitfield.control
8013 || r->reg_type.bitfield.debug
8014 || r->reg_type.bitfield.test)
8015 && !cpu_arch_flags.bitfield.cpui386)
8016 return (const reg_entry *) NULL;
8017
8018 if (r->reg_type.bitfield.floatreg
8019 && !cpu_arch_flags.bitfield.cpu8087
8020 && !cpu_arch_flags.bitfield.cpu287
8021 && !cpu_arch_flags.bitfield.cpu387)
8022 return (const reg_entry *) NULL;
8023
8024 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8025 return (const reg_entry *) NULL;
8026
8027 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8028 return (const reg_entry *) NULL;
8029
8030 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8031 return (const reg_entry *) NULL;
8032
8033 /* Don't allow fake index register unless allow_index_reg isn't 0. */
8034 if (!allow_index_reg
8035 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8036 return (const reg_entry *) NULL;
8037
8038 if (((r->reg_flags & (RegRex64 | RegRex))
8039 || r->reg_type.bitfield.reg64)
8040 && (!cpu_arch_flags.bitfield.cpulm
8041 || !operand_type_equal (&r->reg_type, &control))
8042 && flag_code != CODE_64BIT)
8043 return (const reg_entry *) NULL;
8044
8045 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8046 return (const reg_entry *) NULL;
8047
8048 return r;
8049}
8050
8051/* REG_STRING starts *before* REGISTER_PREFIX. */
8052
8053static const reg_entry *
8054parse_register (char *reg_string, char **end_op)
8055{
8056 const reg_entry *r;
8057
8058 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8059 r = parse_real_register (reg_string, end_op);
8060 else
8061 r = NULL;
8062 if (!r)
8063 {
8064 char *save = input_line_pointer;
8065 char c;
8066 symbolS *symbolP;
8067
8068 input_line_pointer = reg_string;
8069 c = get_symbol_end ();
8070 symbolP = symbol_find (reg_string);
8071 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8072 {
8073 const expressionS *e = symbol_get_value_expression (symbolP);
8074
8075 know (e->X_op == O_register);
8076 know (e->X_add_number >= 0
8077 && (valueT) e->X_add_number < i386_regtab_size);
8078 r = i386_regtab + e->X_add_number;
8079 *end_op = input_line_pointer;
8080 }
8081 *input_line_pointer = c;
8082 input_line_pointer = save;
8083 }
8084 return r;
8085}
8086
8087int
8088i386_parse_name (char *name, expressionS *e, char *nextcharP)
8089{
8090 const reg_entry *r;
8091 char *end = input_line_pointer;
8092
8093 *end = *nextcharP;
8094 r = parse_register (name, &input_line_pointer);
8095 if (r && end <= input_line_pointer)
8096 {
8097 *nextcharP = *input_line_pointer;
8098 *input_line_pointer = 0;
8099 e->X_op = O_register;
8100 e->X_add_number = r - i386_regtab;
8101 return 1;
8102 }
8103 input_line_pointer = end;
8104 *end = 0;
8105 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8106}
8107
8108void
8109md_operand (expressionS *e)
8110{
8111 char *end;
8112 const reg_entry *r;
8113
8114 switch (*input_line_pointer)
8115 {
8116 case REGISTER_PREFIX:
8117 r = parse_real_register (input_line_pointer, &end);
8118 if (r)
8119 {
8120 e->X_op = O_register;
8121 e->X_add_number = r - i386_regtab;
8122 input_line_pointer = end;
8123 }
8124 break;
8125
8126 case '[':
8127 gas_assert (intel_syntax);
8128 end = input_line_pointer++;
8129 expression (e);
8130 if (*input_line_pointer == ']')
8131 {
8132 ++input_line_pointer;
8133 e->X_op_symbol = make_expr_symbol (e);
8134 e->X_add_symbol = NULL;
8135 e->X_add_number = 0;
8136 e->X_op = O_index;
8137 }
8138 else
8139 {
8140 e->X_op = O_absent;
8141 input_line_pointer = end;
8142 }
8143 break;
8144 }
8145}
8146
8147\f
8148#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8149const char *md_shortopts = "kVQ:sqn";
8150#else
8151const char *md_shortopts = "qn";
8152#endif
8153
8154#define OPTION_32 (OPTION_MD_BASE + 0)
8155#define OPTION_64 (OPTION_MD_BASE + 1)
8156#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8157#define OPTION_MARCH (OPTION_MD_BASE + 3)
8158#define OPTION_MTUNE (OPTION_MD_BASE + 4)
8159#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8160#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8161#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8162#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8163#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8164#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8165#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8166#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8167#define OPTION_X32 (OPTION_MD_BASE + 13)
8168
8169struct option md_longopts[] =
8170{
8171 {"32", no_argument, NULL, OPTION_32},
8172#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8173 || defined (TE_PE) || defined (TE_PEP))
8174 {"64", no_argument, NULL, OPTION_64},
8175#endif
8176#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8177 {"x32", no_argument, NULL, OPTION_X32},
8178#endif
8179 {"divide", no_argument, NULL, OPTION_DIVIDE},
8180 {"march", required_argument, NULL, OPTION_MARCH},
8181 {"mtune", required_argument, NULL, OPTION_MTUNE},
8182 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8183 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8184 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8185 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8186 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8187 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8188 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8189 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8190 {NULL, no_argument, NULL, 0}
8191};
8192size_t md_longopts_size = sizeof (md_longopts);
8193
8194int
8195md_parse_option (int c, char *arg)
8196{
8197 unsigned int j;
8198 char *arch, *next;
8199
8200 switch (c)
8201 {
8202 case 'n':
8203 optimize_align_code = 0;
8204 break;
8205
8206 case 'q':
8207 quiet_warnings = 1;
8208 break;
8209
8210#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8211 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8212 should be emitted or not. FIXME: Not implemented. */
8213 case 'Q':
8214 break;
8215
8216 /* -V: SVR4 argument to print version ID. */
8217 case 'V':
8218 print_version_id ();
8219 break;
8220
8221 /* -k: Ignore for FreeBSD compatibility. */
8222 case 'k':
8223 break;
8224
8225 case 's':
8226 /* -s: On i386 Solaris, this tells the native assembler to use
8227 .stab instead of .stab.excl. We always use .stab anyhow. */
8228 break;
8229#endif
8230#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8231 || defined (TE_PE) || defined (TE_PEP))
8232 case OPTION_64:
8233 {
8234 const char **list, **l;
8235
8236 list = bfd_target_list ();
8237 for (l = list; *l != NULL; l++)
8238 if (CONST_STRNEQ (*l, "elf64-x86-64")
8239 || strcmp (*l, "coff-x86-64") == 0
8240 || strcmp (*l, "pe-x86-64") == 0
8241 || strcmp (*l, "pei-x86-64") == 0)
8242 {
8243 default_arch = "x86_64";
8244 break;
8245 }
8246 if (*l == NULL)
8247 as_fatal (_("No compiled in support for x86_64"));
8248 free (list);
8249 }
8250 break;
8251#endif
8252
8253#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8254 case OPTION_X32:
8255 if (IS_ELF)
8256 {
8257 const char **list, **l;
8258
8259 list = bfd_target_list ();
8260 for (l = list; *l != NULL; l++)
8261 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8262 {
8263 default_arch = "x86_64:32";
8264 break;
8265 }
8266 if (*l == NULL)
8267 as_fatal (_("No compiled in support for 32bit x86_64"));
8268 free (list);
8269 }
8270 else
8271 as_fatal (_("32bit x86_64 is only supported for ELF"));
8272 break;
8273#endif
8274
8275 case OPTION_32:
8276 default_arch = "i386";
8277 break;
8278
8279 case OPTION_DIVIDE:
8280#ifdef SVR4_COMMENT_CHARS
8281 {
8282 char *n, *t;
8283 const char *s;
8284
8285 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8286 t = n;
8287 for (s = i386_comment_chars; *s != '\0'; s++)
8288 if (*s != '/')
8289 *t++ = *s;
8290 *t = '\0';
8291 i386_comment_chars = n;
8292 }
8293#endif
8294 break;
8295
8296 case OPTION_MARCH:
8297 arch = xstrdup (arg);
8298 do
8299 {
8300 if (*arch == '.')
8301 as_fatal (_("Invalid -march= option: `%s'"), arg);
8302 next = strchr (arch, '+');
8303 if (next)
8304 *next++ = '\0';
8305 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8306 {
8307 if (strcmp (arch, cpu_arch [j].name) == 0)
8308 {
8309 /* Processor. */
8310 if (! cpu_arch[j].flags.bitfield.cpui386)
8311 continue;
8312
8313 cpu_arch_name = cpu_arch[j].name;
8314 cpu_sub_arch_name = NULL;
8315 cpu_arch_flags = cpu_arch[j].flags;
8316 cpu_arch_isa = cpu_arch[j].type;
8317 cpu_arch_isa_flags = cpu_arch[j].flags;
8318 if (!cpu_arch_tune_set)
8319 {
8320 cpu_arch_tune = cpu_arch_isa;
8321 cpu_arch_tune_flags = cpu_arch_isa_flags;
8322 }
8323 break;
8324 }
8325 else if (*cpu_arch [j].name == '.'
8326 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8327 {
8328 /* ISA entension. */
8329 i386_cpu_flags flags;
8330
8331 if (!cpu_arch[j].negated)
8332 flags = cpu_flags_or (cpu_arch_flags,
8333 cpu_arch[j].flags);
8334 else
8335 flags = cpu_flags_and_not (cpu_arch_flags,
8336 cpu_arch[j].flags);
8337 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8338 {
8339 if (cpu_sub_arch_name)
8340 {
8341 char *name = cpu_sub_arch_name;
8342 cpu_sub_arch_name = concat (name,
8343 cpu_arch[j].name,
8344 (const char *) NULL);
8345 free (name);
8346 }
8347 else
8348 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8349 cpu_arch_flags = flags;
8350 }
8351 break;
8352 }
8353 }
8354
8355 if (j >= ARRAY_SIZE (cpu_arch))
8356 as_fatal (_("Invalid -march= option: `%s'"), arg);
8357
8358 arch = next;
8359 }
8360 while (next != NULL );
8361 break;
8362
8363 case OPTION_MTUNE:
8364 if (*arg == '.')
8365 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8366 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8367 {
8368 if (strcmp (arg, cpu_arch [j].name) == 0)
8369 {
8370 cpu_arch_tune_set = 1;
8371 cpu_arch_tune = cpu_arch [j].type;
8372 cpu_arch_tune_flags = cpu_arch[j].flags;
8373 break;
8374 }
8375 }
8376 if (j >= ARRAY_SIZE (cpu_arch))
8377 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8378 break;
8379
8380 case OPTION_MMNEMONIC:
8381 if (strcasecmp (arg, "att") == 0)
8382 intel_mnemonic = 0;
8383 else if (strcasecmp (arg, "intel") == 0)
8384 intel_mnemonic = 1;
8385 else
8386 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8387 break;
8388
8389 case OPTION_MSYNTAX:
8390 if (strcasecmp (arg, "att") == 0)
8391 intel_syntax = 0;
8392 else if (strcasecmp (arg, "intel") == 0)
8393 intel_syntax = 1;
8394 else
8395 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8396 break;
8397
8398 case OPTION_MINDEX_REG:
8399 allow_index_reg = 1;
8400 break;
8401
8402 case OPTION_MNAKED_REG:
8403 allow_naked_reg = 1;
8404 break;
8405
8406 case OPTION_MOLD_GCC:
8407 old_gcc = 1;
8408 break;
8409
8410 case OPTION_MSSE2AVX:
8411 sse2avx = 1;
8412 break;
8413
8414 case OPTION_MSSE_CHECK:
8415 if (strcasecmp (arg, "error") == 0)
8416 sse_check = sse_check_error;
8417 else if (strcasecmp (arg, "warning") == 0)
8418 sse_check = sse_check_warning;
8419 else if (strcasecmp (arg, "none") == 0)
8420 sse_check = sse_check_none;
8421 else
8422 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8423 break;
8424
8425 case OPTION_MAVXSCALAR:
8426 if (strcasecmp (arg, "128") == 0)
8427 avxscalar = vex128;
8428 else if (strcasecmp (arg, "256") == 0)
8429 avxscalar = vex256;
8430 else
8431 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8432 break;
8433
8434 default:
8435 return 0;
8436 }
8437 return 1;
8438}
8439
8440#define MESSAGE_TEMPLATE \
8441" "
8442
8443static void
8444show_arch (FILE *stream, int ext, int check)
8445{
8446 static char message[] = MESSAGE_TEMPLATE;
8447 char *start = message + 27;
8448 char *p;
8449 int size = sizeof (MESSAGE_TEMPLATE);
8450 int left;
8451 const char *name;
8452 int len;
8453 unsigned int j;
8454
8455 p = start;
8456 left = size - (start - message);
8457 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8458 {
8459 /* Should it be skipped? */
8460 if (cpu_arch [j].skip)
8461 continue;
8462
8463 name = cpu_arch [j].name;
8464 len = cpu_arch [j].len;
8465 if (*name == '.')
8466 {
8467 /* It is an extension. Skip if we aren't asked to show it. */
8468 if (ext)
8469 {
8470 name++;
8471 len--;
8472 }
8473 else
8474 continue;
8475 }
8476 else if (ext)
8477 {
8478 /* It is an processor. Skip if we show only extension. */
8479 continue;
8480 }
8481 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8482 {
8483 /* It is an impossible processor - skip. */
8484 continue;
8485 }
8486
8487 /* Reserve 2 spaces for ", " or ",\0" */
8488 left -= len + 2;
8489
8490 /* Check if there is any room. */
8491 if (left >= 0)
8492 {
8493 if (p != start)
8494 {
8495 *p++ = ',';
8496 *p++ = ' ';
8497 }
8498 p = mempcpy (p, name, len);
8499 }
8500 else
8501 {
8502 /* Output the current message now and start a new one. */
8503 *p++ = ',';
8504 *p = '\0';
8505 fprintf (stream, "%s\n", message);
8506 p = start;
8507 left = size - (start - message) - len - 2;
8508
8509 gas_assert (left >= 0);
8510
8511 p = mempcpy (p, name, len);
8512 }
8513 }
8514
8515 *p = '\0';
8516 fprintf (stream, "%s\n", message);
8517}
8518
8519void
8520md_show_usage (FILE *stream)
8521{
8522#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8523 fprintf (stream, _("\
8524 -Q ignored\n\
8525 -V print assembler version number\n\
8526 -k ignored\n"));
8527#endif
8528 fprintf (stream, _("\
8529 -n Do not optimize code alignment\n\
8530 -q quieten some warnings\n"));
8531#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8532 fprintf (stream, _("\
8533 -s ignored\n"));
8534#endif
8535#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8536 || defined (TE_PE) || defined (TE_PEP))
8537 fprintf (stream, _("\
8538 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8539#endif
8540#ifdef SVR4_COMMENT_CHARS
8541 fprintf (stream, _("\
8542 --divide do not treat `/' as a comment character\n"));
8543#else
8544 fprintf (stream, _("\
8545 --divide ignored\n"));
8546#endif
8547 fprintf (stream, _("\
8548 -march=CPU[,+EXTENSION...]\n\
8549 generate code for CPU and EXTENSION, CPU is one of:\n"));
8550 show_arch (stream, 0, 1);
8551 fprintf (stream, _("\
8552 EXTENSION is combination of:\n"));
8553 show_arch (stream, 1, 0);
8554 fprintf (stream, _("\
8555 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8556 show_arch (stream, 0, 0);
8557 fprintf (stream, _("\
8558 -msse2avx encode SSE instructions with VEX prefix\n"));
8559 fprintf (stream, _("\
8560 -msse-check=[none|error|warning]\n\
8561 check SSE instructions\n"));
8562 fprintf (stream, _("\
8563 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8564 length\n"));
8565 fprintf (stream, _("\
8566 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8567 fprintf (stream, _("\
8568 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8569 fprintf (stream, _("\
8570 -mindex-reg support pseudo index registers\n"));
8571 fprintf (stream, _("\
8572 -mnaked-reg don't require `%%' prefix for registers\n"));
8573 fprintf (stream, _("\
8574 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8575}
8576
8577#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8578 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8579 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8580
8581/* Pick the target format to use. */
8582
8583const char *
8584i386_target_format (void)
8585{
8586 if (!strncmp (default_arch, "x86_64", 6))
8587 {
8588 update_code_flag (CODE_64BIT, 1);
8589 if (default_arch[6] == '\0')
8590 x86_elf_abi = X86_64_ABI;
8591 else
8592 x86_elf_abi = X86_64_X32_ABI;
8593 }
8594 else if (!strcmp (default_arch, "i386"))
8595 update_code_flag (CODE_32BIT, 1);
8596 else
8597 as_fatal (_("Unknown architecture"));
8598
8599 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8600 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8601 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8602 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8603
8604 switch (OUTPUT_FLAVOR)
8605 {
8606#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8607 case bfd_target_aout_flavour:
8608 return AOUT_TARGET_FORMAT;
8609#endif
8610#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8611# if defined (TE_PE) || defined (TE_PEP)
8612 case bfd_target_coff_flavour:
8613 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8614# elif defined (TE_GO32)
8615 case bfd_target_coff_flavour:
8616 return "coff-go32";
8617# else
8618 case bfd_target_coff_flavour:
8619 return "coff-i386";
8620# endif
8621#endif
8622#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8623 case bfd_target_elf_flavour:
8624 {
8625 const char *format;
8626
8627 switch (x86_elf_abi)
8628 {
8629 default:
8630 format = ELF_TARGET_FORMAT;
8631 break;
8632 case X86_64_ABI:
8633 use_rela_relocations = 1;
8634 object_64bit = 1;
8635 format = ELF_TARGET_FORMAT64;
8636 break;
8637 case X86_64_X32_ABI:
8638 use_rela_relocations = 1;
8639 object_64bit = 1;
8640 disallow_64bit_disp = 1;
8641 format = ELF_TARGET_FORMAT32;
8642 break;
8643 }
8644 if (cpu_arch_isa == PROCESSOR_L1OM)
8645 {
8646 if (x86_elf_abi != X86_64_ABI)
8647 as_fatal (_("Intel L1OM is 64bit only"));
8648 return ELF_TARGET_L1OM_FORMAT;
8649 }
8650 else
8651 return format;
8652 }
8653#endif
8654#if defined (OBJ_MACH_O)
8655 case bfd_target_mach_o_flavour:
8656 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8657#endif
8658 default:
8659 abort ();
8660 return NULL;
8661 }
8662}
8663
8664#endif /* OBJ_MAYBE_ more than one */
8665
8666#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8667void
8668i386_elf_emit_arch_note (void)
8669{
8670 if (IS_ELF && cpu_arch_name != NULL)
8671 {
8672 char *p;
8673 asection *seg = now_seg;
8674 subsegT subseg = now_subseg;
8675 Elf_Internal_Note i_note;
8676 Elf_External_Note e_note;
8677 asection *note_secp;
8678 int len;
8679
8680 /* Create the .note section. */
8681 note_secp = subseg_new (".note", 0);
8682 bfd_set_section_flags (stdoutput,
8683 note_secp,
8684 SEC_HAS_CONTENTS | SEC_READONLY);
8685
8686 /* Process the arch string. */
8687 len = strlen (cpu_arch_name);
8688
8689 i_note.namesz = len + 1;
8690 i_note.descsz = 0;
8691 i_note.type = NT_ARCH;
8692 p = frag_more (sizeof (e_note.namesz));
8693 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8694 p = frag_more (sizeof (e_note.descsz));
8695 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8696 p = frag_more (sizeof (e_note.type));
8697 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8698 p = frag_more (len + 1);
8699 strcpy (p, cpu_arch_name);
8700
8701 frag_align (2, 0, 0);
8702
8703 subseg_set (seg, subseg);
8704 }
8705}
8706#endif
8707\f
8708symbolS *
8709md_undefined_symbol (name)
8710 char *name;
8711{
8712 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8713 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8714 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8715 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8716 {
8717 if (!GOT_symbol)
8718 {
8719 if (symbol_find (name))
8720 as_bad (_("GOT already in symbol table"));
8721 GOT_symbol = symbol_new (name, undefined_section,
8722 (valueT) 0, &zero_address_frag);
8723 };
8724 return GOT_symbol;
8725 }
8726 return 0;
8727}
8728
8729/* Round up a section size to the appropriate boundary. */
8730
8731valueT
8732md_section_align (segment, size)
8733 segT segment ATTRIBUTE_UNUSED;
8734 valueT size;
8735{
8736#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8737 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8738 {
8739 /* For a.out, force the section size to be aligned. If we don't do
8740 this, BFD will align it for us, but it will not write out the
8741 final bytes of the section. This may be a bug in BFD, but it is
8742 easier to fix it here since that is how the other a.out targets
8743 work. */
8744 int align;
8745
8746 align = bfd_get_section_alignment (stdoutput, segment);
8747 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8748 }
8749#endif
8750
8751 return size;
8752}
8753
8754/* On the i386, PC-relative offsets are relative to the start of the
8755 next instruction. That is, the address of the offset, plus its
8756 size, since the offset is always the last part of the insn. */
8757
8758long
8759md_pcrel_from (fixS *fixP)
8760{
8761 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8762}
8763
8764#ifndef I386COFF
8765
8766static void
8767s_bss (int ignore ATTRIBUTE_UNUSED)
8768{
8769 int temp;
8770
8771#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8772 if (IS_ELF)
8773 obj_elf_section_change_hook ();
8774#endif
8775 temp = get_absolute_expression ();
8776 subseg_set (bss_section, (subsegT) temp);
8777 demand_empty_rest_of_line ();
8778}
8779
8780#endif
8781
8782void
8783i386_validate_fix (fixS *fixp)
8784{
8785 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8786 {
8787 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8788 {
8789 if (!object_64bit)
8790 abort ();
8791 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8792 }
8793 else
8794 {
8795 if (!object_64bit)
8796 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8797 else
8798 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8799 }
8800 fixp->fx_subsy = 0;
8801 }
8802}
8803
8804arelent *
8805tc_gen_reloc (section, fixp)
8806 asection *section ATTRIBUTE_UNUSED;
8807 fixS *fixp;
8808{
8809 arelent *rel;
8810 bfd_reloc_code_real_type code;
8811
8812 switch (fixp->fx_r_type)
8813 {
8814 case BFD_RELOC_X86_64_PLT32:
8815 case BFD_RELOC_X86_64_GOT32:
8816 case BFD_RELOC_X86_64_GOTPCREL:
8817 case BFD_RELOC_386_PLT32:
8818 case BFD_RELOC_386_GOT32:
8819 case BFD_RELOC_386_GOTOFF:
8820 case BFD_RELOC_386_GOTPC:
8821 case BFD_RELOC_386_TLS_GD:
8822 case BFD_RELOC_386_TLS_LDM:
8823 case BFD_RELOC_386_TLS_LDO_32:
8824 case BFD_RELOC_386_TLS_IE_32:
8825 case BFD_RELOC_386_TLS_IE:
8826 case BFD_RELOC_386_TLS_GOTIE:
8827 case BFD_RELOC_386_TLS_LE_32:
8828 case BFD_RELOC_386_TLS_LE:
8829 case BFD_RELOC_386_TLS_GOTDESC:
8830 case BFD_RELOC_386_TLS_DESC_CALL:
8831 case BFD_RELOC_X86_64_TLSGD:
8832 case BFD_RELOC_X86_64_TLSLD:
8833 case BFD_RELOC_X86_64_DTPOFF32:
8834 case BFD_RELOC_X86_64_DTPOFF64:
8835 case BFD_RELOC_X86_64_GOTTPOFF:
8836 case BFD_RELOC_X86_64_TPOFF32:
8837 case BFD_RELOC_X86_64_TPOFF64:
8838 case BFD_RELOC_X86_64_GOTOFF64:
8839 case BFD_RELOC_X86_64_GOTPC32:
8840 case BFD_RELOC_X86_64_GOT64:
8841 case BFD_RELOC_X86_64_GOTPCREL64:
8842 case BFD_RELOC_X86_64_GOTPC64:
8843 case BFD_RELOC_X86_64_GOTPLT64:
8844 case BFD_RELOC_X86_64_PLTOFF64:
8845 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8846 case BFD_RELOC_X86_64_TLSDESC_CALL:
8847 case BFD_RELOC_RVA:
8848 case BFD_RELOC_VTABLE_ENTRY:
8849 case BFD_RELOC_VTABLE_INHERIT:
8850#ifdef TE_PE
8851 case BFD_RELOC_32_SECREL:
8852#endif
8853 code = fixp->fx_r_type;
8854 break;
8855 case BFD_RELOC_X86_64_32S:
8856 if (!fixp->fx_pcrel)
8857 {
8858 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8859 code = fixp->fx_r_type;
8860 break;
8861 }
8862 default:
8863 if (fixp->fx_pcrel)
8864 {
8865 switch (fixp->fx_size)
8866 {
8867 default:
8868 as_bad_where (fixp->fx_file, fixp->fx_line,
8869 _("can not do %d byte pc-relative relocation"),
8870 fixp->fx_size);
8871 code = BFD_RELOC_32_PCREL;
8872 break;
8873 case 1: code = BFD_RELOC_8_PCREL; break;
8874 case 2: code = BFD_RELOC_16_PCREL; break;
8875 case 4: code = BFD_RELOC_32_PCREL; break;
8876#ifdef BFD64
8877 case 8: code = BFD_RELOC_64_PCREL; break;
8878#endif
8879 }
8880 }
8881 else
8882 {
8883 switch (fixp->fx_size)
8884 {
8885 default:
8886 as_bad_where (fixp->fx_file, fixp->fx_line,
8887 _("can not do %d byte relocation"),
8888 fixp->fx_size);
8889 code = BFD_RELOC_32;
8890 break;
8891 case 1: code = BFD_RELOC_8; break;
8892 case 2: code = BFD_RELOC_16; break;
8893 case 4: code = BFD_RELOC_32; break;
8894#ifdef BFD64
8895 case 8: code = BFD_RELOC_64; break;
8896#endif
8897 }
8898 }
8899 break;
8900 }
8901
8902 if ((code == BFD_RELOC_32
8903 || code == BFD_RELOC_32_PCREL
8904 || code == BFD_RELOC_X86_64_32S)
8905 && GOT_symbol
8906 && fixp->fx_addsy == GOT_symbol)
8907 {
8908 if (!object_64bit)
8909 code = BFD_RELOC_386_GOTPC;
8910 else
8911 code = BFD_RELOC_X86_64_GOTPC32;
8912 }
8913 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8914 && GOT_symbol
8915 && fixp->fx_addsy == GOT_symbol)
8916 {
8917 code = BFD_RELOC_X86_64_GOTPC64;
8918 }
8919
8920 rel = (arelent *) xmalloc (sizeof (arelent));
8921 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8922 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8923
8924 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8925
8926 if (!use_rela_relocations)
8927 {
8928 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8929 vtable entry to be used in the relocation's section offset. */
8930 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8931 rel->address = fixp->fx_offset;
8932#if defined (OBJ_COFF) && defined (TE_PE)
8933 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8934 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8935 else
8936#endif
8937 rel->addend = 0;
8938 }
8939 /* Use the rela in 64bit mode. */
8940 else
8941 {
8942 if (!fixp->fx_pcrel)
8943 rel->addend = fixp->fx_offset;
8944 else
8945 switch (code)
8946 {
8947 case BFD_RELOC_X86_64_PLT32:
8948 case BFD_RELOC_X86_64_GOT32:
8949 case BFD_RELOC_X86_64_GOTPCREL:
8950 case BFD_RELOC_X86_64_TLSGD:
8951 case BFD_RELOC_X86_64_TLSLD:
8952 case BFD_RELOC_X86_64_GOTTPOFF:
8953 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8954 case BFD_RELOC_X86_64_TLSDESC_CALL:
8955 rel->addend = fixp->fx_offset - fixp->fx_size;
8956 break;
8957 default:
8958 rel->addend = (section->vma
8959 - fixp->fx_size
8960 + fixp->fx_addnumber
8961 + md_pcrel_from (fixp));
8962 break;
8963 }
8964 }
8965
8966 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8967 if (rel->howto == NULL)
8968 {
8969 as_bad_where (fixp->fx_file, fixp->fx_line,
8970 _("cannot represent relocation type %s"),
8971 bfd_get_reloc_code_name (code));
8972 /* Set howto to a garbage value so that we can keep going. */
8973 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8974 gas_assert (rel->howto != NULL);
8975 }
8976
8977 return rel;
8978}
8979
8980#include "tc-i386-intel.c"
8981
8982void
8983tc_x86_parse_to_dw2regnum (expressionS *exp)
8984{
8985 int saved_naked_reg;
8986 char saved_register_dot;
8987
8988 saved_naked_reg = allow_naked_reg;
8989 allow_naked_reg = 1;
8990 saved_register_dot = register_chars['.'];
8991 register_chars['.'] = '.';
8992 allow_pseudo_reg = 1;
8993 expression_and_evaluate (exp);
8994 allow_pseudo_reg = 0;
8995 register_chars['.'] = saved_register_dot;
8996 allow_naked_reg = saved_naked_reg;
8997
8998 if (exp->X_op == O_register && exp->X_add_number >= 0)
8999 {
9000 if ((addressT) exp->X_add_number < i386_regtab_size)
9001 {
9002 exp->X_op = O_constant;
9003 exp->X_add_number = i386_regtab[exp->X_add_number]
9004 .dw2_regnum[flag_code >> 1];
9005 }
9006 else
9007 exp->X_op = O_illegal;
9008 }
9009}
9010
9011void
9012tc_x86_frame_initial_instructions (void)
9013{
9014 static unsigned int sp_regno[2];
9015
9016 if (!sp_regno[flag_code >> 1])
9017 {
9018 char *saved_input = input_line_pointer;
9019 char sp[][4] = {"esp", "rsp"};
9020 expressionS exp;
9021
9022 input_line_pointer = sp[flag_code >> 1];
9023 tc_x86_parse_to_dw2regnum (&exp);
9024 gas_assert (exp.X_op == O_constant);
9025 sp_regno[flag_code >> 1] = exp.X_add_number;
9026 input_line_pointer = saved_input;
9027 }
9028
9029 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9030 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9031}
9032
9033int
9034i386_elf_section_type (const char *str, size_t len)
9035{
9036 if (flag_code == CODE_64BIT
9037 && len == sizeof ("unwind") - 1
9038 && strncmp (str, "unwind", 6) == 0)
9039 return SHT_X86_64_UNWIND;
9040
9041 return -1;
9042}
9043
9044#ifdef TE_SOLARIS
9045void
9046i386_solaris_fix_up_eh_frame (segT sec)
9047{
9048 if (flag_code == CODE_64BIT)
9049 elf_section_type (sec) = SHT_X86_64_UNWIND;
9050}
9051#endif
9052
9053#ifdef TE_PE
9054void
9055tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9056{
9057 expressionS exp;
9058
9059 exp.X_op = O_secrel;
9060 exp.X_add_symbol = symbol;
9061 exp.X_add_number = 0;
9062 emit_expr (&exp, size);
9063}
9064#endif
9065
9066#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9067/* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9068
9069bfd_vma
9070x86_64_section_letter (int letter, char **ptr_msg)
9071{
9072 if (flag_code == CODE_64BIT)
9073 {
9074 if (letter == 'l')
9075 return SHF_X86_64_LARGE;
9076
9077 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9078 }
9079 else
9080 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9081 return -1;
9082}
9083
9084bfd_vma
9085x86_64_section_word (char *str, size_t len)
9086{
9087 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9088 return SHF_X86_64_LARGE;
9089
9090 return -1;
9091}
9092
9093static void
9094handle_large_common (int small ATTRIBUTE_UNUSED)
9095{
9096 if (flag_code != CODE_64BIT)
9097 {
9098 s_comm_internal (0, elf_common_parse);
9099 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9100 }
9101 else
9102 {
9103 static segT lbss_section;
9104 asection *saved_com_section_ptr = elf_com_section_ptr;
9105 asection *saved_bss_section = bss_section;
9106
9107 if (lbss_section == NULL)
9108 {
9109 flagword applicable;
9110 segT seg = now_seg;
9111 subsegT subseg = now_subseg;
9112
9113 /* The .lbss section is for local .largecomm symbols. */
9114 lbss_section = subseg_new (".lbss", 0);
9115 applicable = bfd_applicable_section_flags (stdoutput);
9116 bfd_set_section_flags (stdoutput, lbss_section,
9117 applicable & SEC_ALLOC);
9118 seg_info (lbss_section)->bss = 1;
9119
9120 subseg_set (seg, subseg);
9121 }
9122
9123 elf_com_section_ptr = &_bfd_elf_large_com_section;
9124 bss_section = lbss_section;
9125
9126 s_comm_internal (0, elf_common_parse);
9127
9128 elf_com_section_ptr = saved_com_section_ptr;
9129 bss_section = saved_bss_section;
9130 }
9131}
9132#endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.052281 seconds and 4 git commands to generate.