1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define LOCK_PREFIX 5
70 #define REX_PREFIX 6 /* must come last. */
71 #define MAX_PREFIXES 7 /* max prefixes per opcode */
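/* Editor's illustration (based on add_prefix () further down): the prefix
   opcodes land in these slots of i.prefix[]:

       0x9b (fwait)                       -> WAIT_PREFIX
       0x26/0x2e/0x36/0x3e/0x64/0x65      -> SEG_PREFIX
       0x67 (address size)                -> ADDR_PREFIX
       0x66 (operand size)                -> DATA_PREFIX
       0xf2/0xf3 (repne/rep)              -> REP_PREFIX
       0xf0 (lock)                        -> LOCK_PREFIX
       0x40..0x4f (REX, 64-bit code only) -> REX_PREFIX  */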
72
73 /* we define the syntax here (modulo base,index,scale syntax) */
74 #define REGISTER_PREFIX '%'
75 #define IMMEDIATE_PREFIX '$'
76 #define ABSOLUTE_PREFIX '*'
77
78 /* these are the instruction mnemonic suffixes in AT&T syntax or
79 memory operand size in Intel syntax. */
80 #define WORD_MNEM_SUFFIX 'w'
81 #define BYTE_MNEM_SUFFIX 'b'
82 #define SHORT_MNEM_SUFFIX 's'
83 #define LONG_MNEM_SUFFIX 'l'
84 #define QWORD_MNEM_SUFFIX 'q'
85 #define XMMWORD_MNEM_SUFFIX 'x'
86 #define YMMWORD_MNEM_SUFFIX 'y'
87 /* Intel Syntax. Use a non-ascii letter since it never appears
88 in instructions. */
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
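/* For example (editor's note), the AT&T forms

       movb $1, (%eax)      # BYTE_MNEM_SUFFIX
       movw $1, (%eax)      # WORD_MNEM_SUFFIX
       movl $1, (%eax)      # LONG_MNEM_SUFFIX
       movq $1, (%rax)      # QWORD_MNEM_SUFFIX

   correspond to Intel-syntax "mov BYTE/WORD/DWORD/QWORD PTR [...], 1".  */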
90
91 #define END_OF_INSN '\0'
92
93 /*
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
98 END.
99 */
100 typedef struct
101 {
102 const insn_template *start;
103 const insn_template *end;
104 }
105 templates;
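/* For instance (editor's sketch), looking up "mov" in op_hash yields a
   templates entry whose start..end range covers every "mov" pattern from
   i386-opc.tbl; match_template () then walks that range.  */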
106
107 /* 386 operand encoding bytes: see 386 book for details of this. */
108 typedef struct
109 {
110 unsigned int regmem; /* codes register or memory operand */
111 unsigned int reg; /* codes register operand (or extended opcode) */
112 unsigned int mode; /* how to interpret regmem & reg */
113 }
114 modrm_byte;
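/* Worked example (editor's note): for "movl %eax,(%ebx)" the ModRM byte
   uses mode = 0 (indirect, no displacement), reg = 0 (%eax) and
   regmem = 3 (%ebx), i.e. (0 << 6) | (0 << 3) | 3 = 0x03, so the insn
   is encoded as 89 03.  */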
115
116 /* x86-64 extension prefix. */
117 typedef int rex_byte;
118
119 /* 386 opcode byte to code indirect addressing. */
120 typedef struct
121 {
122 unsigned base;
123 unsigned index;
124 unsigned scale;
125 }
126 sib_byte;
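/* Worked example (editor's note): the memory operand "(%ebx,%esi,4)"
   gives base = 3 (%ebx), index = 6 (%esi) and scale = 2 (log2 of 4),
   so the SIB byte is (2 << 6) | (6 << 3) | 3 = 0xb3.  */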
127
128 /* x86 arch names, types and features */
129 typedef struct
130 {
131 const char *name; /* arch name */
132 unsigned int len; /* arch string length */
133 enum processor_type type; /* arch type */
134 i386_cpu_flags flags; /* cpu feature flags */
135 unsigned int skip; /* show_arch should skip this. */
136 unsigned int negated; /* turn off indicated flags. */
137 }
138 arch_entry;
139
140 static void update_code_flag (int, int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_sse_check (int);
147 static void set_cpu_arch (int);
148 #ifdef TE_PE
149 static void pe_directive_secrel (int);
150 #endif
151 static void signed_cons (int);
152 static char *output_invalid (int c);
153 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
154 const char *);
155 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
156 const char *);
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS *);
160 static int i386_intel_parse_name (const char *, expressionS *);
161 static const reg_entry *parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (int, int);
166 static void optimize_imm (void);
167 static void optimize_disp (void);
168 static const insn_template *match_template (void);
169 static int check_string (void);
170 static int process_suffix (void);
171 static int check_byte_reg (void);
172 static int check_long_reg (void);
173 static int check_qword_reg (void);
174 static int check_word_reg (void);
175 static int finalize_imm (void);
176 static int process_operands (void);
177 static const seg_entry *build_modrm_byte (void);
178 static void output_insn (void);
179 static void output_imm (fragS *, offsetT);
180 static void output_disp (fragS *, offsetT);
181 #ifndef I386COFF
182 static void s_bss (int);
183 #endif
184 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185 static void handle_large_common (int small ATTRIBUTE_UNUSED);
186 #endif
187
188 static const char *default_arch = DEFAULT_ARCH;
189
190 /* VEX prefix. */
191 typedef struct
192 {
193 /* VEX prefix is either 2 byte or 3 byte. */
194 unsigned char bytes[3];
195 unsigned int length;
196 /* Destination or source register specifier. */
197 const reg_entry *register_specifier;
198 } vex_prefix;
199
200 /* 'md_assemble ()' gathers together information and puts it into a
201 i386_insn. */
202
203 union i386_op
204 {
205 expressionS *disps;
206 expressionS *imms;
207 const reg_entry *regs;
208 };
209
210 enum i386_error
211 {
212 operand_size_mismatch,
213 operand_type_mismatch,
214 register_type_mismatch,
215 number_of_operands_mismatch,
216 invalid_instruction_suffix,
217 bad_imm4,
218 old_gcc_only,
219 unsupported_with_intel_mnemonic,
220 unsupported_syntax,
221 unsupported,
222 invalid_vsib_address,
223 unsupported_vector_index_register
224 };
225
226 struct _i386_insn
227 {
228 /* TM holds the template for the insn we're currently assembling. */
229 insn_template tm;
230
231 /* SUFFIX holds the instruction size suffix for byte, word, dword
232 or qword, if given. */
233 char suffix;
234
235 /* OPERANDS gives the number of given operands. */
236 unsigned int operands;
237
238 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
239 of given register, displacement, memory operands and immediate
240 operands. */
241 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
242
243 /* TYPES [i] is the type (see above #defines) which tells us how to
244 use OP[i] for the corresponding operand. */
245 i386_operand_type types[MAX_OPERANDS];
246
247 /* Displacement expression, immediate expression, or register for each
248 operand. */
249 union i386_op op[MAX_OPERANDS];
250
251 /* Flags for operands. */
252 unsigned int flags[MAX_OPERANDS];
253 #define Operand_PCrel 1
254
255 /* Relocation type for operand */
256 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
257
258 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
259 the base index byte below. */
260 const reg_entry *base_reg;
261 const reg_entry *index_reg;
262 unsigned int log2_scale_factor;
263
264 /* SEG gives the seg_entries of this insn. They are zero unless
265 explicit segment overrides are given. */
266 const seg_entry *seg[2];
267
268 /* PREFIX holds all the given prefix opcodes (usually null).
269 PREFIXES is the number of prefix opcodes. */
270 unsigned int prefixes;
271 unsigned char prefix[MAX_PREFIXES];
272
273 /* RM and SIB are the modrm byte and the sib byte where the
274 addressing modes of this insn are encoded. */
275 modrm_byte rm;
276 rex_byte rex;
277 sib_byte sib;
278 vex_prefix vex;
279
280 /* Swap operand in encoding. */
281 unsigned int swap_operand;
282
283 /* Prefer 8bit or 32bit displacement in encoding. */
284 enum
285 {
286 disp_encoding_default = 0,
287 disp_encoding_8bit,
288 disp_encoding_32bit
289 } disp_encoding;
290
291 /* Error message. */
292 enum i386_error error;
293 };
294
295 typedef struct _i386_insn i386_insn;
296
297 /* List of chars besides those in app.c:symbol_chars that can start an
298 operand. Used to prevent the scrubber eating vital white-space. */
299 const char extra_symbol_chars[] = "*%-(["
300 #ifdef LEX_AT
301 "@"
302 #endif
303 #ifdef LEX_QM
304 "?"
305 #endif
306 ;
307
308 #if (defined (TE_I386AIX) \
309 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
310 && !defined (TE_GNU) \
311 && !defined (TE_LINUX) \
312 && !defined (TE_NETWARE) \
313 && !defined (TE_FreeBSD) \
314 && !defined (TE_DragonFly) \
315 && !defined (TE_NetBSD)))
316 /* This array holds the chars that always start a comment. If the
317 pre-processor is disabled, these aren't very useful. The option
318 --divide will remove '/' from this list. */
319 const char *i386_comment_chars = "#/";
320 #define SVR4_COMMENT_CHARS 1
321 #define PREFIX_SEPARATOR '\\'
322
323 #else
324 const char *i386_comment_chars = "#";
325 #define PREFIX_SEPARATOR '/'
326 #endif
327
328 /* This array holds the chars that only start a comment at the beginning of
329 a line. If the line seems to have the form '# 123 filename'
330 .line and .file directives will appear in the pre-processed output.
331 Note that input_file.c hand checks for '#' at the beginning of the
332 first line of the input file. This is because the compiler outputs
333 #NO_APP at the beginning of its output.
334 Also note that comments started like this one will always work if
335 '/' isn't otherwise defined. */
336 const char line_comment_chars[] = "#/";
337
338 const char line_separator_chars[] = ";";
339
340 /* Chars that can be used to separate the mantissa from the exponent in
341 floating point numbers. */
342 const char EXP_CHARS[] = "eE";
343
344 /* Chars that mean this number is a floating point constant
345 As in 0f12.456
346 or 0d1.2345e12. */
347 const char FLT_CHARS[] = "fFdDxX";
348
349 /* Tables for lexical analysis. */
350 static char mnemonic_chars[256];
351 static char register_chars[256];
352 static char operand_chars[256];
353 static char identifier_chars[256];
354 static char digit_chars[256];
355
356 /* Lexical macros. */
357 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
358 #define is_operand_char(x) (operand_chars[(unsigned char) x])
359 #define is_register_char(x) (register_chars[(unsigned char) x])
360 #define is_space_char(x) ((x) == ' ')
361 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
362 #define is_digit_char(x) (digit_chars[(unsigned char) x])
363
364 /* All non-digit non-letter characters that may occur in an operand. */
365 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
366
367 /* md_assemble() always leaves the strings it's passed unaltered. To
368 effect this we maintain a stack of saved characters that we've smashed
369 with '\0's (indicating end of strings for various sub-fields of the
370 assembler instruction). */
371 static char save_stack[32];
372 static char *save_stack_p;
373 #define END_STRING_AND_SAVE(s) \
374 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
375 #define RESTORE_END_STRING(s) \
376 do { *(s) = *--save_stack_p; } while (0)
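/* Typical usage (editor's illustration):

       END_STRING_AND_SAVE (l);     push *l on save_stack and write '\0'
       ... examine the now NUL-terminated sub-field ...
       RESTORE_END_STRING (l);      put the saved character back

   which is how the caller's string ends up unaltered.  */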
377
378 /* The instruction we're assembling. */
379 static i386_insn i;
380
381 /* Possible templates for current insn. */
382 static const templates *current_templates;
383
384 /* Per instruction expressionS buffers: max displacements & immediates. */
385 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
386 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
387
388 /* Current operand we are working on. */
389 static int this_operand = -1;
390
391 /* We support three different modes. FLAG_CODE variable is used to distinguish
392 these. */
393
394 enum flag_code {
395 CODE_32BIT,
396 CODE_16BIT,
397 CODE_64BIT };
398
399 static enum flag_code flag_code;
400 static unsigned int object_64bit;
401 static unsigned int disallow_64bit_reloc;
402 static int use_rela_relocations = 0;
403
404 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
405 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
406 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
407
408 /* The ELF ABI to use. */
409 enum x86_elf_abi
410 {
411 I386_ABI,
412 X86_64_ABI,
413 X86_64_X32_ABI
414 };
415
416 static enum x86_elf_abi x86_elf_abi = I386_ABI;
417 #endif
418
419 /* The names used to print error messages. */
420 static const char *flag_code_names[] =
421 {
422 "32",
423 "16",
424 "64"
425 };
426
427 /* 1 for intel syntax,
428 0 if att syntax. */
429 static int intel_syntax = 0;
430
431 /* 1 for intel mnemonic,
432 0 if att mnemonic. */
433 static int intel_mnemonic = !SYSV386_COMPAT;
434
435 /* 1 if support old (<= 2.8.1) versions of gcc. */
436 static int old_gcc = OLDGCC_COMPAT;
437
438 /* 1 if pseudo registers are permitted. */
439 static int allow_pseudo_reg = 0;
440
441 /* 1 if register prefix % not required. */
442 static int allow_naked_reg = 0;
443
444 /* 1 if pseudo index register, eiz/riz, is allowed. */
445 static int allow_index_reg = 0;
446
447 static enum
448 {
449 sse_check_none = 0,
450 sse_check_warning,
451 sse_check_error
452 }
453 sse_check;
454
455 /* Register prefix used for error message. */
456 static const char *register_prefix = "%";
457
458 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
459 leave, push, and pop instructions so that gcc has the same stack
460 frame as in 32 bit mode. */
461 static char stackop_size = '\0';
462
463 /* Non-zero to optimize code alignment. */
464 int optimize_align_code = 1;
465
466 /* Non-zero to quieten some warnings. */
467 static int quiet_warnings = 0;
468
469 /* CPU name. */
470 static const char *cpu_arch_name = NULL;
471 static char *cpu_sub_arch_name = NULL;
472
473 /* CPU feature flags. */
474 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
475
476 /* If we have selected a cpu we are generating instructions for. */
477 static int cpu_arch_tune_set = 0;
478
479 /* Cpu we are generating instructions for. */
480 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
481
482 /* CPU feature flags of cpu we are generating instructions for. */
483 static i386_cpu_flags cpu_arch_tune_flags;
484
485 /* CPU instruction set architecture used. */
486 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
487
488 /* CPU feature flags of instruction set architecture used. */
489 i386_cpu_flags cpu_arch_isa_flags;
490
491 /* If set, conditional jumps are not automatically promoted to handle
492 offsets larger than a byte. */
493 static unsigned int no_cond_jump_promotion = 0;
494
495 /* Encode SSE instructions with VEX prefix. */
496 static unsigned int sse2avx;
497
498 /* Encode scalar AVX instructions with specific vector length. */
499 static enum
500 {
501 vex128 = 0,
502 vex256
503 } avxscalar;
504
505 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
506 static symbolS *GOT_symbol;
507
508 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
509 unsigned int x86_dwarf2_return_column;
510
511 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
512 int x86_cie_data_alignment;
513
514 /* Interface to relax_segment.
515 There are 3 major relax states for 386 jump insns because the
516 different types of jumps add different sizes to frags when we're
517 figuring out what sort of jump to choose to reach a given label. */
518
519 /* Types. */
520 #define UNCOND_JUMP 0
521 #define COND_JUMP 1
522 #define COND_JUMP86 2
523
524 /* Sizes. */
525 #define CODE16 1
526 #define SMALL 0
527 #define SMALL16 (SMALL | CODE16)
528 #define BIG 2
529 #define BIG16 (BIG | CODE16)
530
531 #ifndef INLINE
532 #ifdef __GNUC__
533 #define INLINE __inline__
534 #else
535 #define INLINE
536 #endif
537 #endif
538
539 #define ENCODE_RELAX_STATE(type, size) \
540 ((relax_substateT) (((type) << 2) | (size)))
541 #define TYPE_FROM_RELAX_STATE(s) \
542 ((s) >> 2)
543 #define DISP_SIZE_FROM_RELAX_STATE(s) \
544 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
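/* Worked example (editor's note): ENCODE_RELAX_STATE (COND_JUMP, SMALL)
   is (1 << 2) | 0 = 4; TYPE_FROM_RELAX_STATE (4) recovers COND_JUMP; and
   DISP_SIZE_FROM_RELAX_STATE yields 1, 1, 4 and 2 bytes for SMALL,
   SMALL16, BIG and BIG16 respectively.  */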
545
546 /* This table is used by relax_frag to promote short jumps to long
547 ones where necessary. SMALL (short) jumps may be promoted to BIG
548 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
549 don't allow a short jump in a 32 bit code segment to be promoted to
550 a 16 bit offset jump because it's slower (it requires a data size
551 prefix) and doesn't work unless the destination is in the bottom
552 64k of the code segment (the top 16 bits of eip are zeroed). */
553
554 const relax_typeS md_relax_table[] =
555 {
556 /* The fields are:
557 1) most positive reach of this state,
558 2) most negative reach of this state,
559 3) how many bytes this mode will have in the variable part of the frag
560 4) which index into the table to try if we can't fit into this one. */
561
562 /* UNCOND_JUMP states. */
563 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
564 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
565 /* dword jmp adds 4 bytes to frag:
566 0 extra opcode bytes, 4 displacement bytes. */
567 {0, 0, 4, 0},
568 /* word jmp adds 2 bytes to frag:
569 0 extra opcode bytes, 2 displacement bytes. */
570 {0, 0, 2, 0},
571
572 /* COND_JUMP states. */
573 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
574 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
575 /* dword conditionals add 5 bytes to frag:
576 1 extra opcode byte, 4 displacement bytes. */
577 {0, 0, 5, 0},
578 /* word conditionals add 3 bytes to frag:
579 1 extra opcode byte, 2 displacement bytes. */
580 {0, 0, 3, 0},
581
582 /* COND_JUMP86 states. */
583 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
584 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
585 /* dword conditionals add 5 bytes to frag:
586 1 extra opcode byte, 4 displacement bytes. */
587 {0, 0, 5, 0},
588 /* word conditionals add 4 bytes to frag:
589 1 displacement byte and a 3 byte long branch insn. */
590 {0, 0, 4, 0}
591 };
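/* Example (editor's sketch): a "jmp label" assembled in 32-bit code
   starts in state ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL) with a 1-byte
   displacement; if the target falls outside the byte reach given above,
   relax_frag () moves the frag to the BIG state and its variable part
   grows to 4 bytes.  */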
592
593 static const arch_entry cpu_arch[] =
594 {
595 /* Do not replace the first two entries - i386_target_format()
596 relies on them being there in this order. */
597 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
598 CPU_GENERIC32_FLAGS, 0, 0 },
599 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
600 CPU_GENERIC64_FLAGS, 0, 0 },
601 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
602 CPU_NONE_FLAGS, 0, 0 },
603 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
604 CPU_I186_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
606 CPU_I286_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
608 CPU_I386_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
610 CPU_I486_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
612 CPU_I586_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
614 CPU_I686_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
616 CPU_I586_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
618 CPU_PENTIUMPRO_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
620 CPU_P2_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
622 CPU_P3_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
624 CPU_P4_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
626 CPU_CORE_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
628 CPU_NOCONA_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
630 CPU_CORE_FLAGS, 1, 0 },
631 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
634 CPU_CORE2_FLAGS, 1, 0 },
635 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
636 CPU_CORE2_FLAGS, 0, 0 },
637 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
638 CPU_COREI7_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
640 CPU_L1OM_FLAGS, 0, 0 },
641 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
642 CPU_K1OM_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
644 CPU_K6_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
646 CPU_K6_2_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
648 CPU_ATHLON_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
650 CPU_K8_FLAGS, 1, 0 },
651 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
652 CPU_K8_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
654 CPU_K8_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
656 CPU_AMDFAM10_FLAGS, 0, 0 },
657 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
658 CPU_BDVER1_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
660 CPU_BDVER2_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
662 CPU_8087_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
664 CPU_287_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
666 CPU_387_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
668 CPU_ANY87_FLAGS, 0, 1 },
669 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
670 CPU_MMX_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
672 CPU_3DNOWA_FLAGS, 0, 1 },
673 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
674 CPU_SSE_FLAGS, 0, 0 },
675 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
676 CPU_SSE2_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
678 CPU_SSE3_FLAGS, 0, 0 },
679 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
680 CPU_SSSE3_FLAGS, 0, 0 },
681 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
682 CPU_SSE4_1_FLAGS, 0, 0 },
683 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
684 CPU_SSE4_2_FLAGS, 0, 0 },
685 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
686 CPU_SSE4_2_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
688 CPU_ANY_SSE_FLAGS, 0, 1 },
689 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
690 CPU_AVX_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
692 CPU_AVX2_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
694 CPU_ANY_AVX_FLAGS, 0, 1 },
695 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
696 CPU_VMX_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
698 CPU_VMFUNC_FLAGS, 0, 0 },
699 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
700 CPU_SMX_FLAGS, 0, 0 },
701 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
702 CPU_XSAVE_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
704 CPU_XSAVEOPT_FLAGS, 0, 0 },
705 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
706 CPU_AES_FLAGS, 0, 0 },
707 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
708 CPU_PCLMUL_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
710 CPU_PCLMUL_FLAGS, 1, 0 },
711 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
712 CPU_FSGSBASE_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
714 CPU_RDRND_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
716 CPU_F16C_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
718 CPU_BMI2_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
720 CPU_FMA_FLAGS, 0, 0 },
721 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
722 CPU_FMA4_FLAGS, 0, 0 },
723 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
724 CPU_XOP_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
726 CPU_LWP_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
728 CPU_MOVBE_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
730 CPU_EPT_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
732 CPU_LZCNT_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
734 CPU_INVPCID_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
736 CPU_CLFLUSH_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
738 CPU_NOP_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
740 CPU_SYSCALL_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
742 CPU_RDTSCP_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
744 CPU_3DNOW_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
746 CPU_3DNOWA_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
748 CPU_PADLOCK_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
750 CPU_SVME_FLAGS, 1, 0 },
751 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
752 CPU_SVME_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
754 CPU_SSE4A_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
756 CPU_ABM_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
758 CPU_BMI_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
760 CPU_TBM_FLAGS, 0, 0 },
761 };
762
763 #ifdef I386COFF
764 /* Like s_lcomm_internal in gas/read.c but the alignment string
765 is allowed to be optional. */
766
767 static symbolS *
768 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
769 {
770 addressT align = 0;
771
772 SKIP_WHITESPACE ();
773
774 if (needs_align
775 && *input_line_pointer == ',')
776 {
777 align = parse_align (needs_align - 1);
778
779 if (align == (addressT) -1)
780 return NULL;
781 }
782 else
783 {
784 if (size >= 8)
785 align = 3;
786 else if (size >= 4)
787 align = 2;
788 else if (size >= 2)
789 align = 1;
790 else
791 align = 0;
792 }
793
794 bss_alloc (symbolP, size, align);
795 return symbolP;
796 }
797
798 static void
799 pe_lcomm (int needs_align)
800 {
801 s_comm_internal (needs_align * 2, pe_lcomm_internal);
802 }
803 #endif
804
805 const pseudo_typeS md_pseudo_table[] =
806 {
807 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
808 {"align", s_align_bytes, 0},
809 #else
810 {"align", s_align_ptwo, 0},
811 #endif
812 {"arch", set_cpu_arch, 0},
813 #ifndef I386COFF
814 {"bss", s_bss, 0},
815 #else
816 {"lcomm", pe_lcomm, 1},
817 #endif
818 {"ffloat", float_cons, 'f'},
819 {"dfloat", float_cons, 'd'},
820 {"tfloat", float_cons, 'x'},
821 {"value", cons, 2},
822 {"slong", signed_cons, 4},
823 {"noopt", s_ignore, 0},
824 {"optim", s_ignore, 0},
825 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
826 {"code16", set_code_flag, CODE_16BIT},
827 {"code32", set_code_flag, CODE_32BIT},
828 {"code64", set_code_flag, CODE_64BIT},
829 {"intel_syntax", set_intel_syntax, 1},
830 {"att_syntax", set_intel_syntax, 0},
831 {"intel_mnemonic", set_intel_mnemonic, 1},
832 {"att_mnemonic", set_intel_mnemonic, 0},
833 {"allow_index_reg", set_allow_index_reg, 1},
834 {"disallow_index_reg", set_allow_index_reg, 0},
835 {"sse_check", set_sse_check, 0},
836 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
837 {"largecomm", handle_large_common, 0},
838 #else
839 {"file", (void (*) (int)) dwarf2_directive_file, 0},
840 {"loc", dwarf2_directive_loc, 0},
841 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
842 #endif
843 #ifdef TE_PE
844 {"secrel32", pe_directive_secrel, 0},
845 #endif
846 {0, 0, 0}
847 };
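/* So, for example (editor's illustration), an input file containing

       .arch i686
       .intel_syntax noprefix
       .code32
       mov eax, 1

   has each directive dispatched through the handlers listed above.  */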
848
849 /* For interface with expression (). */
850 extern char *input_line_pointer;
851
852 /* Hash table for instruction mnemonic lookup. */
853 static struct hash_control *op_hash;
854
855 /* Hash table for register lookup. */
856 static struct hash_control *reg_hash;
857 \f
858 void
859 i386_align_code (fragS *fragP, int count)
860 {
861 /* Various efficient no-op patterns for aligning code labels.
862 Note: Don't try to assemble the instructions in the comments.
863 0L and 0w are not legal. */
864 static const char f32_1[] =
865 {0x90}; /* nop */
866 static const char f32_2[] =
867 {0x66,0x90}; /* xchg %ax,%ax */
868 static const char f32_3[] =
869 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
870 static const char f32_4[] =
871 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
872 static const char f32_5[] =
873 {0x90, /* nop */
874 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
875 static const char f32_6[] =
876 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
877 static const char f32_7[] =
878 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
879 static const char f32_8[] =
880 {0x90, /* nop */
881 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
882 static const char f32_9[] =
883 {0x89,0xf6, /* movl %esi,%esi */
884 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
885 static const char f32_10[] =
886 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
887 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
888 static const char f32_11[] =
889 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
890 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
891 static const char f32_12[] =
892 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
893 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
894 static const char f32_13[] =
895 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
896 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
897 static const char f32_14[] =
898 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
899 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
900 static const char f16_3[] =
901 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
902 static const char f16_4[] =
903 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
904 static const char f16_5[] =
905 {0x90, /* nop */
906 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
907 static const char f16_6[] =
908 {0x89,0xf6, /* mov %si,%si */
909 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
910 static const char f16_7[] =
911 {0x8d,0x74,0x00, /* lea 0(%si),%si */
912 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
913 static const char f16_8[] =
914 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
915 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
916 static const char jump_31[] =
917 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
918 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
919 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
920 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
921 static const char *const f32_patt[] = {
922 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
923 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
924 };
925 static const char *const f16_patt[] = {
926 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
927 };
928 /* nopl (%[re]ax) */
929 static const char alt_3[] =
930 {0x0f,0x1f,0x00};
931 /* nopl 0(%[re]ax) */
932 static const char alt_4[] =
933 {0x0f,0x1f,0x40,0x00};
934 /* nopl 0(%[re]ax,%[re]ax,1) */
935 static const char alt_5[] =
936 {0x0f,0x1f,0x44,0x00,0x00};
937 /* nopw 0(%[re]ax,%[re]ax,1) */
938 static const char alt_6[] =
939 {0x66,0x0f,0x1f,0x44,0x00,0x00};
940 /* nopl 0L(%[re]ax) */
941 static const char alt_7[] =
942 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
943 /* nopl 0L(%[re]ax,%[re]ax,1) */
944 static const char alt_8[] =
945 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
946 /* nopw 0L(%[re]ax,%[re]ax,1) */
947 static const char alt_9[] =
948 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
949 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
950 static const char alt_10[] =
951 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
952 /* data16
953 nopw %cs:0L(%[re]ax,%[re]ax,1) */
954 static const char alt_long_11[] =
955 {0x66,
956 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
957 /* data16
958 data16
959 nopw %cs:0L(%[re]ax,%[re]ax,1) */
960 static const char alt_long_12[] =
961 {0x66,
962 0x66,
963 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
964 /* data16
965 data16
966 data16
967 nopw %cs:0L(%[re]ax,%[re]ax,1) */
968 static const char alt_long_13[] =
969 {0x66,
970 0x66,
971 0x66,
972 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
973 /* data16
974 data16
975 data16
976 data16
977 nopw %cs:0L(%[re]ax,%[re]ax,1) */
978 static const char alt_long_14[] =
979 {0x66,
980 0x66,
981 0x66,
982 0x66,
983 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
984 /* data16
985 data16
986 data16
987 data16
988 data16
989 nopw %cs:0L(%[re]ax,%[re]ax,1) */
990 static const char alt_long_15[] =
991 {0x66,
992 0x66,
993 0x66,
994 0x66,
995 0x66,
996 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
997 /* nopl 0(%[re]ax,%[re]ax,1)
998 nopw 0(%[re]ax,%[re]ax,1) */
999 static const char alt_short_11[] =
1000 {0x0f,0x1f,0x44,0x00,0x00,
1001 0x66,0x0f,0x1f,0x44,0x00,0x00};
1002 /* nopw 0(%[re]ax,%[re]ax,1)
1003 nopw 0(%[re]ax,%[re]ax,1) */
1004 static const char alt_short_12[] =
1005 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1006 0x66,0x0f,0x1f,0x44,0x00,0x00};
1007 /* nopw 0(%[re]ax,%[re]ax,1)
1008 nopl 0L(%[re]ax) */
1009 static const char alt_short_13[] =
1010 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1011 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1012 /* nopl 0L(%[re]ax)
1013 nopl 0L(%[re]ax) */
1014 static const char alt_short_14[] =
1015 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1016 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1017 /* nopl 0L(%[re]ax)
1018 nopl 0L(%[re]ax,%[re]ax,1) */
1019 static const char alt_short_15[] =
1020 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1021 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1022 static const char *const alt_short_patt[] = {
1023 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1024 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1025 alt_short_14, alt_short_15
1026 };
1027 static const char *const alt_long_patt[] = {
1028 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1029 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1030 alt_long_14, alt_long_15
1031 };
1032
1033 /* Do nothing unless the count is positive and within the supported maximum. */
1034 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1035 return;
1036
1037 /* We need to decide which NOP sequence to use for 32bit and
1038 64bit. When -mtune= is used:
1039
1040 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1041 PROCESSOR_GENERIC32, f32_patt will be used.
1042 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1043 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1044 PROCESSOR_GENERIC64, alt_long_patt will be used.
1045 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1046 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1047 will be used.
1048
1049 When -mtune= isn't used, alt_long_patt will be used if
1050 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1051 be used.
1052
1053 When -march= or .arch is used, we can't use anything beyond
1054 cpu_arch_isa_flags. */
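/* Editor's worked example: a request for count = 11 with alt_long_patt
   selected emits alt_long_11 (a data16-prefixed 10-byte nopw), whereas
   f32_patt would emit f32_11 (a 4-byte lea followed by a 7-byte lea).  */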
1055
1056 if (flag_code == CODE_16BIT)
1057 {
1058 if (count > 8)
1059 {
1060 memcpy (fragP->fr_literal + fragP->fr_fix,
1061 jump_31, count);
1062 /* Adjust jump offset. */
1063 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1064 }
1065 else
1066 memcpy (fragP->fr_literal + fragP->fr_fix,
1067 f16_patt[count - 1], count);
1068 }
1069 else
1070 {
1071 const char *const *patt = NULL;
1072
1073 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1074 {
1075 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1076 switch (cpu_arch_tune)
1077 {
1078 case PROCESSOR_UNKNOWN:
1079 /* We use cpu_arch_isa_flags to check if we SHOULD
1080 optimize with nops. */
1081 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1082 patt = alt_long_patt;
1083 else
1084 patt = f32_patt;
1085 break;
1086 case PROCESSOR_PENTIUM4:
1087 case PROCESSOR_NOCONA:
1088 case PROCESSOR_CORE:
1089 case PROCESSOR_CORE2:
1090 case PROCESSOR_COREI7:
1091 case PROCESSOR_L1OM:
1092 case PROCESSOR_K1OM:
1093 case PROCESSOR_GENERIC64:
1094 patt = alt_long_patt;
1095 break;
1096 case PROCESSOR_K6:
1097 case PROCESSOR_ATHLON:
1098 case PROCESSOR_K8:
1099 case PROCESSOR_AMDFAM10:
1100 case PROCESSOR_BD:
1101 patt = alt_short_patt;
1102 break;
1103 case PROCESSOR_I386:
1104 case PROCESSOR_I486:
1105 case PROCESSOR_PENTIUM:
1106 case PROCESSOR_PENTIUMPRO:
1107 case PROCESSOR_GENERIC32:
1108 patt = f32_patt;
1109 break;
1110 }
1111 }
1112 else
1113 {
1114 switch (fragP->tc_frag_data.tune)
1115 {
1116 case PROCESSOR_UNKNOWN:
1117 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1118 PROCESSOR_UNKNOWN. */
1119 abort ();
1120 break;
1121
1122 case PROCESSOR_I386:
1123 case PROCESSOR_I486:
1124 case PROCESSOR_PENTIUM:
1125 case PROCESSOR_K6:
1126 case PROCESSOR_ATHLON:
1127 case PROCESSOR_K8:
1128 case PROCESSOR_AMDFAM10:
1129 case PROCESSOR_BD:
1130 case PROCESSOR_GENERIC32:
1131 /* We use cpu_arch_isa_flags to check if we CAN optimize
1132 with nops. */
1133 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1134 patt = alt_short_patt;
1135 else
1136 patt = f32_patt;
1137 break;
1138 case PROCESSOR_PENTIUMPRO:
1139 case PROCESSOR_PENTIUM4:
1140 case PROCESSOR_NOCONA:
1141 case PROCESSOR_CORE:
1142 case PROCESSOR_CORE2:
1143 case PROCESSOR_COREI7:
1144 case PROCESSOR_L1OM:
1145 case PROCESSOR_K1OM:
1146 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1147 patt = alt_long_patt;
1148 else
1149 patt = f32_patt;
1150 break;
1151 case PROCESSOR_GENERIC64:
1152 patt = alt_long_patt;
1153 break;
1154 }
1155 }
1156
1157 if (patt == f32_patt)
1158 {
1159 /* If the padding is below the limit computed here, we use the
1160 normal nop sequences. Otherwise, we use a jump instruction
1161 and adjust its offset. */
1162 int limit;
1163
1164 /* For 64bit, the limit is 3 bytes. */
1165 if (flag_code == CODE_64BIT
1166 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1167 limit = 3;
1168 else
1169 limit = 15;
1170 if (count < limit)
1171 memcpy (fragP->fr_literal + fragP->fr_fix,
1172 patt[count - 1], count);
1173 else
1174 {
1175 memcpy (fragP->fr_literal + fragP->fr_fix,
1176 jump_31, count);
1177 /* Adjust jump offset. */
1178 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1179 }
1180 }
1181 else
1182 {
1183 /* Maximum length of an instruction is 15 bytes. If the
1184 padding is greater than 15 bytes and we don't use a jump,
1185 we have to break it into smaller pieces. */
1186 int padding = count;
1187 while (padding > 15)
1188 {
1189 padding -= 15;
1190 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1191 patt [14], 15);
1192 }
1193
1194 if (padding)
1195 memcpy (fragP->fr_literal + fragP->fr_fix,
1196 patt [padding - 1], padding);
1197 }
1198 }
1199 fragP->fr_var = count;
1200 }
1201
1202 static INLINE int
1203 operand_type_all_zero (const union i386_operand_type *x)
1204 {
1205 switch (ARRAY_SIZE(x->array))
1206 {
1207 case 3:
1208 if (x->array[2])
1209 return 0;
1210 case 2:
1211 if (x->array[1])
1212 return 0;
1213 case 1:
1214 return !x->array[0];
1215 default:
1216 abort ();
1217 }
1218 }
1219
1220 static INLINE void
1221 operand_type_set (union i386_operand_type *x, unsigned int v)
1222 {
1223 switch (ARRAY_SIZE(x->array))
1224 {
1225 case 3:
1226 x->array[2] = v;
1227 case 2:
1228 x->array[1] = v;
1229 case 1:
1230 x->array[0] = v;
1231 break;
1232 default:
1233 abort ();
1234 }
1235 }
1236
1237 static INLINE int
1238 operand_type_equal (const union i386_operand_type *x,
1239 const union i386_operand_type *y)
1240 {
1241 switch (ARRAY_SIZE(x->array))
1242 {
1243 case 3:
1244 if (x->array[2] != y->array[2])
1245 return 0;
1246 case 2:
1247 if (x->array[1] != y->array[1])
1248 return 0;
1249 case 1:
1250 return x->array[0] == y->array[0];
1251 break;
1252 default:
1253 abort ();
1254 }
1255 }
1256
1257 static INLINE int
1258 cpu_flags_all_zero (const union i386_cpu_flags *x)
1259 {
1260 switch (ARRAY_SIZE(x->array))
1261 {
1262 case 3:
1263 if (x->array[2])
1264 return 0;
1265 case 2:
1266 if (x->array[1])
1267 return 0;
1268 case 1:
1269 return !x->array[0];
1270 default:
1271 abort ();
1272 }
1273 }
1274
1275 static INLINE void
1276 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1277 {
1278 switch (ARRAY_SIZE(x->array))
1279 {
1280 case 3:
1281 x->array[2] = v;
1282 case 2:
1283 x->array[1] = v;
1284 case 1:
1285 x->array[0] = v;
1286 break;
1287 default:
1288 abort ();
1289 }
1290 }
1291
1292 static INLINE int
1293 cpu_flags_equal (const union i386_cpu_flags *x,
1294 const union i386_cpu_flags *y)
1295 {
1296 switch (ARRAY_SIZE(x->array))
1297 {
1298 case 3:
1299 if (x->array[2] != y->array[2])
1300 return 0;
1301 case 2:
1302 if (x->array[1] != y->array[1])
1303 return 0;
1304 case 1:
1305 return x->array[0] == y->array[0];
1306 break;
1307 default:
1308 abort ();
1309 }
1310 }
1311
1312 static INLINE int
1313 cpu_flags_check_cpu64 (i386_cpu_flags f)
1314 {
1315 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1316 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1317 }
1318
1319 static INLINE i386_cpu_flags
1320 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1321 {
1322 switch (ARRAY_SIZE (x.array))
1323 {
1324 case 3:
1325 x.array [2] &= y.array [2];
1326 case 2:
1327 x.array [1] &= y.array [1];
1328 case 1:
1329 x.array [0] &= y.array [0];
1330 break;
1331 default:
1332 abort ();
1333 }
1334 return x;
1335 }
1336
1337 static INLINE i386_cpu_flags
1338 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1339 {
1340 switch (ARRAY_SIZE (x.array))
1341 {
1342 case 3:
1343 x.array [2] |= y.array [2];
1344 case 2:
1345 x.array [1] |= y.array [1];
1346 case 1:
1347 x.array [0] |= y.array [0];
1348 break;
1349 default:
1350 abort ();
1351 }
1352 return x;
1353 }
1354
1355 static INLINE i386_cpu_flags
1356 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1357 {
1358 switch (ARRAY_SIZE (x.array))
1359 {
1360 case 3:
1361 x.array [2] &= ~y.array [2];
1362 case 2:
1363 x.array [1] &= ~y.array [1];
1364 case 1:
1365 x.array [0] &= ~y.array [0];
1366 break;
1367 default:
1368 abort ();
1369 }
1370 return x;
1371 }
1372
1373 #define CPU_FLAGS_ARCH_MATCH 0x1
1374 #define CPU_FLAGS_64BIT_MATCH 0x2
1375 #define CPU_FLAGS_AES_MATCH 0x4
1376 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1377 #define CPU_FLAGS_AVX_MATCH 0x10
1378
1379 #define CPU_FLAGS_32BIT_MATCH \
1380 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1381 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1382 #define CPU_FLAGS_PERFECT_MATCH \
1383 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1384
1385 /* Return CPU flags match bits. */
1386
1387 static int
1388 cpu_flags_match (const insn_template *t)
1389 {
1390 i386_cpu_flags x = t->cpu_flags;
1391 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1392
1393 x.bitfield.cpu64 = 0;
1394 x.bitfield.cpuno64 = 0;
1395
1396 if (cpu_flags_all_zero (&x))
1397 {
1398 /* This instruction is available on all archs. */
1399 match |= CPU_FLAGS_32BIT_MATCH;
1400 }
1401 else
1402 {
1403 /* This instruction is available only on some archs. */
1404 i386_cpu_flags cpu = cpu_arch_flags;
1405
1406 cpu.bitfield.cpu64 = 0;
1407 cpu.bitfield.cpuno64 = 0;
1408 cpu = cpu_flags_and (x, cpu);
1409 if (!cpu_flags_all_zero (&cpu))
1410 {
1411 if (x.bitfield.cpuavx)
1412 {
1413 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1414 if (cpu.bitfield.cpuavx)
1415 {
1416 /* Check SSE2AVX. */
1417 if (!t->opcode_modifier.sse2avx || sse2avx)
1418 {
1419 match |= (CPU_FLAGS_ARCH_MATCH
1420 | CPU_FLAGS_AVX_MATCH);
1421 /* Check AES. */
1422 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1423 match |= CPU_FLAGS_AES_MATCH;
1424 /* Check PCLMUL. */
1425 if (!x.bitfield.cpupclmul
1426 || cpu.bitfield.cpupclmul)
1427 match |= CPU_FLAGS_PCLMUL_MATCH;
1428 }
1429 }
1430 else
1431 match |= CPU_FLAGS_ARCH_MATCH;
1432 }
1433 else
1434 match |= CPU_FLAGS_32BIT_MATCH;
1435 }
1436 }
1437 return match;
1438 }
1439
1440 static INLINE i386_operand_type
1441 operand_type_and (i386_operand_type x, i386_operand_type y)
1442 {
1443 switch (ARRAY_SIZE (x.array))
1444 {
1445 case 3:
1446 x.array [2] &= y.array [2];
1447 case 2:
1448 x.array [1] &= y.array [1];
1449 case 1:
1450 x.array [0] &= y.array [0];
1451 break;
1452 default:
1453 abort ();
1454 }
1455 return x;
1456 }
1457
1458 static INLINE i386_operand_type
1459 operand_type_or (i386_operand_type x, i386_operand_type y)
1460 {
1461 switch (ARRAY_SIZE (x.array))
1462 {
1463 case 3:
1464 x.array [2] |= y.array [2];
1465 case 2:
1466 x.array [1] |= y.array [1];
1467 case 1:
1468 x.array [0] |= y.array [0];
1469 break;
1470 default:
1471 abort ();
1472 }
1473 return x;
1474 }
1475
1476 static INLINE i386_operand_type
1477 operand_type_xor (i386_operand_type x, i386_operand_type y)
1478 {
1479 switch (ARRAY_SIZE (x.array))
1480 {
1481 case 3:
1482 x.array [2] ^= y.array [2];
1483 case 2:
1484 x.array [1] ^= y.array [1];
1485 case 1:
1486 x.array [0] ^= y.array [0];
1487 break;
1488 default:
1489 abort ();
1490 }
1491 return x;
1492 }
1493
1494 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1495 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1496 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1497 static const i386_operand_type inoutportreg
1498 = OPERAND_TYPE_INOUTPORTREG;
1499 static const i386_operand_type reg16_inoutportreg
1500 = OPERAND_TYPE_REG16_INOUTPORTREG;
1501 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1502 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1503 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1504 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1505 static const i386_operand_type anydisp
1506 = OPERAND_TYPE_ANYDISP;
1507 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1508 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1509 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1510 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1511 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1512 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1513 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1514 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1515 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1516 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1517 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1518 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1519
1520 enum operand_type
1521 {
1522 reg,
1523 imm,
1524 disp,
1525 anymem
1526 };
1527
1528 static INLINE int
1529 operand_type_check (i386_operand_type t, enum operand_type c)
1530 {
1531 switch (c)
1532 {
1533 case reg:
1534 return (t.bitfield.reg8
1535 || t.bitfield.reg16
1536 || t.bitfield.reg32
1537 || t.bitfield.reg64);
1538
1539 case imm:
1540 return (t.bitfield.imm8
1541 || t.bitfield.imm8s
1542 || t.bitfield.imm16
1543 || t.bitfield.imm32
1544 || t.bitfield.imm32s
1545 || t.bitfield.imm64);
1546
1547 case disp:
1548 return (t.bitfield.disp8
1549 || t.bitfield.disp16
1550 || t.bitfield.disp32
1551 || t.bitfield.disp32s
1552 || t.bitfield.disp64);
1553
1554 case anymem:
1555 return (t.bitfield.disp8
1556 || t.bitfield.disp16
1557 || t.bitfield.disp32
1558 || t.bitfield.disp32s
1559 || t.bitfield.disp64
1560 || t.bitfield.baseindex);
1561
1562 default:
1563 abort ();
1564 }
1565
1566 return 0;
1567 }
1568
1569 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1570 operand J for instruction template T. */
1571
1572 static INLINE int
1573 match_reg_size (const insn_template *t, unsigned int j)
1574 {
1575 return !((i.types[j].bitfield.byte
1576 && !t->operand_types[j].bitfield.byte)
1577 || (i.types[j].bitfield.word
1578 && !t->operand_types[j].bitfield.word)
1579 || (i.types[j].bitfield.dword
1580 && !t->operand_types[j].bitfield.dword)
1581 || (i.types[j].bitfield.qword
1582 && !t->operand_types[j].bitfield.qword));
1583 }
1584
1585 /* Return 1 if there is no conflict in any size on operand J for
1586 instruction template T. */
1587
1588 static INLINE int
1589 match_mem_size (const insn_template *t, unsigned int j)
1590 {
1591 return (match_reg_size (t, j)
1592 && !((i.types[j].bitfield.unspecified
1593 && !t->operand_types[j].bitfield.unspecified)
1594 || (i.types[j].bitfield.fword
1595 && !t->operand_types[j].bitfield.fword)
1596 || (i.types[j].bitfield.tbyte
1597 && !t->operand_types[j].bitfield.tbyte)
1598 || (i.types[j].bitfield.xmmword
1599 && !t->operand_types[j].bitfield.xmmword)
1600 || (i.types[j].bitfield.ymmword
1601 && !t->operand_types[j].bitfield.ymmword)));
1602 }
1603
1604 /* Return 1 if there is no size conflict on any operands for
1605 instruction template T. */
1606
1607 static INLINE int
1608 operand_size_match (const insn_template *t)
1609 {
1610 unsigned int j;
1611 int match = 1;
1612
1613 /* Don't check jump instructions. */
1614 if (t->opcode_modifier.jump
1615 || t->opcode_modifier.jumpbyte
1616 || t->opcode_modifier.jumpdword
1617 || t->opcode_modifier.jumpintersegment)
1618 return match;
1619
1620 /* Check memory and accumulator operand size. */
1621 for (j = 0; j < i.operands; j++)
1622 {
1623 if (t->operand_types[j].bitfield.anysize)
1624 continue;
1625
1626 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1627 {
1628 match = 0;
1629 break;
1630 }
1631
1632 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1633 {
1634 match = 0;
1635 break;
1636 }
1637 }
1638
1639 if (match)
1640 return match;
1641 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1642 {
1643 mismatch:
1644 i.error = operand_size_mismatch;
1645 return 0;
1646 }
1647
1648 /* Check reverse. */
1649 gas_assert (i.operands == 2);
1650
1651 match = 1;
1652 for (j = 0; j < 2; j++)
1653 {
1654 if (t->operand_types[j].bitfield.acc
1655 && !match_reg_size (t, j ? 0 : 1))
1656 goto mismatch;
1657
1658 if (i.types[j].bitfield.mem
1659 && !match_mem_size (t, j ? 0 : 1))
1660 goto mismatch;
1661 }
1662
1663 return match;
1664 }
1665
1666 static INLINE int
1667 operand_type_match (i386_operand_type overlap,
1668 i386_operand_type given)
1669 {
1670 i386_operand_type temp = overlap;
1671
1672 temp.bitfield.jumpabsolute = 0;
1673 temp.bitfield.unspecified = 0;
1674 temp.bitfield.byte = 0;
1675 temp.bitfield.word = 0;
1676 temp.bitfield.dword = 0;
1677 temp.bitfield.fword = 0;
1678 temp.bitfield.qword = 0;
1679 temp.bitfield.tbyte = 0;
1680 temp.bitfield.xmmword = 0;
1681 temp.bitfield.ymmword = 0;
1682 if (operand_type_all_zero (&temp))
1683 goto mismatch;
1684
1685 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1686 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1687 return 1;
1688
1689 mismatch:
1690 i.error = operand_type_mismatch;
1691 return 0;
1692 }
1693
1694 /* If given types g0 and g1 are registers they must be of the same type
1695 unless the expected operand type register overlap is null.
1696 Note that Acc in a template matches every size of reg. */
1697
1698 static INLINE int
1699 operand_type_register_match (i386_operand_type m0,
1700 i386_operand_type g0,
1701 i386_operand_type t0,
1702 i386_operand_type m1,
1703 i386_operand_type g1,
1704 i386_operand_type t1)
1705 {
1706 if (!operand_type_check (g0, reg))
1707 return 1;
1708
1709 if (!operand_type_check (g1, reg))
1710 return 1;
1711
1712 if (g0.bitfield.reg8 == g1.bitfield.reg8
1713 && g0.bitfield.reg16 == g1.bitfield.reg16
1714 && g0.bitfield.reg32 == g1.bitfield.reg32
1715 && g0.bitfield.reg64 == g1.bitfield.reg64)
1716 return 1;
1717
1718 if (m0.bitfield.acc)
1719 {
1720 t0.bitfield.reg8 = 1;
1721 t0.bitfield.reg16 = 1;
1722 t0.bitfield.reg32 = 1;
1723 t0.bitfield.reg64 = 1;
1724 }
1725
1726 if (m1.bitfield.acc)
1727 {
1728 t1.bitfield.reg8 = 1;
1729 t1.bitfield.reg16 = 1;
1730 t1.bitfield.reg32 = 1;
1731 t1.bitfield.reg64 = 1;
1732 }
1733
1734 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1735 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1736 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1737 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1738 return 1;
1739
1740 i.error = register_type_mismatch;
1741
1742 return 0;
1743 }
1744
1745 static INLINE unsigned int
1746 mode_from_disp_size (i386_operand_type t)
1747 {
1748 if (t.bitfield.disp8)
1749 return 1;
1750 else if (t.bitfield.disp16
1751 || t.bitfield.disp32
1752 || t.bitfield.disp32s)
1753 return 2;
1754 else
1755 return 0;
1756 }
1757
1758 static INLINE int
1759 fits_in_signed_byte (offsetT num)
1760 {
1761 return (num >= -128) && (num <= 127);
1762 }
1763
1764 static INLINE int
1765 fits_in_unsigned_byte (offsetT num)
1766 {
1767 return (num & 0xff) == num;
1768 }
1769
1770 static INLINE int
1771 fits_in_unsigned_word (offsetT num)
1772 {
1773 return (num & 0xffff) == num;
1774 }
1775
1776 static INLINE int
1777 fits_in_signed_word (offsetT num)
1778 {
1779 return (-32768 <= num) && (num <= 32767);
1780 }
1781
1782 static INLINE int
1783 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1784 {
1785 #ifndef BFD64
1786 return 1;
1787 #else
1788 return (!(((offsetT) -1 << 31) & num)
1789 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1790 #endif
1791 } /* fits_in_signed_long() */
1792
1793 static INLINE int
1794 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1795 {
1796 #ifndef BFD64
1797 return 1;
1798 #else
1799 return (num & (((offsetT) 2 << 31) - 1)) == num;
1800 #endif
1801 } /* fits_in_unsigned_long() */
1802
1803 static INLINE int
1804 fits_in_imm4 (offsetT num)
1805 {
1806 return (num & 0xf) == num;
1807 }
1808
1809 static i386_operand_type
1810 smallest_imm_type (offsetT num)
1811 {
1812 i386_operand_type t;
1813
1814 operand_type_set (&t, 0);
1815 t.bitfield.imm64 = 1;
1816
1817 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1818 {
1819 /* This code is disabled on the 486 because all the Imm1 forms
1820 in the opcode table are slower on the i486. They're the
1821 versions with the implicitly specified single-position
1822 displacement, which has another syntax if you really want to
1823 use that form. */
1824 t.bitfield.imm1 = 1;
1825 t.bitfield.imm8 = 1;
1826 t.bitfield.imm8s = 1;
1827 t.bitfield.imm16 = 1;
1828 t.bitfield.imm32 = 1;
1829 t.bitfield.imm32s = 1;
1830 }
1831 else if (fits_in_signed_byte (num))
1832 {
1833 t.bitfield.imm8 = 1;
1834 t.bitfield.imm8s = 1;
1835 t.bitfield.imm16 = 1;
1836 t.bitfield.imm32 = 1;
1837 t.bitfield.imm32s = 1;
1838 }
1839 else if (fits_in_unsigned_byte (num))
1840 {
1841 t.bitfield.imm8 = 1;
1842 t.bitfield.imm16 = 1;
1843 t.bitfield.imm32 = 1;
1844 t.bitfield.imm32s = 1;
1845 }
1846 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1847 {
1848 t.bitfield.imm16 = 1;
1849 t.bitfield.imm32 = 1;
1850 t.bitfield.imm32s = 1;
1851 }
1852 else if (fits_in_signed_long (num))
1853 {
1854 t.bitfield.imm32 = 1;
1855 t.bitfield.imm32s = 1;
1856 }
1857 else if (fits_in_unsigned_long (num))
1858 t.bitfield.imm32 = 1;
1859
1860 return t;
1861 }
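/* Worked examples (editor's note): smallest_imm_type (1) sets Imm1 plus
   the 8/16/32-bit types (except when tuning for the i486, where Imm1 is
   skipped); smallest_imm_type (-129) skips the 8-bit types and sets
   Imm16/Imm32/Imm32S; Imm64 is always set first.  */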
1862
1863 static offsetT
1864 offset_in_range (offsetT val, int size)
1865 {
1866 addressT mask;
1867
1868 switch (size)
1869 {
1870 case 1: mask = ((addressT) 1 << 8) - 1; break;
1871 case 2: mask = ((addressT) 1 << 16) - 1; break;
1872 case 4: mask = ((addressT) 2 << 31) - 1; break;
1873 #ifdef BFD64
1874 case 8: mask = ((addressT) 2 << 63) - 1; break;
1875 #endif
1876 default: abort ();
1877 }
1878
1879 #ifdef BFD64
1880 /* If BFD64, sign extend val for 32bit address mode. */
1881 if (flag_code != CODE_64BIT
1882 || i.prefix[ADDR_PREFIX])
1883 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1884 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1885 #endif
1886
1887 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1888 {
1889 char buf1[40], buf2[40];
1890
1891 sprint_value (buf1, val);
1892 sprint_value (buf2, val & mask);
1893 as_warn (_("%s shortened to %s"), buf1, buf2);
1894 }
1895 return val & mask;
1896 }
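/* Worked example (editor's note): offset_in_range (0x1234567890, 4) does
   not fit in 4 bytes, so the "shortened to" warning is given and
   0x34567890 is returned; a value such as -1 passes for any size because
   (val & ~mask) == ~mask.  */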
1897
1898 enum PREFIX_GROUP
1899 {
1900 PREFIX_EXIST = 0,
1901 PREFIX_LOCK,
1902 PREFIX_REP,
1903 PREFIX_OTHER
1904 };
1905
1906 /* Returns
1907 a. PREFIX_EXIST if attempting to add a prefix where one from the
1908 same class already exists.
1909 b. PREFIX_LOCK if lock prefix is added.
1910 c. PREFIX_REP if rep/repne prefix is added.
1911 d. PREFIX_OTHER if other prefix is added.
1912 */
1913
1914 static enum PREFIX_GROUP
1915 add_prefix (unsigned int prefix)
1916 {
1917 enum PREFIX_GROUP ret = PREFIX_OTHER;
1918 unsigned int q;
1919
1920 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1921 && flag_code == CODE_64BIT)
1922 {
1923 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1924 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1925 && (prefix & (REX_R | REX_X | REX_B))))
1926 ret = PREFIX_EXIST;
1927 q = REX_PREFIX;
1928 }
1929 else
1930 {
1931 switch (prefix)
1932 {
1933 default:
1934 abort ();
1935
1936 case CS_PREFIX_OPCODE:
1937 case DS_PREFIX_OPCODE:
1938 case ES_PREFIX_OPCODE:
1939 case FS_PREFIX_OPCODE:
1940 case GS_PREFIX_OPCODE:
1941 case SS_PREFIX_OPCODE:
1942 q = SEG_PREFIX;
1943 break;
1944
1945 case REPNE_PREFIX_OPCODE:
1946 case REPE_PREFIX_OPCODE:
1947 q = REP_PREFIX;
1948 ret = PREFIX_REP;
1949 break;
1950
1951 case LOCK_PREFIX_OPCODE:
1952 q = LOCK_PREFIX;
1953 ret = PREFIX_LOCK;
1954 break;
1955
1956 case FWAIT_OPCODE:
1957 q = WAIT_PREFIX;
1958 break;
1959
1960 case ADDR_PREFIX_OPCODE:
1961 q = ADDR_PREFIX;
1962 break;
1963
1964 case DATA_PREFIX_OPCODE:
1965 q = DATA_PREFIX;
1966 break;
1967 }
1968 if (i.prefix[q] != 0)
1969 ret = PREFIX_EXIST;
1970 }
1971
1972 if (ret)
1973 {
1974 if (!i.prefix[q])
1975 ++i.prefixes;
1976 i.prefix[q] |= prefix;
1977 }
1978 else
1979 as_bad (_("same type of prefix used twice"));
1980
1981 return ret;
1982 }
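
/* REX prefixes accumulate: in 64-bit mode add_prefix (0x48) followed by
   add_prefix (0x41) leaves a single combined 0x49 byte (REX.W | REX.B)
   in i.prefix[REX_PREFIX].  Two prefixes from the same non-REX group,
   e.g. two segment overrides, instead yield PREFIX_EXIST and the
   "same type of prefix used twice" diagnostic.  */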
1983
1984 static void
1985 update_code_flag (int value, int check)
1986 {
1987 PRINTF_LIKE ((*as_error));
1988
1989 flag_code = (enum flag_code) value;
1990 if (flag_code == CODE_64BIT)
1991 {
1992 cpu_arch_flags.bitfield.cpu64 = 1;
1993 cpu_arch_flags.bitfield.cpuno64 = 0;
1994 }
1995 else
1996 {
1997 cpu_arch_flags.bitfield.cpu64 = 0;
1998 cpu_arch_flags.bitfield.cpuno64 = 1;
1999 }
2000 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2001 {
2002 if (check)
2003 as_error = as_fatal;
2004 else
2005 as_error = as_bad;
2006 (*as_error) (_("64bit mode not supported on `%s'."),
2007 cpu_arch_name ? cpu_arch_name : default_arch);
2008 }
2009 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2010 {
2011 if (check)
2012 as_error = as_fatal;
2013 else
2014 as_error = as_bad;
2015 (*as_error) (_("32bit mode not supported on `%s'."),
2016 cpu_arch_name ? cpu_arch_name : default_arch);
2017 }
2018 stackop_size = '\0';
2019 }
2020
2021 static void
2022 set_code_flag (int value)
2023 {
2024 update_code_flag (value, 0);
2025 }
2026
2027 static void
2028 set_16bit_gcc_code_flag (int new_code_flag)
2029 {
2030 flag_code = (enum flag_code) new_code_flag;
2031 if (flag_code != CODE_16BIT)
2032 abort ();
2033 cpu_arch_flags.bitfield.cpu64 = 0;
2034 cpu_arch_flags.bitfield.cpuno64 = 1;
2035 stackop_size = LONG_MNEM_SUFFIX;
2036 }
2037
2038 static void
2039 set_intel_syntax (int syntax_flag)
2040 {
2041 /* Find out if register prefixing is specified. */
2042 int ask_naked_reg = 0;
2043
2044 SKIP_WHITESPACE ();
2045 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2046 {
2047 char *string = input_line_pointer;
2048 int e = get_symbol_end ();
2049
2050 if (strcmp (string, "prefix") == 0)
2051 ask_naked_reg = 1;
2052 else if (strcmp (string, "noprefix") == 0)
2053 ask_naked_reg = -1;
2054 else
2055 as_bad (_("bad argument to syntax directive."));
2056 *input_line_pointer = e;
2057 }
2058 demand_empty_rest_of_line ();
2059
2060 intel_syntax = syntax_flag;
2061
2062 if (ask_naked_reg == 0)
2063 allow_naked_reg = (intel_syntax
2064 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2065 else
2066 allow_naked_reg = (ask_naked_reg < 0);
2067
2068 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2069
2070 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2071 identifier_chars['$'] = intel_syntax ? '$' : 0;
2072 register_prefix = allow_naked_reg ? "" : "%";
2073 }
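
/* Thus ".intel_syntax noprefix" selects Intel syntax with bare register
   names (e.g. "mov eax, 1"), while plain ".intel_syntax" keeps requiring
   the '%' register prefix on targets whose symbols have no leading
   character, such as ELF.  */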
2074
2075 static void
2076 set_intel_mnemonic (int mnemonic_flag)
2077 {
2078 intel_mnemonic = mnemonic_flag;
2079 }
2080
2081 static void
2082 set_allow_index_reg (int flag)
2083 {
2084 allow_index_reg = flag;
2085 }
2086
2087 static void
2088 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2089 {
2090 SKIP_WHITESPACE ();
2091
2092 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2093 {
2094 char *string = input_line_pointer;
2095 int e = get_symbol_end ();
2096
2097 if (strcmp (string, "none") == 0)
2098 sse_check = sse_check_none;
2099 else if (strcmp (string, "warning") == 0)
2100 sse_check = sse_check_warning;
2101 else if (strcmp (string, "error") == 0)
2102 sse_check = sse_check_error;
2103 else
2104 as_bad (_("bad argument to sse_check directive."));
2105 *input_line_pointer = e;
2106 }
2107 else
2108 as_bad (_("missing argument for sse_check directive"));
2109
2110 demand_empty_rest_of_line ();
2111 }
2112
2113 static void
2114 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2115 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2116 {
2117 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2118 static const char *arch;
2119
2120 /* Intel L1OM and K1OM are only supported on ELF. */
2121 if (!IS_ELF)
2122 return;
2123
2124 if (!arch)
2125 {
2126 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2127 use default_arch. */
2128 arch = cpu_arch_name;
2129 if (!arch)
2130 arch = default_arch;
2131 }
2132
2133 /* If we are targeting Intel L1OM, we must enable it. */
2134 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2135 || new_flag.bitfield.cpul1om)
2136 return;
2137
2138 /* If we are targeting Intel K1OM, we must enable it. */
2139 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2140 || new_flag.bitfield.cpuk1om)
2141 return;
2142
2143 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2144 #endif
2145 }
2146
2147 static void
2148 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2149 {
2150 SKIP_WHITESPACE ();
2151
2152 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2153 {
2154 char *string = input_line_pointer;
2155 int e = get_symbol_end ();
2156 unsigned int j;
2157 i386_cpu_flags flags;
2158
2159 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2160 {
2161 if (strcmp (string, cpu_arch[j].name) == 0)
2162 {
2163 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2164
2165 if (*string != '.')
2166 {
2167 cpu_arch_name = cpu_arch[j].name;
2168 cpu_sub_arch_name = NULL;
2169 cpu_arch_flags = cpu_arch[j].flags;
2170 if (flag_code == CODE_64BIT)
2171 {
2172 cpu_arch_flags.bitfield.cpu64 = 1;
2173 cpu_arch_flags.bitfield.cpuno64 = 0;
2174 }
2175 else
2176 {
2177 cpu_arch_flags.bitfield.cpu64 = 0;
2178 cpu_arch_flags.bitfield.cpuno64 = 1;
2179 }
2180 cpu_arch_isa = cpu_arch[j].type;
2181 cpu_arch_isa_flags = cpu_arch[j].flags;
2182 if (!cpu_arch_tune_set)
2183 {
2184 cpu_arch_tune = cpu_arch_isa;
2185 cpu_arch_tune_flags = cpu_arch_isa_flags;
2186 }
2187 break;
2188 }
2189
2190 if (!cpu_arch[j].negated)
2191 flags = cpu_flags_or (cpu_arch_flags,
2192 cpu_arch[j].flags);
2193 else
2194 flags = cpu_flags_and_not (cpu_arch_flags,
2195 cpu_arch[j].flags);
2196 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2197 {
2198 if (cpu_sub_arch_name)
2199 {
2200 char *name = cpu_sub_arch_name;
2201 cpu_sub_arch_name = concat (name,
2202 cpu_arch[j].name,
2203 (const char *) NULL);
2204 free (name);
2205 }
2206 else
2207 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2208 cpu_arch_flags = flags;
2209 cpu_arch_isa_flags = flags;
2210 }
2211 *input_line_pointer = e;
2212 demand_empty_rest_of_line ();
2213 return;
2214 }
2215 }
2216 if (j >= ARRAY_SIZE (cpu_arch))
2217 as_bad (_("no such architecture: `%s'"), string);
2218
2219 *input_line_pointer = e;
2220 }
2221 else
2222 as_bad (_("missing cpu architecture"));
2223
2224 no_cond_jump_promotion = 0;
2225 if (*input_line_pointer == ','
2226 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2227 {
2228 char *string = ++input_line_pointer;
2229 int e = get_symbol_end ();
2230
2231 if (strcmp (string, "nojumps") == 0)
2232 no_cond_jump_promotion = 1;
2233 else if (strcmp (string, "jumps") == 0)
2234 ;
2235 else
2236 as_bad (_("no such architecture modifier: `%s'"), string);
2237
2238 *input_line_pointer = e;
2239 }
2240
2241 demand_empty_rest_of_line ();
2242 }
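
/* Typical uses of the .arch directive handled above: ".arch i486" selects
   a base architecture and resets the ISA flags, ".arch .sse4.1" (note the
   leading dot) enables a single extension on top of the current flags,
   and a ",nojumps" modifier additionally disables promotion of
   out-of-range conditional jumps.  */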
2243
2244 enum bfd_architecture
2245 i386_arch (void)
2246 {
2247 if (cpu_arch_isa == PROCESSOR_L1OM)
2248 {
2249 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2250 || flag_code != CODE_64BIT)
2251 as_fatal (_("Intel L1OM is 64bit ELF only"));
2252 return bfd_arch_l1om;
2253 }
2254 else if (cpu_arch_isa == PROCESSOR_K1OM)
2255 {
2256 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2257 || flag_code != CODE_64BIT)
2258 as_fatal (_("Intel K1OM is 64bit ELF only"));
2259 return bfd_arch_k1om;
2260 }
2261 else
2262 return bfd_arch_i386;
2263 }
2264
2265 unsigned long
2266 i386_mach (void)
2267 {
2268 if (!strncmp (default_arch, "x86_64", 6))
2269 {
2270 if (cpu_arch_isa == PROCESSOR_L1OM)
2271 {
2272 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2273 || default_arch[6] != '\0')
2274 as_fatal (_("Intel L1OM is 64bit ELF only"));
2275 return bfd_mach_l1om;
2276 }
2277 else if (cpu_arch_isa == PROCESSOR_K1OM)
2278 {
2279 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2280 || default_arch[6] != '\0')
2281 as_fatal (_("Intel K1OM is 64bit ELF only"));
2282 return bfd_mach_k1om;
2283 }
2284 else if (default_arch[6] == '\0')
2285 return bfd_mach_x86_64;
2286 else
2287 return bfd_mach_x64_32;
2288 }
2289 else if (!strcmp (default_arch, "i386"))
2290 return bfd_mach_i386_i386;
2291 else
2292 as_fatal (_("unknown architecture"));
2293 }
2294 \f
2295 void
2296 md_begin (void)
2297 {
2298 const char *hash_err;
2299
2300 /* Initialize op_hash hash table. */
2301 op_hash = hash_new ();
2302
2303 {
2304 const insn_template *optab;
2305 templates *core_optab;
2306
2307 /* Setup for loop. */
2308 optab = i386_optab;
2309 core_optab = (templates *) xmalloc (sizeof (templates));
2310 core_optab->start = optab;
2311
2312 while (1)
2313 {
2314 ++optab;
2315 if (optab->name == NULL
2316 || strcmp (optab->name, (optab - 1)->name) != 0)
2317 {
2318 /* Different name: finish off the current template list, add it
2319 to the hash table, and begin a new list. */
2320 core_optab->end = optab;
2321 hash_err = hash_insert (op_hash,
2322 (optab - 1)->name,
2323 (void *) core_optab);
2324 if (hash_err)
2325 {
2326 as_fatal (_("internal Error: Can't hash %s: %s"),
2327 (optab - 1)->name,
2328 hash_err);
2329 }
2330 if (optab->name == NULL)
2331 break;
2332 core_optab = (templates *) xmalloc (sizeof (templates));
2333 core_optab->start = optab;
2334 }
2335 }
2336 }
2337
2338 /* Initialize reg_hash hash table. */
2339 reg_hash = hash_new ();
2340 {
2341 const reg_entry *regtab;
2342 unsigned int regtab_size = i386_regtab_size;
2343
2344 for (regtab = i386_regtab; regtab_size--; regtab++)
2345 {
2346 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2347 if (hash_err)
2348 as_fatal (_("internal Error: Can't hash %s: %s"),
2349 regtab->reg_name,
2350 hash_err);
2351 }
2352 }
2353
2354 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2355 {
2356 int c;
2357 char *p;
2358
2359 for (c = 0; c < 256; c++)
2360 {
2361 if (ISDIGIT (c))
2362 {
2363 digit_chars[c] = c;
2364 mnemonic_chars[c] = c;
2365 register_chars[c] = c;
2366 operand_chars[c] = c;
2367 }
2368 else if (ISLOWER (c))
2369 {
2370 mnemonic_chars[c] = c;
2371 register_chars[c] = c;
2372 operand_chars[c] = c;
2373 }
2374 else if (ISUPPER (c))
2375 {
2376 mnemonic_chars[c] = TOLOWER (c);
2377 register_chars[c] = mnemonic_chars[c];
2378 operand_chars[c] = c;
2379 }
2380
2381 if (ISALPHA (c) || ISDIGIT (c))
2382 identifier_chars[c] = c;
2383 else if (c >= 128)
2384 {
2385 identifier_chars[c] = c;
2386 operand_chars[c] = c;
2387 }
2388 }
2389
2390 #ifdef LEX_AT
2391 identifier_chars['@'] = '@';
2392 #endif
2393 #ifdef LEX_QM
2394 identifier_chars['?'] = '?';
2395 operand_chars['?'] = '?';
2396 #endif
2397 digit_chars['-'] = '-';
2398 mnemonic_chars['_'] = '_';
2399 mnemonic_chars['-'] = '-';
2400 mnemonic_chars['.'] = '.';
2401 identifier_chars['_'] = '_';
2402 identifier_chars['.'] = '.';
2403
2404 for (p = operand_special_chars; *p != '\0'; p++)
2405 operand_chars[(unsigned char) *p] = *p;
2406 }
2407
2408 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2409 if (IS_ELF)
2410 {
2411 record_alignment (text_section, 2);
2412 record_alignment (data_section, 2);
2413 record_alignment (bss_section, 2);
2414 }
2415 #endif
2416
2417 if (flag_code == CODE_64BIT)
2418 {
2419 #if defined (OBJ_COFF) && defined (TE_PE)
2420 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2421 ? 32 : 16);
2422 #else
2423 x86_dwarf2_return_column = 16;
2424 #endif
2425 x86_cie_data_alignment = -8;
2426 }
2427 else
2428 {
2429 x86_dwarf2_return_column = 8;
2430 x86_cie_data_alignment = -4;
2431 }
2432 }
2433
2434 void
2435 i386_print_statistics (FILE *file)
2436 {
2437 hash_print_statistics (file, "i386 opcode", op_hash);
2438 hash_print_statistics (file, "i386 register", reg_hash);
2439 }
2440 \f
2441 #ifdef DEBUG386
2442
2443 /* Debugging routines for md_assemble. */
2444 static void pte (insn_template *);
2445 static void pt (i386_operand_type);
2446 static void pe (expressionS *);
2447 static void ps (symbolS *);
2448
2449 static void
2450 pi (char *line, i386_insn *x)
2451 {
2452 unsigned int j;
2453
2454 fprintf (stdout, "%s: template ", line);
2455 pte (&x->tm);
2456 fprintf (stdout, " address: base %s index %s scale %x\n",
2457 x->base_reg ? x->base_reg->reg_name : "none",
2458 x->index_reg ? x->index_reg->reg_name : "none",
2459 x->log2_scale_factor);
2460 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2461 x->rm.mode, x->rm.reg, x->rm.regmem);
2462 fprintf (stdout, " sib: base %x index %x scale %x\n",
2463 x->sib.base, x->sib.index, x->sib.scale);
2464 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2465 (x->rex & REX_W) != 0,
2466 (x->rex & REX_R) != 0,
2467 (x->rex & REX_X) != 0,
2468 (x->rex & REX_B) != 0);
2469 for (j = 0; j < x->operands; j++)
2470 {
2471 fprintf (stdout, " #%d: ", j + 1);
2472 pt (x->types[j]);
2473 fprintf (stdout, "\n");
2474 if (x->types[j].bitfield.reg8
2475 || x->types[j].bitfield.reg16
2476 || x->types[j].bitfield.reg32
2477 || x->types[j].bitfield.reg64
2478 || x->types[j].bitfield.regmmx
2479 || x->types[j].bitfield.regxmm
2480 || x->types[j].bitfield.regymm
2481 || x->types[j].bitfield.sreg2
2482 || x->types[j].bitfield.sreg3
2483 || x->types[j].bitfield.control
2484 || x->types[j].bitfield.debug
2485 || x->types[j].bitfield.test)
2486 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2487 if (operand_type_check (x->types[j], imm))
2488 pe (x->op[j].imms);
2489 if (operand_type_check (x->types[j], disp))
2490 pe (x->op[j].disps);
2491 }
2492 }
2493
2494 static void
2495 pte (insn_template *t)
2496 {
2497 unsigned int j;
2498 fprintf (stdout, " %d operands ", t->operands);
2499 fprintf (stdout, "opcode %x ", t->base_opcode);
2500 if (t->extension_opcode != None)
2501 fprintf (stdout, "ext %x ", t->extension_opcode);
2502 if (t->opcode_modifier.d)
2503 fprintf (stdout, "D");
2504 if (t->opcode_modifier.w)
2505 fprintf (stdout, "W");
2506 fprintf (stdout, "\n");
2507 for (j = 0; j < t->operands; j++)
2508 {
2509 fprintf (stdout, " #%d type ", j + 1);
2510 pt (t->operand_types[j]);
2511 fprintf (stdout, "\n");
2512 }
2513 }
2514
2515 static void
2516 pe (expressionS *e)
2517 {
2518 fprintf (stdout, " operation %d\n", e->X_op);
2519 fprintf (stdout, " add_number %ld (%lx)\n",
2520 (long) e->X_add_number, (long) e->X_add_number);
2521 if (e->X_add_symbol)
2522 {
2523 fprintf (stdout, " add_symbol ");
2524 ps (e->X_add_symbol);
2525 fprintf (stdout, "\n");
2526 }
2527 if (e->X_op_symbol)
2528 {
2529 fprintf (stdout, " op_symbol ");
2530 ps (e->X_op_symbol);
2531 fprintf (stdout, "\n");
2532 }
2533 }
2534
2535 static void
2536 ps (symbolS *s)
2537 {
2538 fprintf (stdout, "%s type %s%s",
2539 S_GET_NAME (s),
2540 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2541 segment_name (S_GET_SEGMENT (s)));
2542 }
2543
2544 static struct type_name
2545 {
2546 i386_operand_type mask;
2547 const char *name;
2548 }
2549 const type_names[] =
2550 {
2551 { OPERAND_TYPE_REG8, "r8" },
2552 { OPERAND_TYPE_REG16, "r16" },
2553 { OPERAND_TYPE_REG32, "r32" },
2554 { OPERAND_TYPE_REG64, "r64" },
2555 { OPERAND_TYPE_IMM8, "i8" },
2556 { OPERAND_TYPE_IMM8S, "i8s" },
2557 { OPERAND_TYPE_IMM16, "i16" },
2558 { OPERAND_TYPE_IMM32, "i32" },
2559 { OPERAND_TYPE_IMM32S, "i32s" },
2560 { OPERAND_TYPE_IMM64, "i64" },
2561 { OPERAND_TYPE_IMM1, "i1" },
2562 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2563 { OPERAND_TYPE_DISP8, "d8" },
2564 { OPERAND_TYPE_DISP16, "d16" },
2565 { OPERAND_TYPE_DISP32, "d32" },
2566 { OPERAND_TYPE_DISP32S, "d32s" },
2567 { OPERAND_TYPE_DISP64, "d64" },
2568 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2569 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2570 { OPERAND_TYPE_CONTROL, "control reg" },
2571 { OPERAND_TYPE_TEST, "test reg" },
2572 { OPERAND_TYPE_DEBUG, "debug reg" },
2573 { OPERAND_TYPE_FLOATREG, "FReg" },
2574 { OPERAND_TYPE_FLOATACC, "FAcc" },
2575 { OPERAND_TYPE_SREG2, "SReg2" },
2576 { OPERAND_TYPE_SREG3, "SReg3" },
2577 { OPERAND_TYPE_ACC, "Acc" },
2578 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2579 { OPERAND_TYPE_REGMMX, "rMMX" },
2580 { OPERAND_TYPE_REGXMM, "rXMM" },
2581 { OPERAND_TYPE_REGYMM, "rYMM" },
2582 { OPERAND_TYPE_ESSEG, "es" },
2583 };
2584
2585 static void
2586 pt (i386_operand_type t)
2587 {
2588 unsigned int j;
2589 i386_operand_type a;
2590
2591 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2592 {
2593 a = operand_type_and (t, type_names[j].mask);
2594 if (!operand_type_all_zero (&a))
2595 fprintf (stdout, "%s, ", type_names[j].name);
2596 }
2597 fflush (stdout);
2598 }
2599
2600 #endif /* DEBUG386 */
2601 \f
2602 static bfd_reloc_code_real_type
2603 reloc (unsigned int size,
2604 int pcrel,
2605 int sign,
2606 bfd_reloc_code_real_type other)
2607 {
2608 if (other != NO_RELOC)
2609 {
2610 reloc_howto_type *rel;
2611
2612 if (size == 8)
2613 switch (other)
2614 {
2615 case BFD_RELOC_X86_64_GOT32:
2616 return BFD_RELOC_X86_64_GOT64;
2617 break;
2618 case BFD_RELOC_X86_64_PLTOFF64:
2619 return BFD_RELOC_X86_64_PLTOFF64;
2620 break;
2621 case BFD_RELOC_X86_64_GOTPC32:
2622 other = BFD_RELOC_X86_64_GOTPC64;
2623 break;
2624 case BFD_RELOC_X86_64_GOTPCREL:
2625 other = BFD_RELOC_X86_64_GOTPCREL64;
2626 break;
2627 case BFD_RELOC_X86_64_TPOFF32:
2628 other = BFD_RELOC_X86_64_TPOFF64;
2629 break;
2630 case BFD_RELOC_X86_64_DTPOFF32:
2631 other = BFD_RELOC_X86_64_DTPOFF64;
2632 break;
2633 default:
2634 break;
2635 }
2636
2637 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2638 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2639 sign = -1;
2640
2641 rel = bfd_reloc_type_lookup (stdoutput, other);
2642 if (!rel)
2643 as_bad (_("unknown relocation (%u)"), other);
2644 else if (size != bfd_get_reloc_size (rel))
2645 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2646 bfd_get_reloc_size (rel),
2647 size);
2648 else if (pcrel && !rel->pc_relative)
2649 as_bad (_("non-pc-relative relocation for pc-relative field"));
2650 else if ((rel->complain_on_overflow == complain_overflow_signed
2651 && !sign)
2652 || (rel->complain_on_overflow == complain_overflow_unsigned
2653 && sign > 0))
2654 as_bad (_("relocated field and relocation type differ in signedness"));
2655 else
2656 return other;
2657 return NO_RELOC;
2658 }
2659
2660 if (pcrel)
2661 {
2662 if (!sign)
2663 as_bad (_("there are no unsigned pc-relative relocations"));
2664 switch (size)
2665 {
2666 case 1: return BFD_RELOC_8_PCREL;
2667 case 2: return BFD_RELOC_16_PCREL;
2668 case 4: return BFD_RELOC_32_PCREL;
2669 case 8: return BFD_RELOC_64_PCREL;
2670 }
2671 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2672 }
2673 else
2674 {
2675 if (sign > 0)
2676 switch (size)
2677 {
2678 case 4: return BFD_RELOC_X86_64_32S;
2679 }
2680 else
2681 switch (size)
2682 {
2683 case 1: return BFD_RELOC_8;
2684 case 2: return BFD_RELOC_16;
2685 case 4: return BFD_RELOC_32;
2686 case 8: return BFD_RELOC_64;
2687 }
2688 as_bad (_("cannot do %s %u byte relocation"),
2689 sign > 0 ? "signed" : "unsigned", size);
2690 }
2691
2692 return NO_RELOC;
2693 }
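
/* For example, with OTHER == NO_RELOC: reloc (4, 1, 1, NO_RELOC) yields
   BFD_RELOC_32_PCREL, reloc (4, 0, 1, NO_RELOC) yields the sign-extended
   BFD_RELOC_X86_64_32S, and reloc (8, 0, 0, NO_RELOC) yields BFD_RELOC_64.
   Combinations that cannot be represented produce a diagnostic and
   NO_RELOC.  */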
2694
2695 /* Here we decide which fixups can be adjusted to make them relative to
2696 the beginning of the section instead of the symbol. Basically we need
2697 to make sure that the dynamic relocations are done correctly, so in
2698 some cases we force the original symbol to be used. */
2699
2700 int
2701 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2702 {
2703 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2704 if (!IS_ELF)
2705 return 1;
2706
2707 /* Don't adjust pc-relative references to merge sections in 64-bit
2708 mode. */
2709 if (use_rela_relocations
2710 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2711 && fixP->fx_pcrel)
2712 return 0;
2713
2714 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2715 and changed later by validate_fix. */
2716 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2717 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2718 return 0;
2719
2720 /* adjust_reloc_syms doesn't know about the GOT. */
2721 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2722 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2723 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2724 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2725 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2726 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2727 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2728 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2729 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2730 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2731 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2732 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2733 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2734 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2735 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2736 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2737 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2738 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2739 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2740 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2741 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2742 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2743 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2744 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2745 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2746 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2747 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2748 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2749 return 0;
2750 #endif
2751 return 1;
2752 }
2753
2754 static int
2755 intel_float_operand (const char *mnemonic)
2756 {
2757 /* Note that the value returned is meaningful only for opcodes with (memory)
2758 operands, hence the code here is free to improperly handle opcodes that
2759 have no operands (for better performance and smaller code). */
2760
2761 if (mnemonic[0] != 'f')
2762 return 0; /* non-math */
2763
2764 switch (mnemonic[1])
2765 {
2766 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2767 the fs segment override prefix are not handled here because no
2768 call path lets opcodes without operands get here. */
2769 case 'i':
2770 return 2 /* integer op */;
2771 case 'l':
2772 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2773 return 3; /* fldcw/fldenv */
2774 break;
2775 case 'n':
2776 if (mnemonic[2] != 'o' /* fnop */)
2777 return 3; /* non-waiting control op */
2778 break;
2779 case 'r':
2780 if (mnemonic[2] == 's')
2781 return 3; /* frstor/frstpm */
2782 break;
2783 case 's':
2784 if (mnemonic[2] == 'a')
2785 return 3; /* fsave */
2786 if (mnemonic[2] == 't')
2787 {
2788 switch (mnemonic[3])
2789 {
2790 case 'c': /* fstcw */
2791 case 'd': /* fstdw */
2792 case 'e': /* fstenv */
2793 case 's': /* fsts[gw] */
2794 return 3;
2795 }
2796 }
2797 break;
2798 case 'x':
2799 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2800 return 0; /* fxsave/fxrstor are not really math ops */
2801 break;
2802 }
2803
2804 return 1;
2805 }
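
/* Examples of the classification above: intel_float_operand ("fild")
   returns 2 (integer operand), "fldcw" and "fnsave" return 3 (control
   operation), "fadd" returns 1 (plain FP operation), and a non-'f'
   mnemonic such as "mov" returns 0.  */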
2806
2807 /* Build the VEX prefix. */
2808
2809 static void
2810 build_vex_prefix (const insn_template *t)
2811 {
2812 unsigned int register_specifier;
2813 unsigned int implied_prefix;
2814 unsigned int vector_length;
2815
2816 /* Check register specifier. */
2817 if (i.vex.register_specifier)
2818 {
2819 register_specifier = i.vex.register_specifier->reg_num;
2820 if ((i.vex.register_specifier->reg_flags & RegRex))
2821 register_specifier += 8;
2822 register_specifier = ~register_specifier & 0xf;
2823 }
2824 else
2825 register_specifier = 0xf;
2826
2827 /* Use the 2-byte VEX prefix by swapping the destination and source
2828 operands. */
2829 if (!i.swap_operand
2830 && i.operands == i.reg_operands
2831 && i.tm.opcode_modifier.vexopcode == VEX0F
2832 && i.tm.opcode_modifier.s
2833 && i.rex == REX_B)
2834 {
2835 unsigned int xchg = i.operands - 1;
2836 union i386_op temp_op;
2837 i386_operand_type temp_type;
2838
2839 temp_type = i.types[xchg];
2840 i.types[xchg] = i.types[0];
2841 i.types[0] = temp_type;
2842 temp_op = i.op[xchg];
2843 i.op[xchg] = i.op[0];
2844 i.op[0] = temp_op;
2845
2846 gas_assert (i.rm.mode == 3);
2847
2848 i.rex = REX_R;
2849 xchg = i.rm.regmem;
2850 i.rm.regmem = i.rm.reg;
2851 i.rm.reg = xchg;
2852
2853 /* Use the next insn. */
2854 i.tm = t[1];
2855 }
2856
2857 if (i.tm.opcode_modifier.vex == VEXScalar)
2858 vector_length = avxscalar;
2859 else
2860 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2861
2862 switch ((i.tm.base_opcode >> 8) & 0xff)
2863 {
2864 case 0:
2865 implied_prefix = 0;
2866 break;
2867 case DATA_PREFIX_OPCODE:
2868 implied_prefix = 1;
2869 break;
2870 case REPE_PREFIX_OPCODE:
2871 implied_prefix = 2;
2872 break;
2873 case REPNE_PREFIX_OPCODE:
2874 implied_prefix = 3;
2875 break;
2876 default:
2877 abort ();
2878 }
2879
2880 /* Use 2-byte VEX prefix if possible. */
2881 if (i.tm.opcode_modifier.vexopcode == VEX0F
2882 && i.tm.opcode_modifier.vexw != VEXW1
2883 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2884 {
2885 /* 2-byte VEX prefix. */
2886 unsigned int r;
2887
2888 i.vex.length = 2;
2889 i.vex.bytes[0] = 0xc5;
2890
2891 /* Check the REX.R bit. */
2892 r = (i.rex & REX_R) ? 0 : 1;
2893 i.vex.bytes[1] = (r << 7
2894 | register_specifier << 3
2895 | vector_length << 2
2896 | implied_prefix);
2897 }
2898 else
2899 {
2900 /* 3-byte VEX prefix. */
2901 unsigned int m, w;
2902
2903 i.vex.length = 3;
2904
2905 switch (i.tm.opcode_modifier.vexopcode)
2906 {
2907 case VEX0F:
2908 m = 0x1;
2909 i.vex.bytes[0] = 0xc4;
2910 break;
2911 case VEX0F38:
2912 m = 0x2;
2913 i.vex.bytes[0] = 0xc4;
2914 break;
2915 case VEX0F3A:
2916 m = 0x3;
2917 i.vex.bytes[0] = 0xc4;
2918 break;
2919 case XOP08:
2920 m = 0x8;
2921 i.vex.bytes[0] = 0x8f;
2922 break;
2923 case XOP09:
2924 m = 0x9;
2925 i.vex.bytes[0] = 0x8f;
2926 break;
2927 case XOP0A:
2928 m = 0xa;
2929 i.vex.bytes[0] = 0x8f;
2930 break;
2931 default:
2932 abort ();
2933 }
2934
2935 /* The high 3 bits of the second VEX byte are the one's complement
2936 of the RXB bits from REX. */
2937 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2938
2939 /* Check the REX.W bit. */
2940 w = (i.rex & REX_W) ? 1 : 0;
2941 if (i.tm.opcode_modifier.vexw)
2942 {
2943 if (w)
2944 abort ();
2945
2946 if (i.tm.opcode_modifier.vexw == VEXW1)
2947 w = 1;
2948 }
2949
2950 i.vex.bytes[2] = (w << 7
2951 | register_specifier << 3
2952 | vector_length << 2
2953 | implied_prefix);
2954 }
2955 }
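
/* For reference, the prefix bytes assembled above are laid out as
     2-byte VEX:  0xC5  | R vvvv L pp |
     3-byte VEX:  0xC4  | R X B m-mmmm |  | W vvvv L pp |
   where R/X/B are the inverted REX bits, vvvv is the inverted extra
   register specifier, L selects 128- vs 256-bit vectors and pp encodes
   the implied 66/F3/F2 prefix.  XOP encodings use 0x8F in place of
   0xC4.  */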
2956
2957 static void
2958 process_immext (void)
2959 {
2960 expressionS *exp;
2961
2962 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2963 {
2964 /* SSE3 instructions have fixed register operands, with an opcode
2965 suffix coded in the same place as an 8-bit immediate field
2966 would be. Here we check those operands and then remove them
2967 from the operand list. */
2968 unsigned int x;
2969
2970 for (x = 0; x < i.operands; x++)
2971 if (i.op[x].regs->reg_num != x)
2972 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2973 register_prefix, i.op[x].regs->reg_name, x + 1,
2974 i.tm.name);
2975
2976 i.operands = 0;
2977 }
2978
2979 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2980 which is coded in the same place as an 8-bit immediate field
2981 would be. Here we fake an 8-bit immediate operand from the
2982 opcode suffix stored in tm.extension_opcode.
2983
2984 AVX instructions also use this encoding for some
2985 3-operand instructions. */
2986
2987 gas_assert (i.imm_operands == 0
2988 && (i.operands <= 2
2989 || (i.tm.opcode_modifier.vex
2990 && i.operands <= 4)));
2991
2992 exp = &im_expressions[i.imm_operands++];
2993 i.op[i.operands].imms = exp;
2994 i.types[i.operands] = imm8;
2995 i.operands++;
2996 exp->X_op = O_constant;
2997 exp->X_add_number = i.tm.extension_opcode;
2998 i.tm.extension_opcode = None;
2999 }
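
/* E.g. for the AMD 3DNow! instructions the trailing opcode byte that
   follows the ModRM byte is kept in tm.extension_opcode; the fake imm8
   operand created above is what eventually emits that byte.  */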
3000
3001 /* This is the guts of the machine-dependent assembler. LINE points to a
3002 machine dependent instruction. This function is supposed to emit
3003 the frags/bytes it assembles to. */
3004
3005 void
3006 md_assemble (char *line)
3007 {
3008 unsigned int j;
3009 char mnemonic[MAX_MNEM_SIZE];
3010 const insn_template *t;
3011
3012 /* Initialize globals. */
3013 memset (&i, '\0', sizeof (i));
3014 for (j = 0; j < MAX_OPERANDS; j++)
3015 i.reloc[j] = NO_RELOC;
3016 memset (disp_expressions, '\0', sizeof (disp_expressions));
3017 memset (im_expressions, '\0', sizeof (im_expressions));
3018 save_stack_p = save_stack;
3019
3020 /* First parse an instruction mnemonic & call i386_operand for the operands.
3021 We assume that the scrubber has arranged it so that line[0] is the valid
3022 start of a (possibly prefixed) mnemonic. */
3023
3024 line = parse_insn (line, mnemonic);
3025 if (line == NULL)
3026 return;
3027
3028 line = parse_operands (line, mnemonic);
3029 this_operand = -1;
3030 if (line == NULL)
3031 return;
3032
3033 /* Now we've parsed the mnemonic into a set of templates, and have the
3034 operands at hand. */
3035
3036 /* All intel opcodes have reversed operands except for "bound" and
3037 "enter". We also don't reverse intersegment "jmp" and "call"
3038 instructions with 2 immediate operands so that the immediate segment
3039 precedes the offset, as it does when in AT&T mode. */
3040 if (intel_syntax
3041 && i.operands > 1
3042 && (strcmp (mnemonic, "bound") != 0)
3043 && (strcmp (mnemonic, "invlpga") != 0)
3044 && !(operand_type_check (i.types[0], imm)
3045 && operand_type_check (i.types[1], imm)))
3046 swap_operands ();
3047
3048 /* The order of the immediates should be reversed for the
3049 two-immediate extrq and insertq instructions. */
3050 if (i.imm_operands == 2
3051 && (strcmp (mnemonic, "extrq") == 0
3052 || strcmp (mnemonic, "insertq") == 0))
3053 swap_2_operands (0, 1);
3054
3055 if (i.imm_operands)
3056 optimize_imm ();
3057
3058 /* Don't optimize displacement for movabs since it only takes 64bit
3059 displacement. */
3060 if (i.disp_operands
3061 && i.disp_encoding != disp_encoding_32bit
3062 && (flag_code != CODE_64BIT
3063 || strcmp (mnemonic, "movabs") != 0))
3064 optimize_disp ();
3065
3066 /* Next, we find a template that matches the given insn,
3067 making sure the overlap of the given operands types is consistent
3068 with the template operand types. */
3069
3070 if (!(t = match_template ()))
3071 return;
3072
3073 if (sse_check != sse_check_none
3074 && !i.tm.opcode_modifier.noavx
3075 && (i.tm.cpu_flags.bitfield.cpusse
3076 || i.tm.cpu_flags.bitfield.cpusse2
3077 || i.tm.cpu_flags.bitfield.cpusse3
3078 || i.tm.cpu_flags.bitfield.cpussse3
3079 || i.tm.cpu_flags.bitfield.cpusse4_1
3080 || i.tm.cpu_flags.bitfield.cpusse4_2))
3081 {
3082 (sse_check == sse_check_warning
3083 ? as_warn
3084 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3085 }
3086
3087 /* Zap movzx and movsx suffix. The suffix has been set from
3088 "word ptr" or "byte ptr" on the source operand in Intel syntax
3089 or extracted from mnemonic in AT&T syntax. But we'll use
3090 the destination register to choose the suffix for encoding. */
3091 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3092 {
3093 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3094 there is no suffix, the default will be byte extension. */
3095 if (i.reg_operands != 2
3096 && !i.suffix
3097 && intel_syntax)
3098 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3099
3100 i.suffix = 0;
3101 }
3102
3103 if (i.tm.opcode_modifier.fwait)
3104 if (!add_prefix (FWAIT_OPCODE))
3105 return;
3106
3107 /* Check for lock without a lockable instruction. Destination operand
3108 must be memory unless it is xchg (0x86). */
3109 if (i.prefix[LOCK_PREFIX]
3110 && (!i.tm.opcode_modifier.islockable
3111 || i.mem_operands == 0
3112 || (i.tm.base_opcode != 0x86
3113 && !operand_type_check (i.types[i.operands - 1], anymem))))
3114 {
3115 as_bad (_("expecting lockable instruction after `lock'"));
3116 return;
3117 }
3118
3119 /* Check string instruction segment overrides. */
3120 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3121 {
3122 if (!check_string ())
3123 return;
3124 i.disp_operands = 0;
3125 }
3126
3127 if (!process_suffix ())
3128 return;
3129
3130 /* Update operand types. */
3131 for (j = 0; j < i.operands; j++)
3132 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3133
3134 /* Make still unresolved immediate matches conform to size of immediate
3135 given in i.suffix. */
3136 if (!finalize_imm ())
3137 return;
3138
3139 if (i.types[0].bitfield.imm1)
3140 i.imm_operands = 0; /* kludge for shift insns. */
3141
3142 /* We only need to check those implicit registers for instructions
3143 with 3 operands or less. */
3144 if (i.operands <= 3)
3145 for (j = 0; j < i.operands; j++)
3146 if (i.types[j].bitfield.inoutportreg
3147 || i.types[j].bitfield.shiftcount
3148 || i.types[j].bitfield.acc
3149 || i.types[j].bitfield.floatacc)
3150 i.reg_operands--;
3151
3152 /* ImmExt should be processed after SSE2AVX. */
3153 if (!i.tm.opcode_modifier.sse2avx
3154 && i.tm.opcode_modifier.immext)
3155 process_immext ();
3156
3157 /* For insns with operands there are more diddles to do to the opcode. */
3158 if (i.operands)
3159 {
3160 if (!process_operands ())
3161 return;
3162 }
3163 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3164 {
3165 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3166 as_warn (_("translating to `%sp'"), i.tm.name);
3167 }
3168
3169 if (i.tm.opcode_modifier.vex)
3170 build_vex_prefix (t);
3171
3172 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3173 instructions may define INT_OPCODE as well, so avoid this corner
3174 case for those instructions that use MODRM. */
3175 if (i.tm.base_opcode == INT_OPCODE
3176 && !i.tm.opcode_modifier.modrm
3177 && i.op[0].imms->X_add_number == 3)
3178 {
3179 i.tm.base_opcode = INT3_OPCODE;
3180 i.imm_operands = 0;
3181 }
3182
3183 if ((i.tm.opcode_modifier.jump
3184 || i.tm.opcode_modifier.jumpbyte
3185 || i.tm.opcode_modifier.jumpdword)
3186 && i.op[0].disps->X_op == O_constant)
3187 {
3188 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3189 the absolute address given by the constant. Since ix86 jumps and
3190 calls are pc relative, we need to generate a reloc. */
3191 i.op[0].disps->X_add_symbol = &abs_symbol;
3192 i.op[0].disps->X_op = O_symbol;
3193 }
3194
3195 if (i.tm.opcode_modifier.rex64)
3196 i.rex |= REX_W;
3197
3198 /* For 8 bit registers we need an empty rex prefix. Also if the
3199 instruction already has a prefix, we need to convert old
3200 registers to new ones. */
3201
3202 if ((i.types[0].bitfield.reg8
3203 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3204 || (i.types[1].bitfield.reg8
3205 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3206 || ((i.types[0].bitfield.reg8
3207 || i.types[1].bitfield.reg8)
3208 && i.rex != 0))
3209 {
3210 int x;
3211
3212 i.rex |= REX_OPCODE;
3213 for (x = 0; x < 2; x++)
3214 {
3215 /* Look for 8 bit operand that uses old registers. */
3216 if (i.types[x].bitfield.reg8
3217 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3218 {
3219 /* In case it is "hi" register, give up. */
3220 if (i.op[x].regs->reg_num > 3)
3221 as_bad (_("can't encode register '%s%s' in an "
3222 "instruction requiring REX prefix."),
3223 register_prefix, i.op[x].regs->reg_name);
3224
3225 /* Otherwise it is equivalent to the extended register.
3226 Since the encoding doesn't change this is merely
3227 cosmetic cleanup for debug output. */
3228
3229 i.op[x].regs = i.op[x].regs + 8;
3230 }
3231 }
3232 }
3233
3234 if (i.rex != 0)
3235 add_prefix (REX_OPCODE | i.rex);
3236
3237 /* We are ready to output the insn. */
3238 output_insn ();
3239 }
3240
3241 static char *
3242 parse_insn (char *line, char *mnemonic)
3243 {
3244 char *l = line;
3245 char *token_start = l;
3246 char *mnem_p;
3247 int supported;
3248 const insn_template *t;
3249 char *dot_p = NULL;
3250
3251 /* Non-zero if we found a prefix only acceptable with string insns. */
3252 const char *expecting_string_instruction = NULL;
3253
3254 while (1)
3255 {
3256 mnem_p = mnemonic;
3257 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3258 {
3259 if (*mnem_p == '.')
3260 dot_p = mnem_p;
3261 mnem_p++;
3262 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3263 {
3264 as_bad (_("no such instruction: `%s'"), token_start);
3265 return NULL;
3266 }
3267 l++;
3268 }
3269 if (!is_space_char (*l)
3270 && *l != END_OF_INSN
3271 && (intel_syntax
3272 || (*l != PREFIX_SEPARATOR
3273 && *l != ',')))
3274 {
3275 as_bad (_("invalid character %s in mnemonic"),
3276 output_invalid (*l));
3277 return NULL;
3278 }
3279 if (token_start == l)
3280 {
3281 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3282 as_bad (_("expecting prefix; got nothing"));
3283 else
3284 as_bad (_("expecting mnemonic; got nothing"));
3285 return NULL;
3286 }
3287
3288 /* Look up instruction (or prefix) via hash table. */
3289 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3290
3291 if (*l != END_OF_INSN
3292 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3293 && current_templates
3294 && current_templates->start->opcode_modifier.isprefix)
3295 {
3296 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3297 {
3298 as_bad ((flag_code != CODE_64BIT
3299 ? _("`%s' is only supported in 64-bit mode")
3300 : _("`%s' is not supported in 64-bit mode")),
3301 current_templates->start->name);
3302 return NULL;
3303 }
3304 /* If we are in 16-bit mode, do not allow addr16 or data16.
3305 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3306 if ((current_templates->start->opcode_modifier.size16
3307 || current_templates->start->opcode_modifier.size32)
3308 && flag_code != CODE_64BIT
3309 && (current_templates->start->opcode_modifier.size32
3310 ^ (flag_code == CODE_16BIT)))
3311 {
3312 as_bad (_("redundant %s prefix"),
3313 current_templates->start->name);
3314 return NULL;
3315 }
3316 /* Add prefix, checking for repeated prefixes. */
3317 switch (add_prefix (current_templates->start->base_opcode))
3318 {
3319 case PREFIX_EXIST:
3320 return NULL;
3321 case PREFIX_REP:
3322 expecting_string_instruction = current_templates->start->name;
3323 break;
3324 default:
3325 break;
3326 }
3327 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3328 token_start = ++l;
3329 }
3330 else
3331 break;
3332 }
3333
3334 if (!current_templates)
3335 {
3336 /* Check if we should swap operand or force 32bit displacement in
3337 encoding. */
3338 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3339 i.swap_operand = 1;
3340 else if (mnem_p - 3 == dot_p
3341 && dot_p[1] == 'd'
3342 && dot_p[2] == '8')
3343 i.disp_encoding = disp_encoding_8bit;
3344 else if (mnem_p - 4 == dot_p
3345 && dot_p[1] == 'd'
3346 && dot_p[2] == '3'
3347 && dot_p[3] == '2')
3348 i.disp_encoding = disp_encoding_32bit;
3349 else
3350 goto check_suffix;
3351 mnem_p = dot_p;
3352 *dot_p = '\0';
3353 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3354 }
3355
3356 if (!current_templates)
3357 {
3358 check_suffix:
3359 /* See if we can get a match by trimming off a suffix. */
3360 switch (mnem_p[-1])
3361 {
3362 case WORD_MNEM_SUFFIX:
3363 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3364 i.suffix = SHORT_MNEM_SUFFIX;
3365 else
3366 case BYTE_MNEM_SUFFIX:
3367 case QWORD_MNEM_SUFFIX:
3368 i.suffix = mnem_p[-1];
3369 mnem_p[-1] = '\0';
3370 current_templates = (const templates *) hash_find (op_hash,
3371 mnemonic);
3372 break;
3373 case SHORT_MNEM_SUFFIX:
3374 case LONG_MNEM_SUFFIX:
3375 if (!intel_syntax)
3376 {
3377 i.suffix = mnem_p[-1];
3378 mnem_p[-1] = '\0';
3379 current_templates = (const templates *) hash_find (op_hash,
3380 mnemonic);
3381 }
3382 break;
3383
3384 /* Intel Syntax. */
3385 case 'd':
3386 if (intel_syntax)
3387 {
3388 if (intel_float_operand (mnemonic) == 1)
3389 i.suffix = SHORT_MNEM_SUFFIX;
3390 else
3391 i.suffix = LONG_MNEM_SUFFIX;
3392 mnem_p[-1] = '\0';
3393 current_templates = (const templates *) hash_find (op_hash,
3394 mnemonic);
3395 }
3396 break;
3397 }
3398 if (!current_templates)
3399 {
3400 as_bad (_("no such instruction: `%s'"), token_start);
3401 return NULL;
3402 }
3403 }
3404
3405 if (current_templates->start->opcode_modifier.jump
3406 || current_templates->start->opcode_modifier.jumpbyte)
3407 {
3408 /* Check for a branch hint. We allow ",pt" and ",pn" for
3409 predict taken and predict not taken respectively.
3410 I'm not sure that branch hints actually do anything on loop
3411 and jcxz insns (JumpByte) for current Pentium4 chips. They
3412 may work in the future and it doesn't hurt to accept them
3413 now. */
3414 if (l[0] == ',' && l[1] == 'p')
3415 {
3416 if (l[2] == 't')
3417 {
3418 if (!add_prefix (DS_PREFIX_OPCODE))
3419 return NULL;
3420 l += 3;
3421 }
3422 else if (l[2] == 'n')
3423 {
3424 if (!add_prefix (CS_PREFIX_OPCODE))
3425 return NULL;
3426 l += 3;
3427 }
3428 }
3429 }
3430 /* Any other comma loses. */
3431 if (*l == ',')
3432 {
3433 as_bad (_("invalid character %s in mnemonic"),
3434 output_invalid (*l));
3435 return NULL;
3436 }
3437
3438 /* Check if instruction is supported on specified architecture. */
3439 supported = 0;
3440 for (t = current_templates->start; t < current_templates->end; ++t)
3441 {
3442 supported |= cpu_flags_match (t);
3443 if (supported == CPU_FLAGS_PERFECT_MATCH)
3444 goto skip;
3445 }
3446
3447 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3448 {
3449 as_bad (flag_code == CODE_64BIT
3450 ? _("`%s' is not supported in 64-bit mode")
3451 : _("`%s' is only supported in 64-bit mode"),
3452 current_templates->start->name);
3453 return NULL;
3454 }
3455 if (supported != CPU_FLAGS_PERFECT_MATCH)
3456 {
3457 as_bad (_("`%s' is not supported on `%s%s'"),
3458 current_templates->start->name,
3459 cpu_arch_name ? cpu_arch_name : default_arch,
3460 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3461 return NULL;
3462 }
3463
3464 skip:
3465 if (!cpu_arch_flags.bitfield.cpui386
3466 && (flag_code != CODE_16BIT))
3467 {
3468 as_warn (_("use .code16 to ensure correct addressing mode"));
3469 }
3470
3471 /* Check for rep/repne without a string instruction. */
3472 if (expecting_string_instruction)
3473 {
3474 static templates override;
3475
3476 for (t = current_templates->start; t < current_templates->end; ++t)
3477 if (t->opcode_modifier.isstring)
3478 break;
3479 if (t >= current_templates->end)
3480 {
3481 as_bad (_("expecting string instruction after `%s'"),
3482 expecting_string_instruction);
3483 return NULL;
3484 }
3485 for (override.start = t; t < current_templates->end; ++t)
3486 if (!t->opcode_modifier.isstring)
3487 break;
3488 override.end = t;
3489 current_templates = &override;
3490 }
3491
3492 return l;
3493 }
3494
3495 static char *
3496 parse_operands (char *l, const char *mnemonic)
3497 {
3498 char *token_start;
3499
3500 /* 1 if operand is pending after ','. */
3501 unsigned int expecting_operand = 0;
3502
3503 /* Non-zero if operand parens not balanced. */
3504 unsigned int paren_not_balanced;
3505
3506 while (*l != END_OF_INSN)
3507 {
3508 /* Skip optional white space before operand. */
3509 if (is_space_char (*l))
3510 ++l;
3511 if (!is_operand_char (*l) && *l != END_OF_INSN)
3512 {
3513 as_bad (_("invalid character %s before operand %d"),
3514 output_invalid (*l),
3515 i.operands + 1);
3516 return NULL;
3517 }
3518 token_start = l; /* after white space */
3519 paren_not_balanced = 0;
3520 while (paren_not_balanced || *l != ',')
3521 {
3522 if (*l == END_OF_INSN)
3523 {
3524 if (paren_not_balanced)
3525 {
3526 if (!intel_syntax)
3527 as_bad (_("unbalanced parenthesis in operand %d."),
3528 i.operands + 1);
3529 else
3530 as_bad (_("unbalanced brackets in operand %d."),
3531 i.operands + 1);
3532 return NULL;
3533 }
3534 else
3535 break; /* we are done */
3536 }
3537 else if (!is_operand_char (*l) && !is_space_char (*l))
3538 {
3539 as_bad (_("invalid character %s in operand %d"),
3540 output_invalid (*l),
3541 i.operands + 1);
3542 return NULL;
3543 }
3544 if (!intel_syntax)
3545 {
3546 if (*l == '(')
3547 ++paren_not_balanced;
3548 if (*l == ')')
3549 --paren_not_balanced;
3550 }
3551 else
3552 {
3553 if (*l == '[')
3554 ++paren_not_balanced;
3555 if (*l == ']')
3556 --paren_not_balanced;
3557 }
3558 l++;
3559 }
3560 if (l != token_start)
3561 { /* Yes, we've read in another operand. */
3562 unsigned int operand_ok;
3563 this_operand = i.operands++;
3564 i.types[this_operand].bitfield.unspecified = 1;
3565 if (i.operands > MAX_OPERANDS)
3566 {
3567 as_bad (_("spurious operands; (%d operands/instruction max)"),
3568 MAX_OPERANDS);
3569 return NULL;
3570 }
3571 /* Now parse operand adding info to 'i' as we go along. */
3572 END_STRING_AND_SAVE (l);
3573
3574 if (intel_syntax)
3575 operand_ok =
3576 i386_intel_operand (token_start,
3577 intel_float_operand (mnemonic));
3578 else
3579 operand_ok = i386_att_operand (token_start);
3580
3581 RESTORE_END_STRING (l);
3582 if (!operand_ok)
3583 return NULL;
3584 }
3585 else
3586 {
3587 if (expecting_operand)
3588 {
3589 expecting_operand_after_comma:
3590 as_bad (_("expecting operand after ','; got nothing"));
3591 return NULL;
3592 }
3593 if (*l == ',')
3594 {
3595 as_bad (_("expecting operand before ','; got nothing"));
3596 return NULL;
3597 }
3598 }
3599
3600 /* Now *l must be either ',' or END_OF_INSN. */
3601 if (*l == ',')
3602 {
3603 if (*++l == END_OF_INSN)
3604 {
3605 /* A trailing comma with nothing after it: complain. */
3606 goto expecting_operand_after_comma;
3607 }
3608 expecting_operand = 1;
3609 }
3610 }
3611 return l;
3612 }
3613
3614 static void
3615 swap_2_operands (int xchg1, int xchg2)
3616 {
3617 union i386_op temp_op;
3618 i386_operand_type temp_type;
3619 enum bfd_reloc_code_real temp_reloc;
3620
3621 temp_type = i.types[xchg2];
3622 i.types[xchg2] = i.types[xchg1];
3623 i.types[xchg1] = temp_type;
3624 temp_op = i.op[xchg2];
3625 i.op[xchg2] = i.op[xchg1];
3626 i.op[xchg1] = temp_op;
3627 temp_reloc = i.reloc[xchg2];
3628 i.reloc[xchg2] = i.reloc[xchg1];
3629 i.reloc[xchg1] = temp_reloc;
3630 }
3631
3632 static void
3633 swap_operands (void)
3634 {
3635 switch (i.operands)
3636 {
3637 case 5:
3638 case 4:
3639 swap_2_operands (1, i.operands - 2);
3640 case 3:
3641 case 2:
3642 swap_2_operands (0, i.operands - 1);
3643 break;
3644 default:
3645 abort ();
3646 }
3647
3648 if (i.mem_operands == 2)
3649 {
3650 const seg_entry *temp_seg;
3651 temp_seg = i.seg[0];
3652 i.seg[0] = i.seg[1];
3653 i.seg[1] = temp_seg;
3654 }
3655 }
3656
3657 /* Try to ensure constant immediates are represented in the smallest
3658 opcode possible. */
3659 static void
3660 optimize_imm (void)
3661 {
3662 char guess_suffix = 0;
3663 int op;
3664
3665 if (i.suffix)
3666 guess_suffix = i.suffix;
3667 else if (i.reg_operands)
3668 {
3669 /* Figure out a suffix from the last register operand specified.
3670 We can't do this properly yet, i.e. excluding InOutPortReg,
3671 but the following works for instructions with immediates.
3672 In any case, we can't set i.suffix yet. */
3673 for (op = i.operands; --op >= 0;)
3674 if (i.types[op].bitfield.reg8)
3675 {
3676 guess_suffix = BYTE_MNEM_SUFFIX;
3677 break;
3678 }
3679 else if (i.types[op].bitfield.reg16)
3680 {
3681 guess_suffix = WORD_MNEM_SUFFIX;
3682 break;
3683 }
3684 else if (i.types[op].bitfield.reg32)
3685 {
3686 guess_suffix = LONG_MNEM_SUFFIX;
3687 break;
3688 }
3689 else if (i.types[op].bitfield.reg64)
3690 {
3691 guess_suffix = QWORD_MNEM_SUFFIX;
3692 break;
3693 }
3694 }
3695 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3696 guess_suffix = WORD_MNEM_SUFFIX;
3697
3698 for (op = i.operands; --op >= 0;)
3699 if (operand_type_check (i.types[op], imm))
3700 {
3701 switch (i.op[op].imms->X_op)
3702 {
3703 case O_constant:
3704 /* If a suffix is given, this operand may be shortened. */
3705 switch (guess_suffix)
3706 {
3707 case LONG_MNEM_SUFFIX:
3708 i.types[op].bitfield.imm32 = 1;
3709 i.types[op].bitfield.imm64 = 1;
3710 break;
3711 case WORD_MNEM_SUFFIX:
3712 i.types[op].bitfield.imm16 = 1;
3713 i.types[op].bitfield.imm32 = 1;
3714 i.types[op].bitfield.imm32s = 1;
3715 i.types[op].bitfield.imm64 = 1;
3716 break;
3717 case BYTE_MNEM_SUFFIX:
3718 i.types[op].bitfield.imm8 = 1;
3719 i.types[op].bitfield.imm8s = 1;
3720 i.types[op].bitfield.imm16 = 1;
3721 i.types[op].bitfield.imm32 = 1;
3722 i.types[op].bitfield.imm32s = 1;
3723 i.types[op].bitfield.imm64 = 1;
3724 break;
3725 }
3726
3727 /* If this operand is at most 16 bits, convert it
3728 to a signed 16 bit number before trying to see
3729 whether it will fit in an even smaller size.
3730 This allows a 16-bit operand such as $0xffe0 to
3731 be recognised as within Imm8S range. */
3732 if ((i.types[op].bitfield.imm16)
3733 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3734 {
3735 i.op[op].imms->X_add_number =
3736 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3737 }
3738 if ((i.types[op].bitfield.imm32)
3739 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3740 == 0))
3741 {
3742 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3743 ^ ((offsetT) 1 << 31))
3744 - ((offsetT) 1 << 31));
3745 }
3746 i.types[op]
3747 = operand_type_or (i.types[op],
3748 smallest_imm_type (i.op[op].imms->X_add_number));
3749
3750 /* We must avoid matching of Imm32 templates when 64bit
3751 only immediate is available. */
3752 if (guess_suffix == QWORD_MNEM_SUFFIX)
3753 i.types[op].bitfield.imm32 = 0;
3754 break;
3755
3756 case O_absent:
3757 case O_register:
3758 abort ();
3759
3760 /* Symbols and expressions. */
3761 default:
3762 /* Convert symbolic operand to proper sizes for matching, but don't
3763 prevent matching a set of insns that only supports sizes other
3764 than those matching the insn suffix. */
3765 {
3766 i386_operand_type mask, allowed;
3767 const insn_template *t;
3768
3769 operand_type_set (&mask, 0);
3770 operand_type_set (&allowed, 0);
3771
3772 for (t = current_templates->start;
3773 t < current_templates->end;
3774 ++t)
3775 allowed = operand_type_or (allowed,
3776 t->operand_types[op]);
3777 switch (guess_suffix)
3778 {
3779 case QWORD_MNEM_SUFFIX:
3780 mask.bitfield.imm64 = 1;
3781 mask.bitfield.imm32s = 1;
3782 break;
3783 case LONG_MNEM_SUFFIX:
3784 mask.bitfield.imm32 = 1;
3785 break;
3786 case WORD_MNEM_SUFFIX:
3787 mask.bitfield.imm16 = 1;
3788 break;
3789 case BYTE_MNEM_SUFFIX:
3790 mask.bitfield.imm8 = 1;
3791 break;
3792 default:
3793 break;
3794 }
3795 allowed = operand_type_and (mask, allowed);
3796 if (!operand_type_all_zero (&allowed))
3797 i.types[op] = operand_type_and (i.types[op], mask);
3798 }
3799 break;
3800 }
3801 }
3802 }
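
/* Example: for "add $0xffffffff, %eax" the 32-bit register operand makes
   guess_suffix LONG_MNEM_SUFFIX, the constant is sign extended to -1 by
   the imm32 check above, and smallest_imm_type then also sets imm8s,
   which lets the short sign-extended-imm8 form of add match later.  */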
3803
3804 /* Try to use the smallest displacement type too. */
3805 static void
3806 optimize_disp (void)
3807 {
3808 int op;
3809
3810 for (op = i.operands; --op >= 0;)
3811 if (operand_type_check (i.types[op], disp))
3812 {
3813 if (i.op[op].disps->X_op == O_constant)
3814 {
3815 offsetT op_disp = i.op[op].disps->X_add_number;
3816
3817 if (i.types[op].bitfield.disp16
3818 && (op_disp & ~(offsetT) 0xffff) == 0)
3819 {
3820 /* If this operand is at most 16 bits, convert
3821 to a signed 16 bit number and don't use 64bit
3822 displacement. */
3823 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3824 i.types[op].bitfield.disp64 = 0;
3825 }
3826 if (i.types[op].bitfield.disp32
3827 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3828 {
3829 /* If this operand is at most 32 bits, convert
3830 to a signed 32 bit number and don't use 64bit
3831 displacement. */
3832 op_disp &= (((offsetT) 2 << 31) - 1);
3833 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3834 i.types[op].bitfield.disp64 = 0;
3835 }
3836 if (!op_disp && i.types[op].bitfield.baseindex)
3837 {
3838 i.types[op].bitfield.disp8 = 0;
3839 i.types[op].bitfield.disp16 = 0;
3840 i.types[op].bitfield.disp32 = 0;
3841 i.types[op].bitfield.disp32s = 0;
3842 i.types[op].bitfield.disp64 = 0;
3843 i.op[op].disps = 0;
3844 i.disp_operands--;
3845 }
3846 else if (flag_code == CODE_64BIT)
3847 {
3848 if (fits_in_signed_long (op_disp))
3849 {
3850 i.types[op].bitfield.disp64 = 0;
3851 i.types[op].bitfield.disp32s = 1;
3852 }
3853 if (i.prefix[ADDR_PREFIX]
3854 && fits_in_unsigned_long (op_disp))
3855 i.types[op].bitfield.disp32 = 1;
3856 }
3857 if ((i.types[op].bitfield.disp32
3858 || i.types[op].bitfield.disp32s
3859 || i.types[op].bitfield.disp16)
3860 && fits_in_signed_byte (op_disp))
3861 i.types[op].bitfield.disp8 = 1;
3862 }
3863 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3864 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3865 {
3866 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3867 i.op[op].disps, 0, i.reloc[op]);
3868 i.types[op].bitfield.disp8 = 0;
3869 i.types[op].bitfield.disp16 = 0;
3870 i.types[op].bitfield.disp32 = 0;
3871 i.types[op].bitfield.disp32s = 0;
3872 i.types[op].bitfield.disp64 = 0;
3873 }
3874 else
3875 /* We only support 64bit displacement on constants. */
3876 i.types[op].bitfield.disp64 = 0;
3877 }
3878 }
3879
3880 /* Check if operands are valid for the instruction. */
3881
3882 static int
3883 check_VecOperands (const insn_template *t)
3884 {
3885 /* Without VSIB byte, we can't have a vector register for index. */
3886 if (!t->opcode_modifier.vecsib
3887 && i.index_reg
3888 && (i.index_reg->reg_type.bitfield.regxmm
3889 || i.index_reg->reg_type.bitfield.regymm))
3890 {
3891 i.error = unsupported_vector_index_register;
3892 return 1;
3893 }
3894
3895 /* For VSIB byte, we need a vector register for index and no PC
3896 relative addressing is allowed. */
3897 if (t->opcode_modifier.vecsib
3898 && (!i.index_reg
3899 || !((t->opcode_modifier.vecsib == VecSIB128
3900 && i.index_reg->reg_type.bitfield.regxmm)
3901 || (t->opcode_modifier.vecsib == VecSIB256
3902 && i.index_reg->reg_type.bitfield.regymm))
3903 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3904 {
3905 i.error = invalid_vsib_address;
3906 return 1;
3907 }
3908
3909 return 0;
3910 }
3911
3912 /* Check if operands are valid for the instruction. Update VEX
3913 operand types. */
3914
3915 static int
3916 VEX_check_operands (const insn_template *t)
3917 {
3918 if (!t->opcode_modifier.vex)
3919 return 0;
3920
3921 /* Only check VEX_Imm4, which must be the first operand. */
3922 if (t->operand_types[0].bitfield.vec_imm4)
3923 {
3924 if (i.op[0].imms->X_op != O_constant
3925 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3926 {
3927 i.error = bad_imm4;
3928 return 1;
3929 }
3930
3931 /* Turn off Imm8 so that update_imm won't complain. */
3932 i.types[0] = vec_imm4;
3933 }
3934
3935 return 0;
3936 }
3937
3938 static const insn_template *
3939 match_template (void)
3940 {
3941 /* Points to template once we've found it. */
3942 const insn_template *t;
3943 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3944 i386_operand_type overlap4;
3945 unsigned int found_reverse_match;
3946 i386_opcode_modifier suffix_check;
3947 i386_operand_type operand_types [MAX_OPERANDS];
3948 int addr_prefix_disp;
3949 unsigned int j;
3950 unsigned int found_cpu_match;
3951 unsigned int check_register;
3952
3953 #if MAX_OPERANDS != 5
3954 # error "MAX_OPERANDS must be 5."
3955 #endif
3956
3957 found_reverse_match = 0;
3958 addr_prefix_disp = -1;
3959
3960 memset (&suffix_check, 0, sizeof (suffix_check));
3961 if (i.suffix == BYTE_MNEM_SUFFIX)
3962 suffix_check.no_bsuf = 1;
3963 else if (i.suffix == WORD_MNEM_SUFFIX)
3964 suffix_check.no_wsuf = 1;
3965 else if (i.suffix == SHORT_MNEM_SUFFIX)
3966 suffix_check.no_ssuf = 1;
3967 else if (i.suffix == LONG_MNEM_SUFFIX)
3968 suffix_check.no_lsuf = 1;
3969 else if (i.suffix == QWORD_MNEM_SUFFIX)
3970 suffix_check.no_qsuf = 1;
3971 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3972 suffix_check.no_ldsuf = 1;
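  /* Note the inversion here: a 'b' suffix sets suffix_check.no_bsuf,
     which the loop below tests against templates carrying the No_bSuf
     (byte suffix not allowed) modifier, causing those templates to be
     skipped for this instruction.  */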
3973
3974 /* Must have right number of operands. */
3975 i.error = number_of_operands_mismatch;
3976
3977 for (t = current_templates->start; t < current_templates->end; t++)
3978 {
3979 addr_prefix_disp = -1;
3980
3981 if (i.operands != t->operands)
3982 continue;
3983
3984 /* Check processor support. */
3985 i.error = unsupported;
3986 found_cpu_match = (cpu_flags_match (t)
3987 == CPU_FLAGS_PERFECT_MATCH);
3988 if (!found_cpu_match)
3989 continue;
3990
3991 /* Check old gcc support. */
3992 i.error = old_gcc_only;
3993 if (!old_gcc && t->opcode_modifier.oldgcc)
3994 continue;
3995
3996 /* Check AT&T mnemonic. */
3997 i.error = unsupported_with_intel_mnemonic;
3998 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3999 continue;
4000
4001 /* Check AT&T/Intel syntax. */
4002 i.error = unsupported_syntax;
4003 if ((intel_syntax && t->opcode_modifier.attsyntax)
4004 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4005 continue;
4006
4007 /* Check the suffix, except for some instructions in intel mode. */
4008 i.error = invalid_instruction_suffix;
4009 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4010 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4011 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4012 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4013 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4014 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4015 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4016 continue;
4017
4018 if (!operand_size_match (t))
4019 continue;
4020
4021 for (j = 0; j < MAX_OPERANDS; j++)
4022 operand_types[j] = t->operand_types[j];
4023
4024 /* In general, don't allow 64-bit operands in 32-bit mode. */
4025 if (i.suffix == QWORD_MNEM_SUFFIX
4026 && flag_code != CODE_64BIT
4027 && (intel_syntax
4028 ? (!t->opcode_modifier.ignoresize
4029 && !intel_float_operand (t->name))
4030 : intel_float_operand (t->name) != 2)
4031 && ((!operand_types[0].bitfield.regmmx
4032 && !operand_types[0].bitfield.regxmm
4033 && !operand_types[0].bitfield.regymm)
4034 || (!operand_types[t->operands > 1].bitfield.regmmx
4035 && !operand_types[t->operands > 1].bitfield.regxmm
4036 && !operand_types[t->operands > 1].bitfield.regymm))
4037 && (t->base_opcode != 0x0fc7
4038 || t->extension_opcode != 1 /* cmpxchg8b */))
4039 continue;
4040
4041 /* In general, don't allow 32-bit operands on pre-386. */
4042 else if (i.suffix == LONG_MNEM_SUFFIX
4043 && !cpu_arch_flags.bitfield.cpui386
4044 && (intel_syntax
4045 ? (!t->opcode_modifier.ignoresize
4046 && !intel_float_operand (t->name))
4047 : intel_float_operand (t->name) != 2)
4048 && ((!operand_types[0].bitfield.regmmx
4049 && !operand_types[0].bitfield.regxmm)
4050 || (!operand_types[t->operands > 1].bitfield.regmmx
4051 && !operand_types[t->operands > 1].bitfield.regxmm)))
4052 continue;
4053
4054 /* Do not verify operands when there are none. */
4055 else
4056 {
4057 if (!t->operands)
4058 /* We've found a match; break out of loop. */
4059 break;
4060 }
4061
4062 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4063 into Disp32/Disp16/Disp32 operand. */
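/* For example, a 0x67 address size prefix in 32-bit code selects 16-bit
addressing, so a template Disp32 operand must be matched as Disp16 below. */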
4064 if (i.prefix[ADDR_PREFIX] != 0)
4065 {
4066 /* There should be only one Disp operand. */
4067 switch (flag_code)
4068 {
4069 case CODE_16BIT:
4070 for (j = 0; j < MAX_OPERANDS; j++)
4071 {
4072 if (operand_types[j].bitfield.disp16)
4073 {
4074 addr_prefix_disp = j;
4075 operand_types[j].bitfield.disp32 = 1;
4076 operand_types[j].bitfield.disp16 = 0;
4077 break;
4078 }
4079 }
4080 break;
4081 case CODE_32BIT:
4082 for (j = 0; j < MAX_OPERANDS; j++)
4083 {
4084 if (operand_types[j].bitfield.disp32)
4085 {
4086 addr_prefix_disp = j;
4087 operand_types[j].bitfield.disp32 = 0;
4088 operand_types[j].bitfield.disp16 = 1;
4089 break;
4090 }
4091 }
4092 break;
4093 case CODE_64BIT:
4094 for (j = 0; j < MAX_OPERANDS; j++)
4095 {
4096 if (operand_types[j].bitfield.disp64)
4097 {
4098 addr_prefix_disp = j;
4099 operand_types[j].bitfield.disp64 = 0;
4100 operand_types[j].bitfield.disp32 = 1;
4101 break;
4102 }
4103 }
4104 break;
4105 }
4106 }
4107
4108 /* We check register size if needed. */
4109 check_register = t->opcode_modifier.checkregsize;
4110 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4111 switch (t->operands)
4112 {
4113 case 1:
4114 if (!operand_type_match (overlap0, i.types[0]))
4115 continue;
4116 break;
4117 case 2:
4118 /* xchg %eax, %eax is a special case. It is an alias for nop
4119 only in 32bit mode and we can use opcode 0x90. In 64bit
4120 mode, we can't use 0x90 for xchg %eax, %eax since it should
4121 zero-extend %eax to %rax. */
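/* In 64bit mode "xchg %eax,%eax" is therefore encoded through the ModRM
form (87 C0), which does perform the zero-extension. */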
4122 if (flag_code == CODE_64BIT
4123 && t->base_opcode == 0x90
4124 && operand_type_equal (&i.types [0], &acc32)
4125 && operand_type_equal (&i.types [1], &acc32))
4126 continue;
4127 if (i.swap_operand)
4128 {
4129 /* If we swap operands in encoding, we either match
4130 the next one or reverse the direction of operands. */
4131 if (t->opcode_modifier.s)
4132 continue;
4133 else if (t->opcode_modifier.d)
4134 goto check_reverse;
4135 }
4136
4137 case 3:
4138 /* If we swap operands in encoding, we match the next one. */
4139 if (i.swap_operand && t->opcode_modifier.s)
4140 continue;
4141 case 4:
4142 case 5:
4143 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4144 if (!operand_type_match (overlap0, i.types[0])
4145 || !operand_type_match (overlap1, i.types[1])
4146 || (check_register
4147 && !operand_type_register_match (overlap0, i.types[0],
4148 operand_types[0],
4149 overlap1, i.types[1],
4150 operand_types[1])))
4151 {
4152 /* Check if other direction is valid ... */
4153 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4154 continue;
4155
4156 check_reverse:
4157 /* Try reversing direction of operands. */
4158 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4159 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4160 if (!operand_type_match (overlap0, i.types[0])
4161 || !operand_type_match (overlap1, i.types[1])
4162 || (check_register
4163 && !operand_type_register_match (overlap0,
4164 i.types[0],
4165 operand_types[1],
4166 overlap1,
4167 i.types[1],
4168 operand_types[0])))
4169 {
4170 /* Does not match either direction. */
4171 continue;
4172 }
4173 /* found_reverse_match holds which of D or FloatDR
4174 we've found. */
4175 if (t->opcode_modifier.d)
4176 found_reverse_match = Opcode_D;
4177 else if (t->opcode_modifier.floatd)
4178 found_reverse_match = Opcode_FloatD;
4179 else
4180 found_reverse_match = 0;
4181 if (t->opcode_modifier.floatr)
4182 found_reverse_match |= Opcode_FloatR;
4183 }
4184 else
4185 {
4186 /* Found a forward 2 operand match here. */
4187 switch (t->operands)
4188 {
4189 case 5:
4190 overlap4 = operand_type_and (i.types[4],
4191 operand_types[4]);
4192 case 4:
4193 overlap3 = operand_type_and (i.types[3],
4194 operand_types[3]);
4195 case 3:
4196 overlap2 = operand_type_and (i.types[2],
4197 operand_types[2]);
4198 break;
4199 }
4200
4201 switch (t->operands)
4202 {
4203 case 5:
4204 if (!operand_type_match (overlap4, i.types[4])
4205 || !operand_type_register_match (overlap3,
4206 i.types[3],
4207 operand_types[3],
4208 overlap4,
4209 i.types[4],
4210 operand_types[4]))
4211 continue;
4212 case 4:
4213 if (!operand_type_match (overlap3, i.types[3])
4214 || (check_register
4215 && !operand_type_register_match (overlap2,
4216 i.types[2],
4217 operand_types[2],
4218 overlap3,
4219 i.types[3],
4220 operand_types[3])))
4221 continue;
4222 case 3:
4223 /* Here we make use of the fact that there are no
4224 reverse match 3 operand instructions, and all 3
4225 operand instructions only need to be checked for
4226 register consistency between operands 2 and 3. */
4227 if (!operand_type_match (overlap2, i.types[2])
4228 || (check_register
4229 && !operand_type_register_match (overlap1,
4230 i.types[1],
4231 operand_types[1],
4232 overlap2,
4233 i.types[2],
4234 operand_types[2])))
4235 continue;
4236 break;
4237 }
4238 }
4239 /* Found either forward/reverse 2, 3 or 4 operand match here:
4240 slip through to break. */
4241 }
4242 if (!found_cpu_match)
4243 {
4244 found_reverse_match = 0;
4245 continue;
4246 }
4247
4248 /* Check if vector operands are valid. */
4249 if (check_VecOperands (t))
4250 continue;
4251
4252 /* Check if VEX operands are valid. */
4253 if (VEX_check_operands (t))
4254 continue;
4255
4256 /* We've found a match; break out of loop. */
4257 break;
4258 }
4259
4260 if (t == current_templates->end)
4261 {
4262 /* We found no match. */
4263 const char *err_msg;
4264 switch (i.error)
4265 {
4266 default:
4267 abort ();
4268 case operand_size_mismatch:
4269 err_msg = _("operand size mismatch");
4270 break;
4271 case operand_type_mismatch:
4272 err_msg = _("operand type mismatch");
4273 break;
4274 case register_type_mismatch:
4275 err_msg = _("register type mismatch");
4276 break;
4277 case number_of_operands_mismatch:
4278 err_msg = _("number of operands mismatch");
4279 break;
4280 case invalid_instruction_suffix:
4281 err_msg = _("invalid instruction suffix");
4282 break;
4283 case bad_imm4:
4284 err_msg = _("Imm4 isn't the first operand");
4285 break;
4286 case old_gcc_only:
4287 err_msg = _("only supported with old gcc");
4288 break;
4289 case unsupported_with_intel_mnemonic:
4290 err_msg = _("unsupported with Intel mnemonic");
4291 break;
4292 case unsupported_syntax:
4293 err_msg = _("unsupported syntax");
4294 break;
4295 case unsupported:
4296 err_msg = _("unsupported");
4297 break;
4298 case invalid_vsib_address:
4299 err_msg = _("invalid VSIB address");
4300 break;
4301 case unsupported_vector_index_register:
4302 err_msg = _("unsupported vector index register");
4303 break;
4304 }
4305 as_bad (_("%s for `%s'"), err_msg,
4306 current_templates->start->name);
4307 return NULL;
4308 }
4309
4310 if (!quiet_warnings)
4311 {
4312 if (!intel_syntax
4313 && (i.types[0].bitfield.jumpabsolute
4314 != operand_types[0].bitfield.jumpabsolute))
4315 {
4316 as_warn (_("indirect %s without `*'"), t->name);
4317 }
4318
4319 if (t->opcode_modifier.isprefix
4320 && t->opcode_modifier.ignoresize)
4321 {
4322 /* Warn them that a data or address size prefix doesn't
4323 affect assembly of the next line of code. */
4324 as_warn (_("stand-alone `%s' prefix"), t->name);
4325 }
4326 }
4327
4328 /* Copy the template we found. */
4329 i.tm = *t;
4330
4331 if (addr_prefix_disp != -1)
4332 i.tm.operand_types[addr_prefix_disp]
4333 = operand_types[addr_prefix_disp];
4334
4335 if (found_reverse_match)
4336 {
4337 /* If we found a reverse match we must alter the opcode
4338 direction bit. found_reverse_match holds bits to change
4339 (different for int & float insns). */
4340
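/* For integer insns this flips the direction bit, e.g. turning opcode
0x01 (add r/m32,r32) into 0x03 (add r32,r/m32). */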
4341 i.tm.base_opcode ^= found_reverse_match;
4342
4343 i.tm.operand_types[0] = operand_types[1];
4344 i.tm.operand_types[1] = operand_types[0];
4345 }
4346
4347 return t;
4348 }
4349
4350 static int
4351 check_string (void)
4352 {
4353 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4354 if (i.tm.operand_types[mem_op].bitfield.esseg)
4355 {
4356 if (i.seg[0] != NULL && i.seg[0] != &es)
4357 {
4358 as_bad (_("`%s' operand %d must use `%ses' segment"),
4359 i.tm.name,
4360 mem_op + 1,
4361 register_prefix);
4362 return 0;
4363 }
4364 /* There's only ever one segment override allowed per instruction.
4365 This instruction possibly has a legal segment override on the
4366 second operand, so copy the segment to where non-string
4367 instructions store it, allowing common code. */
4368 i.seg[0] = i.seg[1];
4369 }
4370 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4371 {
4372 if (i.seg[1] != NULL && i.seg[1] != &es)
4373 {
4374 as_bad (_("`%s' operand %d must use `%ses' segment"),
4375 i.tm.name,
4376 mem_op + 2,
4377 register_prefix);
4378 return 0;
4379 }
4380 }
4381 return 1;
4382 }
4383
4384 static int
4385 process_suffix (void)
4386 {
4387 /* If matched instruction specifies an explicit instruction mnemonic
4388 suffix, use it. */
4389 if (i.tm.opcode_modifier.size16)
4390 i.suffix = WORD_MNEM_SUFFIX;
4391 else if (i.tm.opcode_modifier.size32)
4392 i.suffix = LONG_MNEM_SUFFIX;
4393 else if (i.tm.opcode_modifier.size64)
4394 i.suffix = QWORD_MNEM_SUFFIX;
4395 else if (i.reg_operands)
4396 {
4397 /* If there's no instruction mnemonic suffix we try to invent one
4398 based on register operands. */
4399 if (!i.suffix)
4400 {
4401 /* We take i.suffix from the last register operand specified.
4402 The destination register type is more significant than the
4403 source register type; crc32 in SSE4.2 prefers the source
4404 register type. */
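/* E.g. "crc32 %cx,%ebx" takes its suffix from the 16-bit source and is
assembled as crc32w. */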
4405 if (i.tm.base_opcode == 0xf20f38f1)
4406 {
4407 if (i.types[0].bitfield.reg16)
4408 i.suffix = WORD_MNEM_SUFFIX;
4409 else if (i.types[0].bitfield.reg32)
4410 i.suffix = LONG_MNEM_SUFFIX;
4411 else if (i.types[0].bitfield.reg64)
4412 i.suffix = QWORD_MNEM_SUFFIX;
4413 }
4414 else if (i.tm.base_opcode == 0xf20f38f0)
4415 {
4416 if (i.types[0].bitfield.reg8)
4417 i.suffix = BYTE_MNEM_SUFFIX;
4418 }
4419
4420 if (!i.suffix)
4421 {
4422 int op;
4423
4424 if (i.tm.base_opcode == 0xf20f38f1
4425 || i.tm.base_opcode == 0xf20f38f0)
4426 {
4427 /* We have to know the operand size for crc32. */
4428 as_bad (_("ambiguous memory operand size for `%s`"),
4429 i.tm.name);
4430 return 0;
4431 }
4432
4433 for (op = i.operands; --op >= 0;)
4434 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4435 {
4436 if (i.types[op].bitfield.reg8)
4437 {
4438 i.suffix = BYTE_MNEM_SUFFIX;
4439 break;
4440 }
4441 else if (i.types[op].bitfield.reg16)
4442 {
4443 i.suffix = WORD_MNEM_SUFFIX;
4444 break;
4445 }
4446 else if (i.types[op].bitfield.reg32)
4447 {
4448 i.suffix = LONG_MNEM_SUFFIX;
4449 break;
4450 }
4451 else if (i.types[op].bitfield.reg64)
4452 {
4453 i.suffix = QWORD_MNEM_SUFFIX;
4454 break;
4455 }
4456 }
4457 }
4458 }
4459 else if (i.suffix == BYTE_MNEM_SUFFIX)
4460 {
4461 if (intel_syntax
4462 && i.tm.opcode_modifier.ignoresize
4463 && i.tm.opcode_modifier.no_bsuf)
4464 i.suffix = 0;
4465 else if (!check_byte_reg ())
4466 return 0;
4467 }
4468 else if (i.suffix == LONG_MNEM_SUFFIX)
4469 {
4470 if (intel_syntax
4471 && i.tm.opcode_modifier.ignoresize
4472 && i.tm.opcode_modifier.no_lsuf)
4473 i.suffix = 0;
4474 else if (!check_long_reg ())
4475 return 0;
4476 }
4477 else if (i.suffix == QWORD_MNEM_SUFFIX)
4478 {
4479 if (intel_syntax
4480 && i.tm.opcode_modifier.ignoresize
4481 && i.tm.opcode_modifier.no_qsuf)
4482 i.suffix = 0;
4483 else if (!check_qword_reg ())
4484 return 0;
4485 }
4486 else if (i.suffix == WORD_MNEM_SUFFIX)
4487 {
4488 if (intel_syntax
4489 && i.tm.opcode_modifier.ignoresize
4490 && i.tm.opcode_modifier.no_wsuf)
4491 i.suffix = 0;
4492 else if (!check_word_reg ())
4493 return 0;
4494 }
4495 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4496 || i.suffix == YMMWORD_MNEM_SUFFIX)
4497 {
4498 /* Skip if the instruction has x/y suffix. match_template
4499 should check if it is a valid suffix. */
4500 }
4501 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4502 /* Do nothing if the instruction is going to ignore the prefix. */
4503 ;
4504 else
4505 abort ();
4506 }
4507 else if (i.tm.opcode_modifier.defaultsize
4508 && !i.suffix
4509 /* exclude fldenv/frstor/fsave/fstenv */
4510 && i.tm.opcode_modifier.no_ssuf)
4511 {
4512 i.suffix = stackop_size;
4513 }
4514 else if (intel_syntax
4515 && !i.suffix
4516 && (i.tm.operand_types[0].bitfield.jumpabsolute
4517 || i.tm.opcode_modifier.jumpbyte
4518 || i.tm.opcode_modifier.jumpintersegment
4519 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4520 && i.tm.extension_opcode <= 3)))
4521 {
4522 switch (flag_code)
4523 {
4524 case CODE_64BIT:
4525 if (!i.tm.opcode_modifier.no_qsuf)
4526 {
4527 i.suffix = QWORD_MNEM_SUFFIX;
4528 break;
4529 }
4530 case CODE_32BIT:
4531 if (!i.tm.opcode_modifier.no_lsuf)
4532 i.suffix = LONG_MNEM_SUFFIX;
4533 break;
4534 case CODE_16BIT:
4535 if (!i.tm.opcode_modifier.no_wsuf)
4536 i.suffix = WORD_MNEM_SUFFIX;
4537 break;
4538 }
4539 }
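/* The switch above gives such Intel-mode operands a mode-dependent default:
e.g. "call [eax]" in 32-bit code gets an implied 'l' suffix, and
"call [rax]" in 64-bit code gets 'q'. */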
4540
4541 if (!i.suffix)
4542 {
4543 if (!intel_syntax)
4544 {
4545 if (i.tm.opcode_modifier.w)
4546 {
4547 as_bad (_("no instruction mnemonic suffix given and "
4548 "no register operands; can't size instruction"));
4549 return 0;
4550 }
4551 }
4552 else
4553 {
4554 unsigned int suffixes;
4555
4556 suffixes = !i.tm.opcode_modifier.no_bsuf;
4557 if (!i.tm.opcode_modifier.no_wsuf)
4558 suffixes |= 1 << 1;
4559 if (!i.tm.opcode_modifier.no_lsuf)
4560 suffixes |= 1 << 2;
4561 if (!i.tm.opcode_modifier.no_ldsuf)
4562 suffixes |= 1 << 3;
4563 if (!i.tm.opcode_modifier.no_ssuf)
4564 suffixes |= 1 << 4;
4565 if (!i.tm.opcode_modifier.no_qsuf)
4566 suffixes |= 1 << 5;
4567
4568 /* There is more than one possible suffix. */
4569 if (i.tm.opcode_modifier.w
4570 || ((suffixes & (suffixes - 1))
4571 && !i.tm.opcode_modifier.defaultsize
4572 && !i.tm.opcode_modifier.ignoresize))
4573 {
4574 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4575 return 0;
4576 }
4577 }
4578 }
4579
4580 /* Change the opcode based on the operand size given by i.suffix.
4581 We don't need to change things for byte insns. */
4582
4583 if (i.suffix
4584 && i.suffix != BYTE_MNEM_SUFFIX
4585 && i.suffix != XMMWORD_MNEM_SUFFIX
4586 && i.suffix != YMMWORD_MNEM_SUFFIX)
4587 {
4588 /* It's not a byte, select word/dword operation. */
4589 if (i.tm.opcode_modifier.w)
4590 {
4591 if (i.tm.opcode_modifier.shortform)
4592 i.tm.base_opcode |= 8;
4593 else
4594 i.tm.base_opcode |= 1;
4595 }
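/* E.g. for mov this turns the byte opcode 0x88 into the word/dword opcode
0x89; short-form (register-in-opcode) insns use bit 3 instead. */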
4596
4597 /* Now select between word & dword operations via the operand
4598 size prefix, except for instructions that will ignore this
4599 prefix anyway. */
4600 if (i.tm.opcode_modifier.addrprefixop0)
4601 {
4602 /* The address size override prefix changes the size of the
4603 first operand. */
4604 if ((flag_code == CODE_32BIT
4605 && i.op->regs[0].reg_type.bitfield.reg16)
4606 || (flag_code != CODE_32BIT
4607 && i.op->regs[0].reg_type.bitfield.reg32))
4608 if (!add_prefix (ADDR_PREFIX_OPCODE))
4609 return 0;
4610 }
4611 else if (i.suffix != QWORD_MNEM_SUFFIX
4612 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4613 && !i.tm.opcode_modifier.ignoresize
4614 && !i.tm.opcode_modifier.floatmf
4615 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4616 || (flag_code == CODE_64BIT
4617 && i.tm.opcode_modifier.jumpbyte)))
4618 {
4619 unsigned int prefix = DATA_PREFIX_OPCODE;
4620
4621 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4622 prefix = ADDR_PREFIX_OPCODE;
4623
4624 if (!add_prefix (prefix))
4625 return 0;
4626 }
4627
4628 /* Set mode64 for an operand. */
4629 if (i.suffix == QWORD_MNEM_SUFFIX
4630 && flag_code == CODE_64BIT
4631 && !i.tm.opcode_modifier.norex64)
4632 {
4633 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4634 need rex64. cmpxchg8b is also a special case. */
4635 if (! (i.operands == 2
4636 && i.tm.base_opcode == 0x90
4637 && i.tm.extension_opcode == None
4638 && operand_type_equal (&i.types [0], &acc64)
4639 && operand_type_equal (&i.types [1], &acc64))
4640 && ! (i.operands == 1
4641 && i.tm.base_opcode == 0xfc7
4642 && i.tm.extension_opcode == 1
4643 && !operand_type_check (i.types [0], reg)
4644 && operand_type_check (i.types [0], anymem)))
4645 i.rex |= REX_W;
4646 }
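/* The REX.W prefix (0x48..0x4f) is what widens the operation to 64 bits,
e.g. "addq %rax,%rbx" assembles to 48 01 c3. */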
4647
4648 /* Size floating point instruction. */
4649 if (i.suffix == LONG_MNEM_SUFFIX)
4650 if (i.tm.opcode_modifier.floatmf)
4651 i.tm.base_opcode ^= 4;
4652 }
4653
4654 return 1;
4655 }
4656
4657 static int
4658 check_byte_reg (void)
4659 {
4660 int op;
4661
4662 for (op = i.operands; --op >= 0;)
4663 {
4664 /* If this is an eight bit register, it's OK. If it's the 16 or
4665 32 bit version of an eight bit register, we will just use the
4666 low portion, and that's OK too. */
4667 if (i.types[op].bitfield.reg8)
4668 continue;
4669
4670 /* crc32 doesn't generate this warning. */
4671 if (i.tm.base_opcode == 0xf20f38f0)
4672 continue;
4673
4674 if ((i.types[op].bitfield.reg16
4675 || i.types[op].bitfield.reg32
4676 || i.types[op].bitfield.reg64)
4677 && i.op[op].regs->reg_num < 4)
4678 {
4679 /* Prohibit these changes in the 64bit mode, since the
4680 lowering is more complicated. */
4681 if (flag_code == CODE_64BIT
4682 && !i.tm.operand_types[op].bitfield.inoutportreg)
4683 {
4684 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4685 register_prefix, i.op[op].regs->reg_name,
4686 i.suffix);
4687 return 0;
4688 }
4689 #if REGISTER_WARNINGS
4690 if (!quiet_warnings
4691 && !i.tm.operand_types[op].bitfield.inoutportreg)
4692 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4693 register_prefix,
4694 (i.op[op].regs + (i.types[op].bitfield.reg16
4695 ? REGNAM_AL - REGNAM_AX
4696 : REGNAM_AL - REGNAM_EAX))->reg_name,
4697 register_prefix,
4698 i.op[op].regs->reg_name,
4699 i.suffix);
4700 #endif
4701 continue;
4702 }
4703 /* Any other register is bad. */
4704 if (i.types[op].bitfield.reg16
4705 || i.types[op].bitfield.reg32
4706 || i.types[op].bitfield.reg64
4707 || i.types[op].bitfield.regmmx
4708 || i.types[op].bitfield.regxmm
4709 || i.types[op].bitfield.regymm
4710 || i.types[op].bitfield.sreg2
4711 || i.types[op].bitfield.sreg3
4712 || i.types[op].bitfield.control
4713 || i.types[op].bitfield.debug
4714 || i.types[op].bitfield.test
4715 || i.types[op].bitfield.floatreg
4716 || i.types[op].bitfield.floatacc)
4717 {
4718 as_bad (_("`%s%s' not allowed with `%s%c'"),
4719 register_prefix,
4720 i.op[op].regs->reg_name,
4721 i.tm.name,
4722 i.suffix);
4723 return 0;
4724 }
4725 }
4726 return 1;
4727 }
4728
4729 static int
4730 check_long_reg (void)
4731 {
4732 int op;
4733
4734 for (op = i.operands; --op >= 0;)
4735 /* Reject eight bit registers, except where the template requires
4736 them. (eg. movzb) */
4737 if (i.types[op].bitfield.reg8
4738 && (i.tm.operand_types[op].bitfield.reg16
4739 || i.tm.operand_types[op].bitfield.reg32
4740 || i.tm.operand_types[op].bitfield.acc))
4741 {
4742 as_bad (_("`%s%s' not allowed with `%s%c'"),
4743 register_prefix,
4744 i.op[op].regs->reg_name,
4745 i.tm.name,
4746 i.suffix);
4747 return 0;
4748 }
4749 /* Warn if the e prefix on a general reg is missing. */
4750 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4751 && i.types[op].bitfield.reg16
4752 && (i.tm.operand_types[op].bitfield.reg32
4753 || i.tm.operand_types[op].bitfield.acc))
4754 {
4755 /* Prohibit these changes in the 64bit mode, since the
4756 lowering is more complicated. */
4757 if (flag_code == CODE_64BIT)
4758 {
4759 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4760 register_prefix, i.op[op].regs->reg_name,
4761 i.suffix);
4762 return 0;
4763 }
4764 #if REGISTER_WARNINGS
4765 else
4766 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4767 register_prefix,
4768 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4769 register_prefix,
4770 i.op[op].regs->reg_name,
4771 i.suffix);
4772 #endif
4773 }
4774 /* Warn if the r prefix on a general reg is missing. */
4775 else if (i.types[op].bitfield.reg64
4776 && (i.tm.operand_types[op].bitfield.reg32
4777 || i.tm.operand_types[op].bitfield.acc))
4778 {
4779 if (intel_syntax
4780 && i.tm.opcode_modifier.toqword
4781 && !i.types[0].bitfield.regxmm)
4782 {
4783 /* Convert to QWORD. We want REX byte. */
4784 i.suffix = QWORD_MNEM_SUFFIX;
4785 }
4786 else
4787 {
4788 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4789 register_prefix, i.op[op].regs->reg_name,
4790 i.suffix);
4791 return 0;
4792 }
4793 }
4794 return 1;
4795 }
4796
4797 static int
4798 check_qword_reg (void)
4799 {
4800 int op;
4801
4802 for (op = i.operands; --op >= 0; )
4803 /* Reject eight bit registers, except where the template requires
4804 them. (eg. movzb) */
4805 if (i.types[op].bitfield.reg8
4806 && (i.tm.operand_types[op].bitfield.reg16
4807 || i.tm.operand_types[op].bitfield.reg32
4808 || i.tm.operand_types[op].bitfield.acc))
4809 {
4810 as_bad (_("`%s%s' not allowed with `%s%c'"),
4811 register_prefix,
4812 i.op[op].regs->reg_name,
4813 i.tm.name,
4814 i.suffix);
4815 return 0;
4816 }
4817 /* Reject or convert if the r prefix on a general reg is missing. */
4818 else if ((i.types[op].bitfield.reg16
4819 || i.types[op].bitfield.reg32)
4820 && (i.tm.operand_types[op].bitfield.reg32
4821 || i.tm.operand_types[op].bitfield.acc))
4822 {
4823 /* In Intel syntax some templates allow converting the operation
4824 to DWORD instead of rejecting the register. */
4825 if (intel_syntax
4826 && i.tm.opcode_modifier.todword
4827 && !i.types[0].bitfield.regxmm)
4828 {
4829 /* Convert to DWORD. We don't want REX byte. */
4830 i.suffix = LONG_MNEM_SUFFIX;
4831 }
4832 else
4833 {
4834 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4835 register_prefix, i.op[op].regs->reg_name,
4836 i.suffix);
4837 return 0;
4838 }
4839 }
4840 return 1;
4841 }
4842
4843 static int
4844 check_word_reg (void)
4845 {
4846 int op;
4847 for (op = i.operands; --op >= 0;)
4848 /* Reject eight bit registers, except where the template requires
4849 them. (eg. movzb) */
4850 if (i.types[op].bitfield.reg8
4851 && (i.tm.operand_types[op].bitfield.reg16
4852 || i.tm.operand_types[op].bitfield.reg32
4853 || i.tm.operand_types[op].bitfield.acc))
4854 {
4855 as_bad (_("`%s%s' not allowed with `%s%c'"),
4856 register_prefix,
4857 i.op[op].regs->reg_name,
4858 i.tm.name,
4859 i.suffix);
4860 return 0;
4861 }
4862 /* Warn if the e prefix on a general reg is present. */
4863 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4864 && i.types[op].bitfield.reg32
4865 && (i.tm.operand_types[op].bitfield.reg16
4866 || i.tm.operand_types[op].bitfield.acc))
4867 {
4868 /* Prohibit these changes in the 64bit mode, since the
4869 lowering is more complicated. */
4870 if (flag_code == CODE_64BIT)
4871 {
4872 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4873 register_prefix, i.op[op].regs->reg_name,
4874 i.suffix);
4875 return 0;
4876 }
4877 #if REGISTER_WARNINGS
4878 else
4879 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4880 register_prefix,
4881 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4882 register_prefix,
4883 i.op[op].regs->reg_name,
4884 i.suffix);
4885 #endif
4886 }
4887 return 1;
4888 }
4889
4890 static int
4891 update_imm (unsigned int j)
4892 {
4893 i386_operand_type overlap = i.types[j];
4894 if ((overlap.bitfield.imm8
4895 || overlap.bitfield.imm8s
4896 || overlap.bitfield.imm16
4897 || overlap.bitfield.imm32
4898 || overlap.bitfield.imm32s
4899 || overlap.bitfield.imm64)
4900 && !operand_type_equal (&overlap, &imm8)
4901 && !operand_type_equal (&overlap, &imm8s)
4902 && !operand_type_equal (&overlap, &imm16)
4903 && !operand_type_equal (&overlap, &imm32)
4904 && !operand_type_equal (&overlap, &imm32s)
4905 && !operand_type_equal (&overlap, &imm64))
4906 {
4907 if (i.suffix)
4908 {
4909 i386_operand_type temp;
4910
4911 operand_type_set (&temp, 0);
4912 if (i.suffix == BYTE_MNEM_SUFFIX)
4913 {
4914 temp.bitfield.imm8 = overlap.bitfield.imm8;
4915 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4916 }
4917 else if (i.suffix == WORD_MNEM_SUFFIX)
4918 temp.bitfield.imm16 = overlap.bitfield.imm16;
4919 else if (i.suffix == QWORD_MNEM_SUFFIX)
4920 {
4921 temp.bitfield.imm64 = overlap.bitfield.imm64;
4922 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4923 }
4924 else
4925 temp.bitfield.imm32 = overlap.bitfield.imm32;
4926 overlap = temp;
4927 }
4928 else if (operand_type_equal (&overlap, &imm16_32_32s)
4929 || operand_type_equal (&overlap, &imm16_32)
4930 || operand_type_equal (&overlap, &imm16_32s))
4931 {
4932 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4933 overlap = imm16;
4934 else
4935 overlap = imm32s;
4936 }
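/* I.e. a 0x66 data size prefix (or 16-bit mode without one) narrows the
immediate to 16 bits; otherwise it stays 32 bits. */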
4937 if (!operand_type_equal (&overlap, &imm8)
4938 && !operand_type_equal (&overlap, &imm8s)
4939 && !operand_type_equal (&overlap, &imm16)
4940 && !operand_type_equal (&overlap, &imm32)
4941 && !operand_type_equal (&overlap, &imm32s)
4942 && !operand_type_equal (&overlap, &imm64))
4943 {
4944 as_bad (_("no instruction mnemonic suffix given; "
4945 "can't determine immediate size"));
4946 return 0;
4947 }
4948 }
4949 i.types[j] = overlap;
4950
4951 return 1;
4952 }
4953
4954 static int
4955 finalize_imm (void)
4956 {
4957 unsigned int j, n;
4958
4959 /* Update the first 2 immediate operands. */
4960 n = i.operands > 2 ? 2 : i.operands;
4961 if (n)
4962 {
4963 for (j = 0; j < n; j++)
4964 if (update_imm (j) == 0)
4965 return 0;
4966
4967 /* The 3rd operand can't be an immediate operand. */
4968 gas_assert (operand_type_check (i.types[2], imm) == 0);
4969 }
4970
4971 return 1;
4972 }
4973
4974 static int
4975 bad_implicit_operand (int xmm)
4976 {
4977 const char *ireg = xmm ? "xmm0" : "ymm0";
4978
4979 if (intel_syntax)
4980 as_bad (_("the last operand of `%s' must be `%s%s'"),
4981 i.tm.name, register_prefix, ireg);
4982 else
4983 as_bad (_("the first operand of `%s' must be `%s%s'"),
4984 i.tm.name, register_prefix, ireg);
4985 return 0;
4986 }
4987
4988 static int
4989 process_operands (void)
4990 {
4991 /* Default segment register this instruction will use for memory
4992 accesses. 0 means unknown. This is only for optimizing out
4993 unnecessary segment overrides. */
4994 const seg_entry *default_seg = 0;
4995
4996 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4997 {
4998 unsigned int dupl = i.operands;
4999 unsigned int dest = dupl - 1;
5000 unsigned int j;
5001
5002 /* The destination must be an xmm register. */
5003 gas_assert (i.reg_operands
5004 && MAX_OPERANDS > dupl
5005 && operand_type_equal (&i.types[dest], &regxmm));
5006
5007 if (i.tm.opcode_modifier.firstxmm0)
5008 {
5009 /* The first operand is implicit and must be xmm0. */
5010 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5011 if (i.op[0].regs->reg_num != 0)
5012 return bad_implicit_operand (1);
5013
5014 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5015 {
5016 /* Keep xmm0 for instructions with VEX prefix and 3
5017 sources. */
5018 goto duplicate;
5019 }
5020 else
5021 {
5022 /* We remove the first xmm0 and keep the number of
5023 operands unchanged, which in fact duplicates the
5024 destination. */
5025 for (j = 1; j < i.operands; j++)
5026 {
5027 i.op[j - 1] = i.op[j];
5028 i.types[j - 1] = i.types[j];
5029 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5030 }
5031 }
5032 }
5033 else if (i.tm.opcode_modifier.implicit1stxmm0)
5034 {
5035 gas_assert ((MAX_OPERANDS - 1) > dupl
5036 && (i.tm.opcode_modifier.vexsources
5037 == VEX3SOURCES));
5038
5039 /* Add the implicit xmm0 for instructions with VEX prefix
5040 and 3 sources. */
5041 for (j = i.operands; j > 0; j--)
5042 {
5043 i.op[j] = i.op[j - 1];
5044 i.types[j] = i.types[j - 1];
5045 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5046 }
5047 i.op[0].regs
5048 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5049 i.types[0] = regxmm;
5050 i.tm.operand_types[0] = regxmm;
5051
5052 i.operands += 2;
5053 i.reg_operands += 2;
5054 i.tm.operands += 2;
5055
5056 dupl++;
5057 dest++;
5058 i.op[dupl] = i.op[dest];
5059 i.types[dupl] = i.types[dest];
5060 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5061 }
5062 else
5063 {
5064 duplicate:
5065 i.operands++;
5066 i.reg_operands++;
5067 i.tm.operands++;
5068
5069 i.op[dupl] = i.op[dest];
5070 i.types[dupl] = i.types[dest];
5071 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5072 }
5073
5074 if (i.tm.opcode_modifier.immext)
5075 process_immext ();
5076 }
5077 else if (i.tm.opcode_modifier.firstxmm0)
5078 {
5079 unsigned int j;
5080
5081 /* The first operand is implicit and must be xmm0/ymm0. */
5082 gas_assert (i.reg_operands
5083 && (operand_type_equal (&i.types[0], &regxmm)
5084 || operand_type_equal (&i.types[0], &regymm)));
5085 if (i.op[0].regs->reg_num != 0)
5086 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5087
5088 for (j = 1; j < i.operands; j++)
5089 {
5090 i.op[j - 1] = i.op[j];
5091 i.types[j - 1] = i.types[j];
5092
5093 /* We need to adjust fields in i.tm since they are used by
5094 build_modrm_byte. */
5095 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5096 }
5097
5098 i.operands--;
5099 i.reg_operands--;
5100 i.tm.operands--;
5101 }
5102 else if (i.tm.opcode_modifier.regkludge)
5103 {
5104 /* The imul $imm, %reg instruction is converted into
5105 imul $imm, %reg, %reg, and the clr %reg instruction
5106 is converted into xor %reg, %reg. */
5107
5108 unsigned int first_reg_op;
5109
5110 if (operand_type_check (i.types[0], reg))
5111 first_reg_op = 0;
5112 else
5113 first_reg_op = 1;
5114 /* Pretend we saw the extra register operand. */
5115 gas_assert (i.reg_operands == 1
5116 && i.op[first_reg_op + 1].regs == 0);
5117 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5118 i.types[first_reg_op + 1] = i.types[first_reg_op];
5119 i.operands++;
5120 i.reg_operands++;
5121 }
5122
5123 if (i.tm.opcode_modifier.shortform)
5124 {
5125 if (i.types[0].bitfield.sreg2
5126 || i.types[0].bitfield.sreg3)
5127 {
5128 if (i.tm.base_opcode == POP_SEG_SHORT
5129 && i.op[0].regs->reg_num == 1)
5130 {
5131 as_bad (_("you can't `pop %scs'"), register_prefix);
5132 return 0;
5133 }
5134 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5135 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5136 i.rex |= REX_B;
5137 }
5138 else
5139 {
5140 /* The register or float register operand is in operand
5141 0 or 1. */
5142 unsigned int op;
5143
5144 if (i.types[0].bitfield.floatreg
5145 || operand_type_check (i.types[0], reg))
5146 op = 0;
5147 else
5148 op = 1;
5149 /* Register goes in low 3 bits of opcode. */
5150 i.tm.base_opcode |= i.op[op].regs->reg_num;
5151 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5152 i.rex |= REX_B;
5153 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5154 {
5155 /* Warn about some common errors, but press on regardless.
5156 The first case can be generated by gcc (<= 2.8.1). */
5157 if (i.operands == 2)
5158 {
5159 /* Reversed arguments on faddp, fsubp, etc. */
5160 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5161 register_prefix, i.op[!intel_syntax].regs->reg_name,
5162 register_prefix, i.op[intel_syntax].regs->reg_name);
5163 }
5164 else
5165 {
5166 /* Extraneous `l' suffix on fp insn. */
5167 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5168 register_prefix, i.op[0].regs->reg_name);
5169 }
5170 }
5171 }
5172 }
5173 else if (i.tm.opcode_modifier.modrm)
5174 {
5175 /* The opcode is completed (modulo i.tm.extension_opcode which
5176 must be put into the modrm byte). Now, we make the modrm and
5177 index base bytes based on all the info we've collected. */
5178
5179 default_seg = build_modrm_byte ();
5180 }
5181 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5182 {
5183 default_seg = &ds;
5184 }
5185 else if (i.tm.opcode_modifier.isstring)
5186 {
5187 /* For the string instructions that allow a segment override
5188 on one of their operands, the default segment is ds. */
5189 default_seg = &ds;
5190 }
5191
5192 if (i.tm.base_opcode == 0x8d /* lea */
5193 && i.seg[0]
5194 && !quiet_warnings)
5195 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5196
5197 /* If a segment was explicitly specified, and the specified segment
5198 is not the default, use an opcode prefix to select it. If we
5199 never figured out what the default segment is, then default_seg
5200 will be zero at this point, and the specified segment prefix will
5201 always be used. */
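/* E.g. "movl %fs:foo, %eax" keeps its 0x64 segment prefix, while a redundant
%ds: override on a plain (%eax) access is not emitted. */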
5202 if ((i.seg[0]) && (i.seg[0] != default_seg))
5203 {
5204 if (!add_prefix (i.seg[0]->seg_prefix))
5205 return 0;
5206 }
5207 return 1;
5208 }
5209
5210 static const seg_entry *
5211 build_modrm_byte (void)
5212 {
5213 const seg_entry *default_seg = 0;
5214 unsigned int source, dest;
5215 int vex_3_sources;
5216
5217 /* The first operand of instructions with VEX prefix and 3 sources
5218 must be VEX_Imm4. */
5219 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5220 if (vex_3_sources)
5221 {
5222 unsigned int nds, reg_slot;
5223 expressionS *exp;
5224
5225 if (i.tm.opcode_modifier.veximmext
5226 && i.tm.opcode_modifier.immext)
5227 {
5228 dest = i.operands - 2;
5229 gas_assert (dest == 3);
5230 }
5231 else
5232 dest = i.operands - 1;
5233 nds = dest - 1;
5234
5235 /* There are 2 kinds of instructions:
5236 1. 5 operands: 4 register operands or 3 register operands
5237 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5238 VexW0 or VexW1. The destination must be either XMM or YMM
5239 register.
5240 2. 4 operands: 4 register operands or 3 register operands
5241 plus 1 memory operand, VexXDS, and VexImmExt */
5242 gas_assert ((i.reg_operands == 4
5243 || (i.reg_operands == 3 && i.mem_operands == 1))
5244 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5245 && (i.tm.opcode_modifier.veximmext
5246 || (i.imm_operands == 1
5247 && i.types[0].bitfield.vec_imm4
5248 && (i.tm.opcode_modifier.vexw == VEXW0
5249 || i.tm.opcode_modifier.vexw == VEXW1)
5250 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5251 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5252
5253 if (i.imm_operands == 0)
5254 {
5255 /* When there is no immediate operand, generate an 8bit
5256 immediate operand to encode the first operand. */
5257 exp = &im_expressions[i.imm_operands++];
5258 i.op[i.operands].imms = exp;
5259 i.types[i.operands] = imm8;
5260 i.operands++;
5261 /* If VexW1 is set, the first operand is the source and
5262 the second operand is encoded in the immediate operand. */
5263 if (i.tm.opcode_modifier.vexw == VEXW1)
5264 {
5265 source = 0;
5266 reg_slot = 1;
5267 }
5268 else
5269 {
5270 source = 1;
5271 reg_slot = 0;
5272 }
5273
5274 /* FMA swaps REG and NDS. */
5275 if (i.tm.cpu_flags.bitfield.cpufma)
5276 {
5277 unsigned int tmp;
5278 tmp = reg_slot;
5279 reg_slot = nds;
5280 nds = tmp;
5281 }
5282
5283 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5284 &regxmm)
5285 || operand_type_equal (&i.tm.operand_types[reg_slot],
5286 &regymm));
5287 exp->X_op = O_constant;
5288 exp->X_add_number
5289 = ((i.op[reg_slot].regs->reg_num
5290 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5291 << 4);
5292 }
5293 else
5294 {
5295 unsigned int imm_slot;
5296
5297 if (i.tm.opcode_modifier.vexw == VEXW0)
5298 {
5299 /* If VexW0 is set, the third operand is the source and
5300 the second operand is encoded in the immediate
5301 operand. */
5302 source = 2;
5303 reg_slot = 1;
5304 }
5305 else
5306 {
5307 /* VexW1 is set, the second operand is the source and
5308 the third operand is encoded in the immediate
5309 operand. */
5310 source = 1;
5311 reg_slot = 2;
5312 }
5313
5314 if (i.tm.opcode_modifier.immext)
5315 {
5316 /* When ImmExt is set, the immediate byte is the last
5317 operand. */
5318 imm_slot = i.operands - 1;
5319 source--;
5320 reg_slot--;
5321 }
5322 else
5323 {
5324 imm_slot = 0;
5325
5326 /* Turn on Imm8 so that output_imm will generate it. */
5327 i.types[imm_slot].bitfield.imm8 = 1;
5328 }
5329
5330 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5331 &regxmm)
5332 || operand_type_equal (&i.tm.operand_types[reg_slot],
5333 &regymm));
5334 i.op[imm_slot].imms->X_add_number
5335 |= ((i.op[reg_slot].regs->reg_num
5336 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5337 << 4);
5338 }
5339
5340 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5341 || operand_type_equal (&i.tm.operand_types[nds],
5342 &regymm));
5343 i.vex.register_specifier = i.op[nds].regs;
5344 }
5345 else
5346 source = dest = 0;
5347
5348 /* i.reg_operands MUST be the number of real register operands;
5349 implicit registers do not count. If there are 3 register
5350 operands, it must be an instruction with VexNDS. For an
5351 instruction with VexNDD, the destination register is encoded
5352 in the VEX prefix. If there are 4 register operands, it must be
5353 an instruction with VEX prefix and 3 sources. */
5354 if (i.mem_operands == 0
5355 && ((i.reg_operands == 2
5356 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5357 || (i.reg_operands == 3
5358 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5359 || (i.reg_operands == 4 && vex_3_sources)))
5360 {
5361 switch (i.operands)
5362 {
5363 case 2:
5364 source = 0;
5365 break;
5366 case 3:
5367 /* When there are 3 operands, one of them may be immediate,
5368 which may be the first or the last operand. Otherwise,
5369 the first operand must be shift count register (cl) or it
5370 is an instruction with VexNDS. */
5371 gas_assert (i.imm_operands == 1
5372 || (i.imm_operands == 0
5373 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5374 || i.types[0].bitfield.shiftcount)));
5375 if (operand_type_check (i.types[0], imm)
5376 || i.types[0].bitfield.shiftcount)
5377 source = 1;
5378 else
5379 source = 0;
5380 break;
5381 case 4:
5382 /* When there are 4 operands, the first two must be 8bit
5383 immediate operands. The source operand will be the 3rd
5384 one.
5385
5386 For instructions with VexNDS, if the first operand is
5387 an imm8, the source operand is the 2nd one. If the last
5388 operand is imm8, the source operand is the first one. */
5389 gas_assert ((i.imm_operands == 2
5390 && i.types[0].bitfield.imm8
5391 && i.types[1].bitfield.imm8)
5392 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5393 && i.imm_operands == 1
5394 && (i.types[0].bitfield.imm8
5395 || i.types[i.operands - 1].bitfield.imm8)));
5396 if (i.imm_operands == 2)
5397 source = 2;
5398 else
5399 {
5400 if (i.types[0].bitfield.imm8)
5401 source = 1;
5402 else
5403 source = 0;
5404 }
5405 break;
5406 case 5:
5407 break;
5408 default:
5409 abort ();
5410 }
5411
5412 if (!vex_3_sources)
5413 {
5414 dest = source + 1;
5415
5416 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5417 {
5418 /* For instructions with VexNDS, the register-only
5419 source operand must be 32/64bit integer, XMM or
5420 YMM register. It is encoded in VEX prefix. We
5421 need to clear RegMem bit before calling
5422 operand_type_equal. */
5423
5424 i386_operand_type op;
5425 unsigned int vvvv;
5426
5427 /* Check register-only source operand when two source
5428 operands are swapped. */
5429 if (!i.tm.operand_types[source].bitfield.baseindex
5430 && i.tm.operand_types[dest].bitfield.baseindex)
5431 {
5432 vvvv = source;
5433 source = dest;
5434 }
5435 else
5436 vvvv = dest;
5437
5438 op = i.tm.operand_types[vvvv];
5439 op.bitfield.regmem = 0;
5440 if ((dest + 1) >= i.operands
5441 || (op.bitfield.reg32 != 1
5442 && op.bitfield.reg64 != 1
5443 && !operand_type_equal (&op, &regxmm)
5444 && !operand_type_equal (&op, &regymm)))
5445 abort ();
5446 i.vex.register_specifier = i.op[vvvv].regs;
5447 dest++;
5448 }
5449 }
5450
5451 i.rm.mode = 3;
5452 /* One of the register operands will be encoded in the i.rm.reg
5453 field, the other in the combined i.rm.mode and i.rm.regmem
5454 fields. If no form of this instruction supports a memory
5455 destination operand, then we assume the source operand may
5456 sometimes be a memory operand and so we need to store the
5457 destination in the i.rm.reg field. */
5458 if (!i.tm.operand_types[dest].bitfield.regmem
5459 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5460 {
5461 i.rm.reg = i.op[dest].regs->reg_num;
5462 i.rm.regmem = i.op[source].regs->reg_num;
5463 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5464 i.rex |= REX_R;
5465 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5466 i.rex |= REX_B;
5467 }
5468 else
5469 {
5470 i.rm.reg = i.op[source].regs->reg_num;
5471 i.rm.regmem = i.op[dest].regs->reg_num;
5472 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5473 i.rex |= REX_B;
5474 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5475 i.rex |= REX_R;
5476 }
5477 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5478 {
5479 if (!i.types[0].bitfield.control
5480 && !i.types[1].bitfield.control)
5481 abort ();
5482 i.rex &= ~(REX_R | REX_B);
5483 add_prefix (LOCK_PREFIX_OPCODE);
5484 }
5485 }
5486 else
5487 { /* If it's not 2 reg operands... */
5488 unsigned int mem;
5489
5490 if (i.mem_operands)
5491 {
5492 unsigned int fake_zero_displacement = 0;
5493 unsigned int op;
5494
5495 for (op = 0; op < i.operands; op++)
5496 if (operand_type_check (i.types[op], anymem))
5497 break;
5498 gas_assert (op < i.operands);
5499
5500 if (i.tm.opcode_modifier.vecsib)
5501 {
5502 if (i.index_reg->reg_num == RegEiz
5503 || i.index_reg->reg_num == RegRiz)
5504 abort ();
5505
5506 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5507 if (!i.base_reg)
5508 {
5509 i.sib.base = NO_BASE_REGISTER;
5510 i.sib.scale = i.log2_scale_factor;
5511 i.types[op].bitfield.disp8 = 0;
5512 i.types[op].bitfield.disp16 = 0;
5513 i.types[op].bitfield.disp64 = 0;
5514 if (flag_code != CODE_64BIT)
5515 {
5516 /* Must be 32 bit */
5517 i.types[op].bitfield.disp32 = 1;
5518 i.types[op].bitfield.disp32s = 0;
5519 }
5520 else
5521 {
5522 i.types[op].bitfield.disp32 = 0;
5523 i.types[op].bitfield.disp32s = 1;
5524 }
5525 }
5526 i.sib.index = i.index_reg->reg_num;
5527 if ((i.index_reg->reg_flags & RegRex) != 0)
5528 i.rex |= REX_X;
5529 }
5530
5531 default_seg = &ds;
5532
5533 if (i.base_reg == 0)
5534 {
5535 i.rm.mode = 0;
5536 if (!i.disp_operands)
5537 {
5538 fake_zero_displacement = 1;
5539 /* Instructions with VSIB byte need 32bit displacement
5540 if there is no base register. */
5541 if (i.tm.opcode_modifier.vecsib)
5542 i.types[op].bitfield.disp32 = 1;
5543 }
5544 if (i.index_reg == 0)
5545 {
5546 gas_assert (!i.tm.opcode_modifier.vecsib);
5547 /* Operand is just <disp> */
5548 if (flag_code == CODE_64BIT)
5549 {
5550 /* In 64bit mode the plain 32bit absolute form is
5551 taken over by RIP relative addressing, so true
5552 absolute addressing must be encoded via one of
5553 the redundant SIB forms. */
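/* E.g. "mov foo, %eax" with no base or index becomes 8b 04 25 <disp32>
here, whereas "mov foo(%rip), %eax" uses the shorter 8b 05 <disp32>. */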
5554 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5555 i.sib.base = NO_BASE_REGISTER;
5556 i.sib.index = NO_INDEX_REGISTER;
5557 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5558 ? disp32s : disp32);
5559 }
5560 else if ((flag_code == CODE_16BIT)
5561 ^ (i.prefix[ADDR_PREFIX] != 0))
5562 {
5563 i.rm.regmem = NO_BASE_REGISTER_16;
5564 i.types[op] = disp16;
5565 }
5566 else
5567 {
5568 i.rm.regmem = NO_BASE_REGISTER;
5569 i.types[op] = disp32;
5570 }
5571 }
5572 else if (!i.tm.opcode_modifier.vecsib)
5573 {
5574 /* !i.base_reg && i.index_reg */
5575 if (i.index_reg->reg_num == RegEiz
5576 || i.index_reg->reg_num == RegRiz)
5577 i.sib.index = NO_INDEX_REGISTER;
5578 else
5579 i.sib.index = i.index_reg->reg_num;
5580 i.sib.base = NO_BASE_REGISTER;
5581 i.sib.scale = i.log2_scale_factor;
5582 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5583 i.types[op].bitfield.disp8 = 0;
5584 i.types[op].bitfield.disp16 = 0;
5585 i.types[op].bitfield.disp64 = 0;
5586 if (flag_code != CODE_64BIT)
5587 {
5588 /* Must be 32 bit */
5589 i.types[op].bitfield.disp32 = 1;
5590 i.types[op].bitfield.disp32s = 0;
5591 }
5592 else
5593 {
5594 i.types[op].bitfield.disp32 = 0;
5595 i.types[op].bitfield.disp32s = 1;
5596 }
5597 if ((i.index_reg->reg_flags & RegRex) != 0)
5598 i.rex |= REX_X;
5599 }
5600 }
5601 /* RIP addressing for 64bit mode. */
5602 else if (i.base_reg->reg_num == RegRip ||
5603 i.base_reg->reg_num == RegEip)
5604 {
5605 gas_assert (!i.tm.opcode_modifier.vecsib);
5606 i.rm.regmem = NO_BASE_REGISTER;
5607 i.types[op].bitfield.disp8 = 0;
5608 i.types[op].bitfield.disp16 = 0;
5609 i.types[op].bitfield.disp32 = 0;
5610 i.types[op].bitfield.disp32s = 1;
5611 i.types[op].bitfield.disp64 = 0;
5612 i.flags[op] |= Operand_PCrel;
5613 if (! i.disp_operands)
5614 fake_zero_displacement = 1;
5615 }
5616 else if (i.base_reg->reg_type.bitfield.reg16)
5617 {
5618 gas_assert (!i.tm.opcode_modifier.vecsib);
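/* 16-bit ModRM r/m encodings: 0 (%bx,%si), 1 (%bx,%di), 2 (%bp,%si),
3 (%bp,%di), 4 (%si), 5 (%di), 6 (%bp)/disp16, 7 (%bx). */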
5619 switch (i.base_reg->reg_num)
5620 {
5621 case 3: /* (%bx) */
5622 if (i.index_reg == 0)
5623 i.rm.regmem = 7;
5624 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5625 i.rm.regmem = i.index_reg->reg_num - 6;
5626 break;
5627 case 5: /* (%bp) */
5628 default_seg = &ss;
5629 if (i.index_reg == 0)
5630 {
5631 i.rm.regmem = 6;
5632 if (operand_type_check (i.types[op], disp) == 0)
5633 {
5634 /* fake (%bp) into 0(%bp) */
5635 i.types[op].bitfield.disp8 = 1;
5636 fake_zero_displacement = 1;
5637 }
5638 }
5639 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5640 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5641 break;
5642 default: /* (%si) -> 4 or (%di) -> 5 */
5643 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5644 }
5645 i.rm.mode = mode_from_disp_size (i.types[op]);
5646 }
5647 else /* i.base_reg and 32/64 bit mode */
5648 {
5649 if (flag_code == CODE_64BIT
5650 && operand_type_check (i.types[op], disp))
5651 {
5652 i386_operand_type temp;
5653 operand_type_set (&temp, 0);
5654 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5655 i.types[op] = temp;
5656 if (i.prefix[ADDR_PREFIX] == 0)
5657 i.types[op].bitfield.disp32s = 1;
5658 else
5659 i.types[op].bitfield.disp32 = 1;
5660 }
5661
5662 if (!i.tm.opcode_modifier.vecsib)
5663 i.rm.regmem = i.base_reg->reg_num;
5664 if ((i.base_reg->reg_flags & RegRex) != 0)
5665 i.rex |= REX_B;
5666 i.sib.base = i.base_reg->reg_num;
5667 /* x86-64 ignores REX prefix bit here to avoid decoder
5668 complications. */
5669 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5670 {
5671 default_seg = &ss;
5672 if (i.disp_operands == 0)
5673 {
5674 fake_zero_displacement = 1;
5675 i.types[op].bitfield.disp8 = 1;
5676 }
5677 }
5678 else if (i.base_reg->reg_num == ESP_REG_NUM)
5679 {
5680 default_seg = &ss;
5681 }
5682 i.sib.scale = i.log2_scale_factor;
5683 if (i.index_reg == 0)
5684 {
5685 gas_assert (!i.tm.opcode_modifier.vecsib);
5686 /* <disp>(%esp) becomes two byte modrm with no index
5687 register. We've already stored the code for esp
5688 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5689 Any base register besides %esp will not use the
5690 extra modrm byte. */
5691 i.sib.index = NO_INDEX_REGISTER;
5692 }
5693 else if (!i.tm.opcode_modifier.vecsib)
5694 {
5695 if (i.index_reg->reg_num == RegEiz
5696 || i.index_reg->reg_num == RegRiz)
5697 i.sib.index = NO_INDEX_REGISTER;
5698 else
5699 i.sib.index = i.index_reg->reg_num;
5700 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5701 if ((i.index_reg->reg_flags & RegRex) != 0)
5702 i.rex |= REX_X;
5703 }
5704
5705 if (i.disp_operands
5706 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5707 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5708 i.rm.mode = 0;
5709 else
5710 {
5711 if (!fake_zero_displacement
5712 && !i.disp_operands
5713 && i.disp_encoding)
5714 {
5715 fake_zero_displacement = 1;
5716 if (i.disp_encoding == disp_encoding_8bit)
5717 i.types[op].bitfield.disp8 = 1;
5718 else
5719 i.types[op].bitfield.disp32 = 1;
5720 }
5721 i.rm.mode = mode_from_disp_size (i.types[op]);
5722 }
5723 }
5724
5725 if (fake_zero_displacement)
5726 {
5727 /* Fakes a zero displacement assuming that i.types[op]
5728 holds the correct displacement size. */
5729 expressionS *exp;
5730
5731 gas_assert (i.op[op].disps == 0);
5732 exp = &disp_expressions[i.disp_operands++];
5733 i.op[op].disps = exp;
5734 exp->X_op = O_constant;
5735 exp->X_add_number = 0;
5736 exp->X_add_symbol = (symbolS *) 0;
5737 exp->X_op_symbol = (symbolS *) 0;
5738 }
5739
5740 mem = op;
5741 }
5742 else
5743 mem = ~0;
5744
5745 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5746 {
5747 if (operand_type_check (i.types[0], imm))
5748 i.vex.register_specifier = NULL;
5749 else
5750 {
5751 /* VEX.vvvv encodes one of the sources when the first
5752 operand is not an immediate. */
5753 if (i.tm.opcode_modifier.vexw == VEXW0)
5754 i.vex.register_specifier = i.op[0].regs;
5755 else
5756 i.vex.register_specifier = i.op[1].regs;
5757 }
5758
5759 /* Destination is an XMM register encoded in the ModRM.reg
5760 and VEX.R bit. */
5761 i.rm.reg = i.op[2].regs->reg_num;
5762 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5763 i.rex |= REX_R;
5764
5765 /* ModRM.rm and VEX.B encodes the other source. */
5766 if (!i.mem_operands)
5767 {
5768 i.rm.mode = 3;
5769
5770 if (i.tm.opcode_modifier.vexw == VEXW0)
5771 i.rm.regmem = i.op[1].regs->reg_num;
5772 else
5773 i.rm.regmem = i.op[0].regs->reg_num;
5774
5775 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5776 i.rex |= REX_B;
5777 }
5778 }
5779 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5780 {
5781 i.vex.register_specifier = i.op[2].regs;
5782 if (!i.mem_operands)
5783 {
5784 i.rm.mode = 3;
5785 i.rm.regmem = i.op[1].regs->reg_num;
5786 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5787 i.rex |= REX_B;
5788 }
5789 }
5790 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5791 (if any) based on i.tm.extension_opcode. Again, we must be
5792 careful to make sure that segment/control/debug/test/MMX
5793 registers are coded into the i.rm.reg field. */
5794 else if (i.reg_operands)
5795 {
5796 unsigned int op;
5797 unsigned int vex_reg = ~0;
5798
5799 for (op = 0; op < i.operands; op++)
5800 if (i.types[op].bitfield.reg8
5801 || i.types[op].bitfield.reg16
5802 || i.types[op].bitfield.reg32
5803 || i.types[op].bitfield.reg64
5804 || i.types[op].bitfield.regmmx
5805 || i.types[op].bitfield.regxmm
5806 || i.types[op].bitfield.regymm
5807 || i.types[op].bitfield.sreg2
5808 || i.types[op].bitfield.sreg3
5809 || i.types[op].bitfield.control
5810 || i.types[op].bitfield.debug
5811 || i.types[op].bitfield.test)
5812 break;
5813
5814 if (vex_3_sources)
5815 op = dest;
5816 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5817 {
5818 /* For instructions with VexNDS, the register-only
5819 source operand is encoded in VEX prefix. */
5820 gas_assert (mem != (unsigned int) ~0);
5821
5822 if (op > mem)
5823 {
5824 vex_reg = op++;
5825 gas_assert (op < i.operands);
5826 }
5827 else
5828 {
5829 /* Check register-only source operand when two source
5830 operands are swapped. */
5831 if (!i.tm.operand_types[op].bitfield.baseindex
5832 && i.tm.operand_types[op + 1].bitfield.baseindex)
5833 {
5834 vex_reg = op;
5835 op += 2;
5836 gas_assert (mem == (vex_reg + 1)
5837 && op < i.operands);
5838 }
5839 else
5840 {
5841 vex_reg = op + 1;
5842 gas_assert (vex_reg < i.operands);
5843 }
5844 }
5845 }
5846 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5847 {
5848 /* For instructions with VexNDD, the register destination
5849 is encoded in VEX prefix. */
5850 if (i.mem_operands == 0)
5851 {
5852 /* There is no memory operand. */
5853 gas_assert ((op + 2) == i.operands);
5854 vex_reg = op + 1;
5855 }
5856 else
5857 {
5858 /* There are only 2 operands. */
5859 gas_assert (op < 2 && i.operands == 2);
5860 vex_reg = 1;
5861 }
5862 }
5863 else
5864 gas_assert (op < i.operands);
5865
5866 if (vex_reg != (unsigned int) ~0)
5867 {
5868 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5869
5870 if (type->bitfield.reg32 != 1
5871 && type->bitfield.reg64 != 1
5872 && !operand_type_equal (type, &regxmm)
5873 && !operand_type_equal (type, &regymm))
5874 abort ();
5875
5876 i.vex.register_specifier = i.op[vex_reg].regs;
5877 }
5878
5879 /* Don't set OP operand twice. */
5880 if (vex_reg != op)
5881 {
5882 /* If there is an extension opcode to put here, the
5883 register number must be put into the regmem field. */
5884 if (i.tm.extension_opcode != None)
5885 {
5886 i.rm.regmem = i.op[op].regs->reg_num;
5887 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5888 i.rex |= REX_B;
5889 }
5890 else
5891 {
5892 i.rm.reg = i.op[op].regs->reg_num;
5893 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5894 i.rex |= REX_R;
5895 }
5896 }
5897
5898 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5899 must set it to 3 to indicate this is a register operand
5900 in the regmem field. */
5901 if (!i.mem_operands)
5902 i.rm.mode = 3;
5903 }
5904
5905 /* Fill in i.rm.reg field with extension opcode (if any). */
5906 if (i.tm.extension_opcode != None)
5907 i.rm.reg = i.tm.extension_opcode;
5908 }
5909 return default_seg;
5910 }
5911
5912 static void
5913 output_branch (void)
5914 {
5915 char *p;
5916 int size;
5917 int code16;
5918 int prefix;
5919 relax_substateT subtype;
5920 symbolS *sym;
5921 offsetT off;
5922
5923 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5924 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
5925
5926 prefix = 0;
5927 if (i.prefix[DATA_PREFIX] != 0)
5928 {
5929 prefix = 1;
5930 i.prefixes -= 1;
5931 code16 ^= CODE16;
5932 }
5933 /* Pentium4 branch hints. */
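/* A 0x2e (%cs) prefix hints "not taken"; 0x3e (%ds) hints "taken". */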
5934 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5935 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5936 {
5937 prefix++;
5938 i.prefixes--;
5939 }
5940 if (i.prefix[REX_PREFIX] != 0)
5941 {
5942 prefix++;
5943 i.prefixes--;
5944 }
5945
5946 if (i.prefixes != 0 && !intel_syntax)
5947 as_warn (_("skipping prefixes on this instruction"));
5948
5949 /* It's always a symbol; end frag & set up for relax.
5950 Make sure there is enough room in this frag for the largest
5951 instruction we may generate in md_convert_frag. This is 2
5952 bytes for the opcode and room for the prefix and largest
5953 displacement. */
5954 frag_grow (prefix + 2 + 4);
5955 /* Prefix and 1 opcode byte go in fr_fix. */
5956 p = frag_more (prefix + 1);
5957 if (i.prefix[DATA_PREFIX] != 0)
5958 *p++ = DATA_PREFIX_OPCODE;
5959 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5960 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5961 *p++ = i.prefix[SEG_PREFIX];
5962 if (i.prefix[REX_PREFIX] != 0)
5963 *p++ = i.prefix[REX_PREFIX];
5964 *p = i.tm.base_opcode;
5965
5966 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5967 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5968 else if (cpu_arch_flags.bitfield.cpui386)
5969 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5970 else
5971 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5972 subtype |= code16;
5973
5974 sym = i.op[0].disps->X_add_symbol;
5975 off = i.op[0].disps->X_add_number;
5976
5977 if (i.op[0].disps->X_op != O_constant
5978 && i.op[0].disps->X_op != O_symbol)
5979 {
5980 /* Handle complex expressions. */
5981 sym = make_expr_symbol (i.op[0].disps);
5982 off = 0;
5983 }
5984
5985 /* 1 possible extra opcode + 4 byte displacement go in var part.
5986 Pass reloc in fr_var. */
5987 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5988 }
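/* A rough illustration of the frag layout built above: for a plain
   `jne foo' in 32-bit code (no prefixes), the single byte 0x75 goes
   into the fixed part of the frag, and frag_var reserves 5 variable
   bytes so that md_convert_frag can later widen the jump to
   0x0f 0x85 plus a 4-byte displacement if `foo' turns out to be far
   away.  The symbol name and distances are made up for the example.  */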
5989
5990 static void
5991 output_jump (void)
5992 {
5993 char *p;
5994 int size;
5995 fixS *fixP;
5996
5997 if (i.tm.opcode_modifier.jumpbyte)
5998 {
5999 /* This is a loop or jecxz type instruction. */
6000 size = 1;
6001 if (i.prefix[ADDR_PREFIX] != 0)
6002 {
6003 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6004 i.prefixes -= 1;
6005 }
6006 /* Pentium4 branch hints. */
6007 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6008 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6009 {
6010 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6011 i.prefixes--;
6012 }
6013 }
6014 else
6015 {
6016 int code16;
6017
6018 code16 = 0;
6019 if (flag_code == CODE_16BIT)
6020 code16 = CODE16;
6021
6022 if (i.prefix[DATA_PREFIX] != 0)
6023 {
6024 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6025 i.prefixes -= 1;
6026 code16 ^= CODE16;
6027 }
6028
6029 size = 4;
6030 if (code16)
6031 size = 2;
6032 }
6033
6034 if (i.prefix[REX_PREFIX] != 0)
6035 {
6036 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6037 i.prefixes -= 1;
6038 }
6039
6040 if (i.prefixes != 0 && !intel_syntax)
6041 as_warn (_("skipping prefixes on this instruction"));
6042
6043 p = frag_more (1 + size);
6044 *p++ = i.tm.base_opcode;
6045
6046 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6047 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6048
6049 /* All jumps handled here are signed, but don't use a signed limit
6050 check for 32 and 16 bit jumps as we want to allow wrap around at
6051 4G and 64k respectively. */
6052 if (size == 1)
6053 fixP->fx_signed = 1;
6054 }
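/* Sketch of what the routine above emits: a one-byte-displacement
   insn such as `jecxz foo' or `loop foo' gets its opcode byte plus a
   1-byte signed pc-relative fixup, while a direct `call foo' in
   32-bit code gets 0xe8 plus a 4-byte pc-relative fixup (2 bytes if
   a data-size prefix switched us to 16-bit displacements).  The
   example mnemonics are only illustrative.  */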
6055
6056 static void
6057 output_interseg_jump (void)
6058 {
6059 char *p;
6060 int size;
6061 int prefix;
6062 int code16;
6063
6064 code16 = 0;
6065 if (flag_code == CODE_16BIT)
6066 code16 = CODE16;
6067
6068 prefix = 0;
6069 if (i.prefix[DATA_PREFIX] != 0)
6070 {
6071 prefix = 1;
6072 i.prefixes -= 1;
6073 code16 ^= CODE16;
6074 }
6075 if (i.prefix[REX_PREFIX] != 0)
6076 {
6077 prefix++;
6078 i.prefixes -= 1;
6079 }
6080
6081 size = 4;
6082 if (code16)
6083 size = 2;
6084
6085 if (i.prefixes != 0 && !intel_syntax)
6086 as_warn (_("skipping prefixes on this instruction"));
6087
6088 /* 1 opcode; 2 segment; offset */
6089 p = frag_more (prefix + 1 + 2 + size);
6090
6091 if (i.prefix[DATA_PREFIX] != 0)
6092 *p++ = DATA_PREFIX_OPCODE;
6093
6094 if (i.prefix[REX_PREFIX] != 0)
6095 *p++ = i.prefix[REX_PREFIX];
6096
6097 *p++ = i.tm.base_opcode;
6098 if (i.op[1].imms->X_op == O_constant)
6099 {
6100 offsetT n = i.op[1].imms->X_add_number;
6101
6102 if (size == 2
6103 && !fits_in_unsigned_word (n)
6104 && !fits_in_signed_word (n))
6105 {
6106 as_bad (_("16-bit jump out of range"));
6107 return;
6108 }
6109 md_number_to_chars (p, n, size);
6110 }
6111 else
6112 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6113 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6114 if (i.op[0].imms->X_op != O_constant)
6115 as_bad (_("can't handle non absolute segment in `%s'"),
6116 i.tm.name);
6117 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6118 }
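/* For illustration (assuming 32-bit code): `ljmp $0x08,$0x1000'
   comes out as EA 00 10 00 00 08 00 -- the opcode, then the 4-byte
   offset 0x1000 in little-endian order, then the 2-byte segment
   selector 0x08.  The operand values are made up for the example.  */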
6119
6120 static void
6121 output_insn (void)
6122 {
6123 fragS *insn_start_frag;
6124 offsetT insn_start_off;
6125
6126 /* Tie dwarf2 debug info to the address at the start of the insn.
6127 We can't do this after the insn has been output as the current
6128 frag may have been closed off, e.g. by frag_var. */
6129 dwarf2_emit_insn (0);
6130
6131 insn_start_frag = frag_now;
6132 insn_start_off = frag_now_fix ();
6133
6134 /* Output jumps. */
6135 if (i.tm.opcode_modifier.jump)
6136 output_branch ();
6137 else if (i.tm.opcode_modifier.jumpbyte
6138 || i.tm.opcode_modifier.jumpdword)
6139 output_jump ();
6140 else if (i.tm.opcode_modifier.jumpintersegment)
6141 output_interseg_jump ();
6142 else
6143 {
6144 /* Output normal instructions here. */
6145 char *p;
6146 unsigned char *q;
6147 unsigned int j;
6148 unsigned int prefix;
6149
6150 /* Since the VEX prefix contains the implicit prefix, we don't
6151 need the explicit prefix. */
6152 if (!i.tm.opcode_modifier.vex)
6153 {
6154 switch (i.tm.opcode_length)
6155 {
6156 case 3:
6157 if (i.tm.base_opcode & 0xff000000)
6158 {
6159 prefix = (i.tm.base_opcode >> 24) & 0xff;
6160 goto check_prefix;
6161 }
6162 break;
6163 case 2:
6164 if ((i.tm.base_opcode & 0xff0000) != 0)
6165 {
6166 prefix = (i.tm.base_opcode >> 16) & 0xff;
6167 if (i.tm.cpu_flags.bitfield.cpupadlock)
6168 {
6169 check_prefix:
6170 if (prefix != REPE_PREFIX_OPCODE
6171 || (i.prefix[REP_PREFIX]
6172 != REPE_PREFIX_OPCODE))
6173 add_prefix (prefix);
6174 }
6175 else
6176 add_prefix (prefix);
6177 }
6178 break;
6179 case 1:
6180 break;
6181 default:
6182 abort ();
6183 }
6184
6185 /* The prefix bytes. */
6186 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6187 if (*q)
6188 FRAG_APPEND_1_CHAR (*q);
6189 }
6190
6191 if (i.tm.opcode_modifier.vex)
6192 {
6193 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6194 if (*q)
6195 switch (j)
6196 {
6197 case REX_PREFIX:
6198 /* REX byte is encoded in VEX prefix. */
6199 break;
6200 case SEG_PREFIX:
6201 case ADDR_PREFIX:
6202 FRAG_APPEND_1_CHAR (*q);
6203 break;
6204 default:
6205 /* There should be no other prefixes for instructions
6206 with a VEX prefix. */
6207 abort ();
6208 }
6209
6210 /* Now the VEX prefix. */
6211 p = frag_more (i.vex.length);
6212 for (j = 0; j < i.vex.length; j++)
6213 p[j] = i.vex.bytes[j];
6214 }
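/* The VEX bytes copied out here were filled in earlier by
   build_vex_prefix.  As a rough example, `vaddps %ymm2,%ymm1,%ymm0'
   uses the 2-byte form C5 F4: F4 packs ~R=1, vvvv=1110 (the inverted
   encoding of %ymm1), L=1 for a 256-bit operation and pp=00; the
   opcode 58 and the ModRM byte follow below.  The instruction choice
   is illustrative only.  */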
6215
6216 /* Now the opcode; be careful about word order here! */
6217 if (i.tm.opcode_length == 1)
6218 {
6219 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6220 }
6221 else
6222 {
6223 switch (i.tm.opcode_length)
6224 {
6225 case 3:
6226 p = frag_more (3);
6227 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6228 break;
6229 case 2:
6230 p = frag_more (2);
6231 break;
6232 default:
6233 abort ();
6234 break;
6235 }
6236
6237 /* Put out high byte first: can't use md_number_to_chars! */
6238 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6239 *p = i.tm.base_opcode & 0xff;
6240 }
6241
6242 /* Now the modrm byte and sib byte (if present). */
6243 if (i.tm.opcode_modifier.modrm)
6244 {
6245 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6246 | i.rm.reg << 3
6247 | i.rm.mode << 6));
6248 /* If i.rm.regmem == ESP (4)
6249 && i.rm.mode != (Register mode)
6250 && not 16 bit
6251 ==> need a SIB byte. */
6252 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6253 && i.rm.mode != 3
6254 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6255 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6256 | i.sib.index << 3
6257 | i.sib.scale << 6));
6258 }
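/* Worked example of the two bytes above (made-up operands):
   `movl %eax,(%ebx,%esi,4)' encodes as 89 04 B3 -- ModRM 0x04 is
   mod=00, reg=000 (%eax), rm=100 (the ESP escape requesting a SIB
   byte), and SIB 0xB3 is scale=10 (*4), index=110 (%esi),
   base=011 (%ebx).  */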
6259
6260 if (i.disp_operands)
6261 output_disp (insn_start_frag, insn_start_off);
6262
6263 if (i.imm_operands)
6264 output_imm (insn_start_frag, insn_start_off);
6265 }
6266
6267 #ifdef DEBUG386
6268 if (flag_debug)
6269 {
6270 pi ("" /*line*/, &i);
6271 }
6272 #endif /* DEBUG386 */
6273 }
6274
6275 /* Return the size of the displacement operand N. */
6276
6277 static int
6278 disp_size (unsigned int n)
6279 {
6280 int size = 4;
6281 if (i.types[n].bitfield.disp64)
6282 size = 8;
6283 else if (i.types[n].bitfield.disp8)
6284 size = 1;
6285 else if (i.types[n].bitfield.disp16)
6286 size = 2;
6287 return size;
6288 }
6289
6290 /* Return the size of the immediate operand N. */
6291
6292 static int
6293 imm_size (unsigned int n)
6294 {
6295 int size = 4;
6296 if (i.types[n].bitfield.imm64)
6297 size = 8;
6298 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6299 size = 1;
6300 else if (i.types[n].bitfield.imm16)
6301 size = 2;
6302 return size;
6303 }
6304
6305 static void
6306 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6307 {
6308 char *p;
6309 unsigned int n;
6310
6311 for (n = 0; n < i.operands; n++)
6312 {
6313 if (operand_type_check (i.types[n], disp))
6314 {
6315 if (i.op[n].disps->X_op == O_constant)
6316 {
6317 int size = disp_size (n);
6318 offsetT val;
6319
6320 val = offset_in_range (i.op[n].disps->X_add_number,
6321 size);
6322 p = frag_more (size);
6323 md_number_to_chars (p, val, size);
6324 }
6325 else
6326 {
6327 enum bfd_reloc_code_real reloc_type;
6328 int size = disp_size (n);
6329 int sign = i.types[n].bitfield.disp32s;
6330 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6331
6332 /* We can't have 8 bit displacement here. */
6333 gas_assert (!i.types[n].bitfield.disp8);
6334
6335 /* The PC relative address is computed relative
6336 to the instruction boundary, so if immediate
6337 fields follow, we need to adjust the value. */
6338 if (pcrel && i.imm_operands)
6339 {
6340 unsigned int n1;
6341 int sz = 0;
6342
6343 for (n1 = 0; n1 < i.operands; n1++)
6344 if (operand_type_check (i.types[n1], imm))
6345 {
6346 /* Only one immediate is allowed for a PC
6347 relative address. */
6348 gas_assert (sz == 0);
6349 sz = imm_size (n1);
6350 i.op[n].disps->X_add_number -= sz;
6351 }
6352 /* We should find the immediate. */
6353 gas_assert (sz != 0);
6354 }
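/* Example of the adjustment above (hypothetical operand values):
   for `testl $0x12345678, foo(%rip)' the 4-byte immediate follows
   the displacement, so 4 is subtracted from the displacement's
   addend; the fixup is applied at the displacement field, but the
   CPU resolves RIP-relative addresses from the end of the whole
   instruction.  */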
6355
6356 p = frag_more (size);
6357 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6358 if (GOT_symbol
6359 && GOT_symbol == i.op[n].disps->X_add_symbol
6360 && (((reloc_type == BFD_RELOC_32
6361 || reloc_type == BFD_RELOC_X86_64_32S
6362 || (reloc_type == BFD_RELOC_64
6363 && object_64bit))
6364 && (i.op[n].disps->X_op == O_symbol
6365 || (i.op[n].disps->X_op == O_add
6366 && ((symbol_get_value_expression
6367 (i.op[n].disps->X_op_symbol)->X_op)
6368 == O_subtract))))
6369 || reloc_type == BFD_RELOC_32_PCREL))
6370 {
6371 offsetT add;
6372
6373 if (insn_start_frag == frag_now)
6374 add = (p - frag_now->fr_literal) - insn_start_off;
6375 else
6376 {
6377 fragS *fr;
6378
6379 add = insn_start_frag->fr_fix - insn_start_off;
6380 for (fr = insn_start_frag->fr_next;
6381 fr && fr != frag_now; fr = fr->fr_next)
6382 add += fr->fr_fix;
6383 add += p - frag_now->fr_literal;
6384 }
6385
6386 if (!object_64bit)
6387 {
6388 reloc_type = BFD_RELOC_386_GOTPC;
6389 i.op[n].imms->X_add_number += add;
6390 }
6391 else if (reloc_type == BFD_RELOC_64)
6392 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6393 else
6394 /* Don't do the adjustment for x86-64, as there
6395 the pcrel addressing is relative to the _next_
6396 insn, and that is taken care of in other code. */
6397 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6398 }
6399 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6400 i.op[n].disps, pcrel, reloc_type);
6401 }
6402 }
6403 }
6404 }
6405
6406 static void
6407 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6408 {
6409 char *p;
6410 unsigned int n;
6411
6412 for (n = 0; n < i.operands; n++)
6413 {
6414 if (operand_type_check (i.types[n], imm))
6415 {
6416 if (i.op[n].imms->X_op == O_constant)
6417 {
6418 int size = imm_size (n);
6419 offsetT val;
6420
6421 val = offset_in_range (i.op[n].imms->X_add_number,
6422 size);
6423 p = frag_more (size);
6424 md_number_to_chars (p, val, size);
6425 }
6426 else
6427 {
6428 /* Not absolute_section.
6429 Need a 32-bit fixup (don't support 8bit
6430 non-absolute imms). Try to support other
6431 sizes ... */
6432 enum bfd_reloc_code_real reloc_type;
6433 int size = imm_size (n);
6434 int sign;
6435
6436 if (i.types[n].bitfield.imm32s
6437 && (i.suffix == QWORD_MNEM_SUFFIX
6438 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6439 sign = 1;
6440 else
6441 sign = 0;
6442
6443 p = frag_more (size);
6444 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6445
6446 /* This is tough to explain. We end up with this one if we
6447 * have operands that look like
6448 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6449 * obtain the absolute address of the GOT, and it is strongly
6450 * preferable from a performance point of view to avoid using
6451 * a runtime relocation for this. The actual sequence of
6452 * instructions often looks something like:
6453 *
6454 * call .L66
6455 * .L66:
6456 * popl %ebx
6457 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6458 *
6459 * The call and pop essentially return the absolute address
6460 * of the label .L66 and store it in %ebx. The linker itself
6461 * will ultimately change the first operand of the addl so
6462 * that %ebx points to the GOT, but to keep things simple, the
6463 * .o file must have this operand set so that it generates not
6464 * the absolute address of .L66, but the absolute address of
6465 * itself. This allows the linker to simply treat a GOTPC
6466 * relocation as asking for a pcrel offset to the GOT to be
6467 * added in, and the addend of the relocation is stored in the
6468 * operand field for the instruction itself.
6469 *
6470 * Our job here is to fix the operand so that it would add
6471 * the correct offset so that %ebx would point to itself. The
6472 * thing that is tricky is that .-.L66 will point to the
6473 * beginning of the instruction, so we need to further modify
6474 * the operand so that it will point to itself. There are
6475 * other cases where you have something like:
6476 *
6477 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6478 *
6479 * and here no correction would be required. Internally in
6480 * the assembler we treat operands of this form as not being
6481 * pcrel since the '.' is explicitly mentioned, and I wonder
6482 * whether it would simplify matters to do it this way. Who
6483 * knows. In earlier versions of the PIC patches, the
6484 * pcrel_adjust field was used to store the correction, but
6485 * since the expression is not pcrel, I felt it would be
6486 * confusing to do it this way. */
6487
6488 if ((reloc_type == BFD_RELOC_32
6489 || reloc_type == BFD_RELOC_X86_64_32S
6490 || reloc_type == BFD_RELOC_64)
6491 && GOT_symbol
6492 && GOT_symbol == i.op[n].imms->X_add_symbol
6493 && (i.op[n].imms->X_op == O_symbol
6494 || (i.op[n].imms->X_op == O_add
6495 && ((symbol_get_value_expression
6496 (i.op[n].imms->X_op_symbol)->X_op)
6497 == O_subtract))))
6498 {
6499 offsetT add;
6500
6501 if (insn_start_frag == frag_now)
6502 add = (p - frag_now->fr_literal) - insn_start_off;
6503 else
6504 {
6505 fragS *fr;
6506
6507 add = insn_start_frag->fr_fix - insn_start_off;
6508 for (fr = insn_start_frag->fr_next;
6509 fr && fr != frag_now; fr = fr->fr_next)
6510 add += fr->fr_fix;
6511 add += p - frag_now->fr_literal;
6512 }
6513
6514 if (!object_64bit)
6515 reloc_type = BFD_RELOC_386_GOTPC;
6516 else if (size == 4)
6517 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6518 else if (size == 8)
6519 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6520 i.op[n].imms->X_add_number += add;
6521 }
6522 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6523 i.op[n].imms, 0, reloc_type);
6524 }
6525 }
6526 }
6527 }
6528 \f
6529 /* x86_cons_fix_new is called via the expression parsing code when a
6530 reloc is needed. We use this hook to get the correct .got reloc. */
6531 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6532 static int cons_sign = -1;
6533
6534 void
6535 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6536 expressionS *exp)
6537 {
6538 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6539
6540 got_reloc = NO_RELOC;
6541
6542 #ifdef TE_PE
6543 if (exp->X_op == O_secrel)
6544 {
6545 exp->X_op = O_symbol;
6546 r = BFD_RELOC_32_SECREL;
6547 }
6548 #endif
6549
6550 fix_new_exp (frag, off, len, exp, 0, r);
6551 }
6552
6553 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6554 || defined (LEX_AT)
6555 # define lex_got(reloc, adjust, types) NULL
6556 #else
6557 /* Parse operands of the form
6558 <symbol>@GOTOFF+<nnn>
6559 and similar .plt or .got references.
6560
6561 If we find one, set up the correct relocation in RELOC and copy the
6562 input string, minus the `@GOTOFF' into a malloc'd buffer for
6563 parsing by the calling routine. Return this buffer, and if ADJUST
6564 is non-null set it to the length of the string we removed from the
6565 input line. Otherwise return NULL. */
6566 static char *
6567 lex_got (enum bfd_reloc_code_real *rel,
6568 int *adjust,
6569 i386_operand_type *types)
6570 {
6571 /* Some of the relocations depend on the size of the field to
6572 be relocated. But in our callers i386_immediate and i386_displacement
6573 we don't yet know the operand size (this will be set by insn
6574 matching). Hence we record the word32 relocation here,
6575 and adjust the reloc according to the real size in reloc(). */
6576 static const struct {
6577 const char *str;
6578 int len;
6579 const enum bfd_reloc_code_real rel[2];
6580 const i386_operand_type types64;
6581 } gotrel[] = {
6582 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6583 BFD_RELOC_X86_64_PLTOFF64 },
6584 OPERAND_TYPE_IMM64 },
6585 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6586 BFD_RELOC_X86_64_PLT32 },
6587 OPERAND_TYPE_IMM32_32S_DISP32 },
6588 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6589 BFD_RELOC_X86_64_GOTPLT64 },
6590 OPERAND_TYPE_IMM64_DISP64 },
6591 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6592 BFD_RELOC_X86_64_GOTOFF64 },
6593 OPERAND_TYPE_IMM64_DISP64 },
6594 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6595 BFD_RELOC_X86_64_GOTPCREL },
6596 OPERAND_TYPE_IMM32_32S_DISP32 },
6597 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6598 BFD_RELOC_X86_64_TLSGD },
6599 OPERAND_TYPE_IMM32_32S_DISP32 },
6600 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6601 _dummy_first_bfd_reloc_code_real },
6602 OPERAND_TYPE_NONE },
6603 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6604 BFD_RELOC_X86_64_TLSLD },
6605 OPERAND_TYPE_IMM32_32S_DISP32 },
6606 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6607 BFD_RELOC_X86_64_GOTTPOFF },
6608 OPERAND_TYPE_IMM32_32S_DISP32 },
6609 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6610 BFD_RELOC_X86_64_TPOFF32 },
6611 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6612 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6613 _dummy_first_bfd_reloc_code_real },
6614 OPERAND_TYPE_NONE },
6615 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6616 BFD_RELOC_X86_64_DTPOFF32 },
6617 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6618 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6619 _dummy_first_bfd_reloc_code_real },
6620 OPERAND_TYPE_NONE },
6621 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6622 _dummy_first_bfd_reloc_code_real },
6623 OPERAND_TYPE_NONE },
6624 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6625 BFD_RELOC_X86_64_GOT32 },
6626 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6627 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6628 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6629 OPERAND_TYPE_IMM32_32S_DISP32 },
6630 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6631 BFD_RELOC_X86_64_TLSDESC_CALL },
6632 OPERAND_TYPE_IMM32_32S_DISP32 },
6633 };
6634 char *cp;
6635 unsigned int j;
6636
6637 #if defined (OBJ_MAYBE_ELF)
6638 if (!IS_ELF)
6639 return NULL;
6640 #endif
6641
6642 for (cp = input_line_pointer; *cp != '@'; cp++)
6643 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6644 return NULL;
6645
6646 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6647 {
6648 int len = gotrel[j].len;
6649 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6650 {
6651 if (gotrel[j].rel[object_64bit] != 0)
6652 {
6653 int first, second;
6654 char *tmpbuf, *past_reloc;
6655
6656 *rel = gotrel[j].rel[object_64bit];
6657 if (adjust)
6658 *adjust = len;
6659
6660 if (types)
6661 {
6662 if (flag_code != CODE_64BIT)
6663 {
6664 types->bitfield.imm32 = 1;
6665 types->bitfield.disp32 = 1;
6666 }
6667 else
6668 *types = gotrel[j].types64;
6669 }
6670
6671 if (GOT_symbol == NULL)
6672 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6673
6674 /* The length of the first part of our input line. */
6675 first = cp - input_line_pointer;
6676
6677 /* The second part goes from after the reloc token until
6678 (and including) an end_of_line char or comma. */
6679 past_reloc = cp + 1 + len;
6680 cp = past_reloc;
6681 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6682 ++cp;
6683 second = cp + 1 - past_reloc;
6684
6685 /* Allocate and copy string. The trailing NUL shouldn't
6686 be necessary, but be safe. */
6687 tmpbuf = (char *) xmalloc (first + second + 2);
6688 memcpy (tmpbuf, input_line_pointer, first);
6689 if (second != 0 && *past_reloc != ' ')
6690 /* Replace the relocation token with ' ', so that
6691 errors like foo@GOTOFF1 will be detected. */
6692 tmpbuf[first++] = ' ';
6693 memcpy (tmpbuf + first, past_reloc, second);
6694 tmpbuf[first + second] = '\0';
6695 return tmpbuf;
6696 }
6697
6698 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6699 gotrel[j].str, 1 << (5 + object_64bit));
6700 return NULL;
6701 }
6702 }
6703
6704 /* Might be a symbol version string. Don't as_bad here. */
6705 return NULL;
6706 }
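/* Rough example of what lex_got does: for the operand
   `foo@GOTOFF(%ebx)' in 32-bit ELF output it selects
   BFD_RELOC_386_GOTOFF, makes sure GOT_symbol exists, and hands back
   roughly "foo (%ebx)" -- the @GOTOFF token replaced by a space --
   for the normal expression parser.  The symbol name is only for
   illustration.  */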
6707 #endif
6708
6709 void
6710 x86_cons (expressionS *exp, int size)
6711 {
6712 intel_syntax = -intel_syntax;
6713
6714 exp->X_md = 0;
6715 if (size == 4 || (object_64bit && size == 8))
6716 {
6717 /* Handle @GOTOFF and the like in an expression. */
6718 char *save;
6719 char *gotfree_input_line;
6720 int adjust = 0;
6721
6722 save = input_line_pointer;
6723 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6724 if (gotfree_input_line)
6725 input_line_pointer = gotfree_input_line;
6726
6727 expression (exp);
6728
6729 if (gotfree_input_line)
6730 {
6731 /* expression () has merrily parsed up to the end of line,
6732 or a comma - in the wrong buffer. Transfer how far
6733 input_line_pointer has moved to the right buffer. */
6734 input_line_pointer = (save
6735 + (input_line_pointer - gotfree_input_line)
6736 + adjust);
6737 free (gotfree_input_line);
6738 if (exp->X_op == O_constant
6739 || exp->X_op == O_absent
6740 || exp->X_op == O_illegal
6741 || exp->X_op == O_register
6742 || exp->X_op == O_big)
6743 {
6744 char c = *input_line_pointer;
6745 *input_line_pointer = 0;
6746 as_bad (_("missing or invalid expression `%s'"), save);
6747 *input_line_pointer = c;
6748 }
6749 }
6750 }
6751 else
6752 expression (exp);
6753
6754 intel_syntax = -intel_syntax;
6755
6756 if (intel_syntax)
6757 i386_intel_simplify (exp);
6758 }
6759
6760 static void
6761 signed_cons (int size)
6762 {
6763 if (flag_code == CODE_64BIT)
6764 cons_sign = 1;
6765 cons (size);
6766 cons_sign = -1;
6767 }
6768
6769 #ifdef TE_PE
6770 static void
6771 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6772 {
6773 expressionS exp;
6774
6775 do
6776 {
6777 expression (&exp);
6778 if (exp.X_op == O_symbol)
6779 exp.X_op = O_secrel;
6780
6781 emit_expr (&exp, 4);
6782 }
6783 while (*input_line_pointer++ == ',');
6784
6785 input_line_pointer--;
6786 demand_empty_rest_of_line ();
6787 }
6788 #endif
6789
6790 static int
6791 i386_immediate (char *imm_start)
6792 {
6793 char *save_input_line_pointer;
6794 char *gotfree_input_line;
6795 segT exp_seg = 0;
6796 expressionS *exp;
6797 i386_operand_type types;
6798
6799 operand_type_set (&types, ~0);
6800
6801 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6802 {
6803 as_bad (_("at most %d immediate operands are allowed"),
6804 MAX_IMMEDIATE_OPERANDS);
6805 return 0;
6806 }
6807
6808 exp = &im_expressions[i.imm_operands++];
6809 i.op[this_operand].imms = exp;
6810
6811 if (is_space_char (*imm_start))
6812 ++imm_start;
6813
6814 save_input_line_pointer = input_line_pointer;
6815 input_line_pointer = imm_start;
6816
6817 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6818 if (gotfree_input_line)
6819 input_line_pointer = gotfree_input_line;
6820
6821 exp_seg = expression (exp);
6822
6823 SKIP_WHITESPACE ();
6824 if (*input_line_pointer)
6825 as_bad (_("junk `%s' after expression"), input_line_pointer);
6826
6827 input_line_pointer = save_input_line_pointer;
6828 if (gotfree_input_line)
6829 {
6830 free (gotfree_input_line);
6831
6832 if (exp->X_op == O_constant || exp->X_op == O_register)
6833 exp->X_op = O_illegal;
6834 }
6835
6836 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6837 }
6838
6839 static int
6840 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6841 i386_operand_type types, const char *imm_start)
6842 {
6843 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6844 {
6845 if (imm_start)
6846 as_bad (_("missing or invalid immediate expression `%s'"),
6847 imm_start);
6848 return 0;
6849 }
6850 else if (exp->X_op == O_constant)
6851 {
6852 /* Size it properly later. */
6853 i.types[this_operand].bitfield.imm64 = 1;
6854 /* If not 64bit, sign extend val. */
6855 if (flag_code != CODE_64BIT
6856 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6857 exp->X_add_number
6858 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6859 }
6860 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6861 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6862 && exp_seg != absolute_section
6863 && exp_seg != text_section
6864 && exp_seg != data_section
6865 && exp_seg != bss_section
6866 && exp_seg != undefined_section
6867 && !bfd_is_com_section (exp_seg))
6868 {
6869 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6870 return 0;
6871 }
6872 #endif
6873 else if (!intel_syntax && exp->X_op == O_register)
6874 {
6875 if (imm_start)
6876 as_bad (_("illegal immediate register operand %s"), imm_start);
6877 return 0;
6878 }
6879 else
6880 {
6881 /* This is an address. The size of the address will be
6882 determined later, depending on destination register,
6883 suffix, or the default for the section. */
6884 i.types[this_operand].bitfield.imm8 = 1;
6885 i.types[this_operand].bitfield.imm16 = 1;
6886 i.types[this_operand].bitfield.imm32 = 1;
6887 i.types[this_operand].bitfield.imm32s = 1;
6888 i.types[this_operand].bitfield.imm64 = 1;
6889 i.types[this_operand] = operand_type_and (i.types[this_operand],
6890 types);
6891 }
6892
6893 return 1;
6894 }
6895
6896 static char *
6897 i386_scale (char *scale)
6898 {
6899 offsetT val;
6900 char *save = input_line_pointer;
6901
6902 input_line_pointer = scale;
6903 val = get_absolute_expression ();
6904
6905 switch (val)
6906 {
6907 case 1:
6908 i.log2_scale_factor = 0;
6909 break;
6910 case 2:
6911 i.log2_scale_factor = 1;
6912 break;
6913 case 4:
6914 i.log2_scale_factor = 2;
6915 break;
6916 case 8:
6917 i.log2_scale_factor = 3;
6918 break;
6919 default:
6920 {
6921 char sep = *input_line_pointer;
6922
6923 *input_line_pointer = '\0';
6924 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6925 scale);
6926 *input_line_pointer = sep;
6927 input_line_pointer = save;
6928 return NULL;
6929 }
6930 }
6931 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6932 {
6933 as_warn (_("scale factor of %d without an index register"),
6934 1 << i.log2_scale_factor);
6935 i.log2_scale_factor = 0;
6936 }
6937 scale = input_line_pointer;
6938 input_line_pointer = save;
6939 return scale;
6940 }
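/* For instance, parsing the `4' in `(%ebx,%esi,4)' sets
   i.log2_scale_factor to 2; a scale of 3 is rejected, and a scale
   given without an index register only draws a warning and is reset
   to 0.  */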
6941
6942 static int
6943 i386_displacement (char *disp_start, char *disp_end)
6944 {
6945 expressionS *exp;
6946 segT exp_seg = 0;
6947 char *save_input_line_pointer;
6948 char *gotfree_input_line;
6949 int override;
6950 i386_operand_type bigdisp, types = anydisp;
6951 int ret;
6952
6953 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6954 {
6955 as_bad (_("at most %d displacement operands are allowed"),
6956 MAX_MEMORY_OPERANDS);
6957 return 0;
6958 }
6959
6960 operand_type_set (&bigdisp, 0);
6961 if ((i.types[this_operand].bitfield.jumpabsolute)
6962 || (!current_templates->start->opcode_modifier.jump
6963 && !current_templates->start->opcode_modifier.jumpdword))
6964 {
6965 bigdisp.bitfield.disp32 = 1;
6966 override = (i.prefix[ADDR_PREFIX] != 0);
6967 if (flag_code == CODE_64BIT)
6968 {
6969 if (!override)
6970 {
6971 bigdisp.bitfield.disp32s = 1;
6972 bigdisp.bitfield.disp64 = 1;
6973 }
6974 }
6975 else if ((flag_code == CODE_16BIT) ^ override)
6976 {
6977 bigdisp.bitfield.disp32 = 0;
6978 bigdisp.bitfield.disp16 = 1;
6979 }
6980 }
6981 else
6982 {
6983 /* For PC-relative branches, the width of the displacement
6984 is dependent upon data size, not address size. */
6985 override = (i.prefix[DATA_PREFIX] != 0);
6986 if (flag_code == CODE_64BIT)
6987 {
6988 if (override || i.suffix == WORD_MNEM_SUFFIX)
6989 bigdisp.bitfield.disp16 = 1;
6990 else
6991 {
6992 bigdisp.bitfield.disp32 = 1;
6993 bigdisp.bitfield.disp32s = 1;
6994 }
6995 }
6996 else
6997 {
6998 if (!override)
6999 override = (i.suffix == (flag_code != CODE_16BIT
7000 ? WORD_MNEM_SUFFIX
7001 : LONG_MNEM_SUFFIX));
7002 bigdisp.bitfield.disp32 = 1;
7003 if ((flag_code == CODE_16BIT) ^ override)
7004 {
7005 bigdisp.bitfield.disp32 = 0;
7006 bigdisp.bitfield.disp16 = 1;
7007 }
7008 }
7009 }
7010 i.types[this_operand] = operand_type_or (i.types[this_operand],
7011 bigdisp);
7012
7013 exp = &disp_expressions[i.disp_operands];
7014 i.op[this_operand].disps = exp;
7015 i.disp_operands++;
7016 save_input_line_pointer = input_line_pointer;
7017 input_line_pointer = disp_start;
7018 END_STRING_AND_SAVE (disp_end);
7019
7020 #ifndef GCC_ASM_O_HACK
7021 #define GCC_ASM_O_HACK 0
7022 #endif
7023 #if GCC_ASM_O_HACK
7024 END_STRING_AND_SAVE (disp_end + 1);
7025 if (i.types[this_operand].bitfield.baseIndex
7026 && displacement_string_end[-1] == '+')
7027 {
7028 /* This hack is to avoid a warning when using the "o"
7029 constraint within gcc asm statements.
7030 For instance:
7031
7032 #define _set_tssldt_desc(n,addr,limit,type) \
7033 __asm__ __volatile__ ( \
7034 "movw %w2,%0\n\t" \
7035 "movw %w1,2+%0\n\t" \
7036 "rorl $16,%1\n\t" \
7037 "movb %b1,4+%0\n\t" \
7038 "movb %4,5+%0\n\t" \
7039 "movb $0,6+%0\n\t" \
7040 "movb %h1,7+%0\n\t" \
7041 "rorl $16,%1" \
7042 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7043
7044 This works great except that the output assembly ends
7045 up looking a bit weird if it turns out that there is
7046 no offset. You end up producing code that looks like:
7047
7048 #APP
7049 movw $235,(%eax)
7050 movw %dx,2+(%eax)
7051 rorl $16,%edx
7052 movb %dl,4+(%eax)
7053 movb $137,5+(%eax)
7054 movb $0,6+(%eax)
7055 movb %dh,7+(%eax)
7056 rorl $16,%edx
7057 #NO_APP
7058
7059 So here we provide the missing zero. */
7060
7061 *displacement_string_end = '0';
7062 }
7063 #endif
7064 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7065 if (gotfree_input_line)
7066 input_line_pointer = gotfree_input_line;
7067
7068 exp_seg = expression (exp);
7069
7070 SKIP_WHITESPACE ();
7071 if (*input_line_pointer)
7072 as_bad (_("junk `%s' after expression"), input_line_pointer);
7073 #if GCC_ASM_O_HACK
7074 RESTORE_END_STRING (disp_end + 1);
7075 #endif
7076 input_line_pointer = save_input_line_pointer;
7077 if (gotfree_input_line)
7078 {
7079 free (gotfree_input_line);
7080
7081 if (exp->X_op == O_constant || exp->X_op == O_register)
7082 exp->X_op = O_illegal;
7083 }
7084
7085 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7086
7087 RESTORE_END_STRING (disp_end);
7088
7089 return ret;
7090 }
7091
7092 static int
7093 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7094 i386_operand_type types, const char *disp_start)
7095 {
7096 i386_operand_type bigdisp;
7097 int ret = 1;
7098
7099 /* We do this to make sure that the section symbol is in
7100 the symbol table. We will ultimately change the relocation
7101 to be relative to the beginning of the section. */
7102 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7103 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7104 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7105 {
7106 if (exp->X_op != O_symbol)
7107 goto inv_disp;
7108
7109 if (S_IS_LOCAL (exp->X_add_symbol)
7110 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7111 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7112 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7113 exp->X_op = O_subtract;
7114 exp->X_op_symbol = GOT_symbol;
7115 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7116 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7117 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7118 i.reloc[this_operand] = BFD_RELOC_64;
7119 else
7120 i.reloc[this_operand] = BFD_RELOC_32;
7121 }
7122
7123 else if (exp->X_op == O_absent
7124 || exp->X_op == O_illegal
7125 || exp->X_op == O_big)
7126 {
7127 inv_disp:
7128 as_bad (_("missing or invalid displacement expression `%s'"),
7129 disp_start);
7130 ret = 0;
7131 }
7132
7133 else if (flag_code == CODE_64BIT
7134 && !i.prefix[ADDR_PREFIX]
7135 && exp->X_op == O_constant)
7136 {
7137 /* Since the displacement is sign extended to 64bit, don't allow
7138 disp32, and turn off disp32s if it is out of range. */
7139 i.types[this_operand].bitfield.disp32 = 0;
7140 if (!fits_in_signed_long (exp->X_add_number))
7141 {
7142 i.types[this_operand].bitfield.disp32s = 0;
7143 if (i.types[this_operand].bitfield.baseindex)
7144 {
7145 as_bad (_("0x%lx out range of signed 32bit displacement"),
7146 (long) exp->X_add_number);
7147 ret = 0;
7148 }
7149 }
7150 }
7151
7152 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7153 else if (exp->X_op != O_constant
7154 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7155 && exp_seg != absolute_section
7156 && exp_seg != text_section
7157 && exp_seg != data_section
7158 && exp_seg != bss_section
7159 && exp_seg != undefined_section
7160 && !bfd_is_com_section (exp_seg))
7161 {
7162 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7163 ret = 0;
7164 }
7165 #endif
7166
7167 /* Check if this is a displacement only operand. */
7168 bigdisp = i.types[this_operand];
7169 bigdisp.bitfield.disp8 = 0;
7170 bigdisp.bitfield.disp16 = 0;
7171 bigdisp.bitfield.disp32 = 0;
7172 bigdisp.bitfield.disp32s = 0;
7173 bigdisp.bitfield.disp64 = 0;
7174 if (operand_type_all_zero (&bigdisp))
7175 i.types[this_operand] = operand_type_and (i.types[this_operand],
7176 types);
7177
7178 return ret;
7179 }
7180
7181 /* Make sure the memory operand we've been dealt is valid.
7182 Return 1 on success, 0 on a failure. */
7183
7184 static int
7185 i386_index_check (const char *operand_string)
7186 {
7187 int ok;
7188 const char *kind = "base/index";
7189 #if INFER_ADDR_PREFIX
7190 int fudged = 0;
7191
7192 tryprefix:
7193 #endif
7194 ok = 1;
7195 if (current_templates->start->opcode_modifier.isstring
7196 && !current_templates->start->opcode_modifier.immext
7197 && (current_templates->end[-1].opcode_modifier.isstring
7198 || i.mem_operands))
7199 {
7200 /* Memory operands of string insns are special in that they only allow
7201 a single register (rDI, rSI, or rBX) as their memory address. */
7202 unsigned int expected;
7203
7204 kind = "string address";
7205
7206 if (current_templates->start->opcode_modifier.w)
7207 {
7208 i386_operand_type type = current_templates->end[-1].operand_types[0];
7209
7210 if (!type.bitfield.baseindex
7211 || ((!i.mem_operands != !intel_syntax)
7212 && current_templates->end[-1].operand_types[1]
7213 .bitfield.baseindex))
7214 type = current_templates->end[-1].operand_types[1];
7215 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7216 }
7217 else
7218 expected = 3 /* rBX */;
7219
7220 if (!i.base_reg || i.index_reg
7221 || operand_type_check (i.types[this_operand], disp))
7222 ok = -1;
7223 else if (!(flag_code == CODE_64BIT
7224 ? i.prefix[ADDR_PREFIX]
7225 ? i.base_reg->reg_type.bitfield.reg32
7226 : i.base_reg->reg_type.bitfield.reg64
7227 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7228 ? i.base_reg->reg_type.bitfield.reg32
7229 : i.base_reg->reg_type.bitfield.reg16))
7230 ok = 0;
7231 else if (i.base_reg->reg_num != expected)
7232 ok = -1;
7233
7234 if (ok < 0)
7235 {
7236 unsigned int j;
7237
7238 for (j = 0; j < i386_regtab_size; ++j)
7239 if ((flag_code == CODE_64BIT
7240 ? i.prefix[ADDR_PREFIX]
7241 ? i386_regtab[j].reg_type.bitfield.reg32
7242 : i386_regtab[j].reg_type.bitfield.reg64
7243 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7244 ? i386_regtab[j].reg_type.bitfield.reg32
7245 : i386_regtab[j].reg_type.bitfield.reg16)
7246 && i386_regtab[j].reg_num == expected)
7247 break;
7248 gas_assert (j < i386_regtab_size);
7249 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7250 operand_string,
7251 intel_syntax ? '[' : '(',
7252 register_prefix,
7253 i386_regtab[j].reg_name,
7254 intel_syntax ? ']' : ')');
7255 ok = 1;
7256 }
7257 }
7258 else if (flag_code == CODE_64BIT)
7259 {
7260 if ((i.base_reg
7261 && ((i.prefix[ADDR_PREFIX] == 0
7262 && !i.base_reg->reg_type.bitfield.reg64)
7263 || (i.prefix[ADDR_PREFIX]
7264 && !i.base_reg->reg_type.bitfield.reg32))
7265 && (i.index_reg
7266 || i.base_reg->reg_num !=
7267 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7268 || (i.index_reg
7269 && !(i.index_reg->reg_type.bitfield.regxmm
7270 || i.index_reg->reg_type.bitfield.regymm)
7271 && (!i.index_reg->reg_type.bitfield.baseindex
7272 || (i.prefix[ADDR_PREFIX] == 0
7273 && i.index_reg->reg_num != RegRiz
7274 && !i.index_reg->reg_type.bitfield.reg64
7275 )
7276 || (i.prefix[ADDR_PREFIX]
7277 && i.index_reg->reg_num != RegEiz
7278 && !i.index_reg->reg_type.bitfield.reg32))))
7279 ok = 0;
7280 }
7281 else
7282 {
7283 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7284 {
7285 /* 16bit checks. */
7286 if ((i.base_reg
7287 && (!i.base_reg->reg_type.bitfield.reg16
7288 || !i.base_reg->reg_type.bitfield.baseindex))
7289 || (i.index_reg
7290 && (!i.index_reg->reg_type.bitfield.reg16
7291 || !i.index_reg->reg_type.bitfield.baseindex
7292 || !(i.base_reg
7293 && i.base_reg->reg_num < 6
7294 && i.index_reg->reg_num >= 6
7295 && i.log2_scale_factor == 0))))
7296 ok = 0;
7297 }
7298 else
7299 {
7300 /* 32bit checks. */
7301 if ((i.base_reg
7302 && !i.base_reg->reg_type.bitfield.reg32)
7303 || (i.index_reg
7304 && !i.index_reg->reg_type.bitfield.regxmm
7305 && !i.index_reg->reg_type.bitfield.regymm
7306 && ((!i.index_reg->reg_type.bitfield.reg32
7307 && i.index_reg->reg_num != RegEiz)
7308 || !i.index_reg->reg_type.bitfield.baseindex)))
7309 ok = 0;
7310 }
7311 }
7312 if (!ok)
7313 {
7314 #if INFER_ADDR_PREFIX
7315 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7316 {
7317 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7318 i.prefixes += 1;
7319 /* Change the size of any displacement too. At most one of
7320 Disp16 or Disp32 is set.
7321 FIXME. There doesn't seem to be any real need for separate
7322 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7323 Removing them would probably clean up the code quite a lot. */
7324 if (flag_code != CODE_64BIT
7325 && (i.types[this_operand].bitfield.disp16
7326 || i.types[this_operand].bitfield.disp32))
7327 i.types[this_operand]
7328 = operand_type_xor (i.types[this_operand], disp16_32);
7329 fudged = 1;
7330 goto tryprefix;
7331 }
7332 if (fudged)
7333 as_bad (_("`%s' is not a valid %s expression"),
7334 operand_string,
7335 kind);
7336 else
7337 #endif
7338 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7339 operand_string,
7340 flag_code_names[i.prefix[ADDR_PREFIX]
7341 ? flag_code == CODE_32BIT
7342 ? CODE_16BIT
7343 : CODE_32BIT
7344 : flag_code],
7345 kind);
7346 }
7347 return ok;
7348 }
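/* A sketch of the INFER_ADDR_PREFIX retry above: in 32-bit code,
   `movw %ax,(%si)' fails the 32-bit base/index check, so the
   assembler inserts an address-size prefix (ADDR_PREFIX_OPCODE),
   flips any Disp16/Disp32 bits, and tries again; only if that retry
   also fails is the "not a valid ... expression" error reported.
   The example instruction is illustrative.  */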
7349
7350 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7351 on error. */
7352
7353 static int
7354 i386_att_operand (char *operand_string)
7355 {
7356 const reg_entry *r;
7357 char *end_op;
7358 char *op_string = operand_string;
7359
7360 if (is_space_char (*op_string))
7361 ++op_string;
7362
7363 /* We check for an absolute prefix (differentiating,
7364 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7365 if (*op_string == ABSOLUTE_PREFIX)
7366 {
7367 ++op_string;
7368 if (is_space_char (*op_string))
7369 ++op_string;
7370 i.types[this_operand].bitfield.jumpabsolute = 1;
7371 }
7372
7373 /* Check if operand is a register. */
7374 if ((r = parse_register (op_string, &end_op)) != NULL)
7375 {
7376 i386_operand_type temp;
7377
7378 /* Check for a segment override by searching for ':' after a
7379 segment register. */
7380 op_string = end_op;
7381 if (is_space_char (*op_string))
7382 ++op_string;
7383 if (*op_string == ':'
7384 && (r->reg_type.bitfield.sreg2
7385 || r->reg_type.bitfield.sreg3))
7386 {
7387 switch (r->reg_num)
7388 {
7389 case 0:
7390 i.seg[i.mem_operands] = &es;
7391 break;
7392 case 1:
7393 i.seg[i.mem_operands] = &cs;
7394 break;
7395 case 2:
7396 i.seg[i.mem_operands] = &ss;
7397 break;
7398 case 3:
7399 i.seg[i.mem_operands] = &ds;
7400 break;
7401 case 4:
7402 i.seg[i.mem_operands] = &fs;
7403 break;
7404 case 5:
7405 i.seg[i.mem_operands] = &gs;
7406 break;
7407 }
7408
7409 /* Skip the ':' and whitespace. */
7410 ++op_string;
7411 if (is_space_char (*op_string))
7412 ++op_string;
7413
7414 if (!is_digit_char (*op_string)
7415 && !is_identifier_char (*op_string)
7416 && *op_string != '('
7417 && *op_string != ABSOLUTE_PREFIX)
7418 {
7419 as_bad (_("bad memory operand `%s'"), op_string);
7420 return 0;
7421 }
7422 /* Handle case of %es:*foo. */
7423 if (*op_string == ABSOLUTE_PREFIX)
7424 {
7425 ++op_string;
7426 if (is_space_char (*op_string))
7427 ++op_string;
7428 i.types[this_operand].bitfield.jumpabsolute = 1;
7429 }
7430 goto do_memory_reference;
7431 }
7432 if (*op_string)
7433 {
7434 as_bad (_("junk `%s' after register"), op_string);
7435 return 0;
7436 }
7437 temp = r->reg_type;
7438 temp.bitfield.baseindex = 0;
7439 i.types[this_operand] = operand_type_or (i.types[this_operand],
7440 temp);
7441 i.types[this_operand].bitfield.unspecified = 0;
7442 i.op[this_operand].regs = r;
7443 i.reg_operands++;
7444 }
7445 else if (*op_string == REGISTER_PREFIX)
7446 {
7447 as_bad (_("bad register name `%s'"), op_string);
7448 return 0;
7449 }
7450 else if (*op_string == IMMEDIATE_PREFIX)
7451 {
7452 ++op_string;
7453 if (i.types[this_operand].bitfield.jumpabsolute)
7454 {
7455 as_bad (_("immediate operand illegal with absolute jump"));
7456 return 0;
7457 }
7458 if (!i386_immediate (op_string))
7459 return 0;
7460 }
7461 else if (is_digit_char (*op_string)
7462 || is_identifier_char (*op_string)
7463 || *op_string == '(')
7464 {
7465 /* This is a memory reference of some sort. */
7466 char *base_string;
7467
7468 /* Start and end of displacement string expression (if found). */
7469 char *displacement_string_start;
7470 char *displacement_string_end;
7471
7472 do_memory_reference:
7473 if ((i.mem_operands == 1
7474 && !current_templates->start->opcode_modifier.isstring)
7475 || i.mem_operands == 2)
7476 {
7477 as_bad (_("too many memory references for `%s'"),
7478 current_templates->start->name);
7479 return 0;
7480 }
7481
7482 /* Check for base index form. We detect the base index form by
7483 looking for an ')' at the end of the operand, searching
7484 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7485 after the '('. */
7486 base_string = op_string + strlen (op_string);
7487
7488 --base_string;
7489 if (is_space_char (*base_string))
7490 --base_string;
7491
7492 /* If we only have a displacement, set up for it to be parsed later. */
7493 displacement_string_start = op_string;
7494 displacement_string_end = base_string + 1;
7495
7496 if (*base_string == ')')
7497 {
7498 char *temp_string;
7499 unsigned int parens_balanced = 1;
7500 /* We've already checked that the numbers of left & right ()'s are
7501 equal, so this loop will not be infinite. */
7502 do
7503 {
7504 base_string--;
7505 if (*base_string == ')')
7506 parens_balanced++;
7507 if (*base_string == '(')
7508 parens_balanced--;
7509 }
7510 while (parens_balanced);
7511
7512 temp_string = base_string;
7513
7514 /* Skip past '(' and whitespace. */
7515 ++base_string;
7516 if (is_space_char (*base_string))
7517 ++base_string;
7518
7519 if (*base_string == ','
7520 || ((i.base_reg = parse_register (base_string, &end_op))
7521 != NULL))
7522 {
7523 displacement_string_end = temp_string;
7524
7525 i.types[this_operand].bitfield.baseindex = 1;
7526
7527 if (i.base_reg)
7528 {
7529 base_string = end_op;
7530 if (is_space_char (*base_string))
7531 ++base_string;
7532 }
7533
7534 /* There may be an index reg or scale factor here. */
7535 if (*base_string == ',')
7536 {
7537 ++base_string;
7538 if (is_space_char (*base_string))
7539 ++base_string;
7540
7541 if ((i.index_reg = parse_register (base_string, &end_op))
7542 != NULL)
7543 {
7544 base_string = end_op;
7545 if (is_space_char (*base_string))
7546 ++base_string;
7547 if (*base_string == ',')
7548 {
7549 ++base_string;
7550 if (is_space_char (*base_string))
7551 ++base_string;
7552 }
7553 else if (*base_string != ')')
7554 {
7555 as_bad (_("expecting `,' or `)' "
7556 "after index register in `%s'"),
7557 operand_string);
7558 return 0;
7559 }
7560 }
7561 else if (*base_string == REGISTER_PREFIX)
7562 {
7563 as_bad (_("bad register name `%s'"), base_string);
7564 return 0;
7565 }
7566
7567 /* Check for scale factor. */
7568 if (*base_string != ')')
7569 {
7570 char *end_scale = i386_scale (base_string);
7571
7572 if (!end_scale)
7573 return 0;
7574
7575 base_string = end_scale;
7576 if (is_space_char (*base_string))
7577 ++base_string;
7578 if (*base_string != ')')
7579 {
7580 as_bad (_("expecting `)' "
7581 "after scale factor in `%s'"),
7582 operand_string);
7583 return 0;
7584 }
7585 }
7586 else if (!i.index_reg)
7587 {
7588 as_bad (_("expecting index register or scale factor "
7589 "after `,'; got '%c'"),
7590 *base_string);
7591 return 0;
7592 }
7593 }
7594 else if (*base_string != ')')
7595 {
7596 as_bad (_("expecting `,' or `)' "
7597 "after base register in `%s'"),
7598 operand_string);
7599 return 0;
7600 }
7601 }
7602 else if (*base_string == REGISTER_PREFIX)
7603 {
7604 as_bad (_("bad register name `%s'"), base_string);
7605 return 0;
7606 }
7607 }
7608
7609 /* If there's an expression beginning the operand, parse it,
7610 assuming displacement_string_start and
7611 displacement_string_end are meaningful. */
7612 if (displacement_string_start != displacement_string_end)
7613 {
7614 if (!i386_displacement (displacement_string_start,
7615 displacement_string_end))
7616 return 0;
7617 }
7618
7619 /* Special case for (%dx) while doing input/output op. */
7620 if (i.base_reg
7621 && operand_type_equal (&i.base_reg->reg_type,
7622 &reg16_inoutportreg)
7623 && i.index_reg == 0
7624 && i.log2_scale_factor == 0
7625 && i.seg[i.mem_operands] == 0
7626 && !operand_type_check (i.types[this_operand], disp))
7627 {
7628 i.types[this_operand] = inoutportreg;
7629 return 1;
7630 }
7631
7632 if (i386_index_check (operand_string) == 0)
7633 return 0;
7634 i.types[this_operand].bitfield.mem = 1;
7635 i.mem_operands++;
7636 }
7637 else
7638 {
7639 /* It's not a memory operand; argh! */
7640 as_bad (_("invalid char %s beginning operand %d `%s'"),
7641 output_invalid (*op_string),
7642 this_operand + 1,
7643 op_string);
7644 return 0;
7645 }
7646 return 1; /* Normal return. */
7647 }
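/* Putting the pieces together (made-up operand): for
   `-4(%ebp,%ebx,2)' the code above hands "-4" to i386_displacement,
   records %ebp as i.base_reg and %ebx as i.index_reg, lets
   i386_scale turn the "2" into log2_scale_factor 1, and finally
   marks the operand as a BaseIndex memory operand.  */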
7648 \f
7649 /* md_estimate_size_before_relax()
7650
7651 Called just before relax() for rs_machine_dependent frags. The x86
7652 assembler uses these frags to handle variable size jump
7653 instructions.
7654
7655 Any symbol that is now undefined will not become defined.
7656 Return the correct fr_subtype in the frag.
7657 Return the initial "guess for variable size of frag" to caller.
7658 The guess is actually the growth beyond the fixed part. Whatever
7659 we do to grow the fixed or variable part contributes to our
7660 returned value. */
7661
7662 int
7663 md_estimate_size_before_relax (fragS *fragP, segT segment)
7664 {
7665 /* We've already got fragP->fr_subtype right; all we have to do is
7666 check for un-relaxable symbols. On an ELF system, we can't relax
7667 an externally visible symbol, because it may be overridden by a
7668 shared library. */
7669 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7670 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7671 || (IS_ELF
7672 && (S_IS_EXTERNAL (fragP->fr_symbol)
7673 || S_IS_WEAK (fragP->fr_symbol)
7674 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7675 & BSF_GNU_INDIRECT_FUNCTION))))
7676 #endif
7677 #if defined (OBJ_COFF) && defined (TE_PE)
7678 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7679 && S_IS_WEAK (fragP->fr_symbol))
7680 #endif
7681 )
7682 {
7683 /* Symbol is undefined in this segment, or we need to keep a
7684 reloc so that weak symbols can be overridden. */
7685 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7686 enum bfd_reloc_code_real reloc_type;
7687 unsigned char *opcode;
7688 int old_fr_fix;
7689
7690 if (fragP->fr_var != NO_RELOC)
7691 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7692 else if (size == 2)
7693 reloc_type = BFD_RELOC_16_PCREL;
7694 else
7695 reloc_type = BFD_RELOC_32_PCREL;
7696
7697 old_fr_fix = fragP->fr_fix;
7698 opcode = (unsigned char *) fragP->fr_opcode;
7699
7700 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7701 {
7702 case UNCOND_JUMP:
7703 /* Make jmp (0xeb) a (d)word displacement jump. */
7704 opcode[0] = 0xe9;
7705 fragP->fr_fix += size;
7706 fix_new (fragP, old_fr_fix, size,
7707 fragP->fr_symbol,
7708 fragP->fr_offset, 1,
7709 reloc_type);
7710 break;
7711
7712 case COND_JUMP86:
7713 if (size == 2
7714 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7715 {
7716 /* Negate the condition, and branch past an
7717 unconditional jump. */
7718 opcode[0] ^= 1;
7719 opcode[1] = 3;
7720 /* Insert an unconditional jump. */
7721 opcode[2] = 0xe9;
7722 /* We added two extra opcode bytes, and have a two byte
7723 offset. */
7724 fragP->fr_fix += 2 + 2;
7725 fix_new (fragP, old_fr_fix + 2, 2,
7726 fragP->fr_symbol,
7727 fragP->fr_offset, 1,
7728 reloc_type);
7729 break;
7730 }
7731 /* Fall through. */
7732
7733 case COND_JUMP:
7734 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7735 {
7736 fixS *fixP;
7737
7738 fragP->fr_fix += 1;
7739 fixP = fix_new (fragP, old_fr_fix, 1,
7740 fragP->fr_symbol,
7741 fragP->fr_offset, 1,
7742 BFD_RELOC_8_PCREL);
7743 fixP->fx_signed = 1;
7744 break;
7745 }
7746
7747 /* This changes the byte-displacement jump 0x7N
7748 to the (d)word-displacement jump 0x0f,0x8N. */
7749 opcode[1] = opcode[0] + 0x10;
7750 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7751 /* We've added an opcode byte. */
7752 fragP->fr_fix += 1 + size;
7753 fix_new (fragP, old_fr_fix + 1, size,
7754 fragP->fr_symbol,
7755 fragP->fr_offset, 1,
7756 reloc_type);
7757 break;
7758
7759 default:
7760 BAD_CASE (fragP->fr_subtype);
7761 break;
7762 }
7763 frag_wane (fragP);
7764 return fragP->fr_fix - old_fr_fix;
7765 }
7766
7767 /* Guess size depending on current relax state. Initially the relax
7768 state will correspond to a short jump and we return 1, because
7769 the variable part of the frag (the branch offset) is one byte
7770 long. However, we can relax a section more than once and in that
7771 case we must either set fr_subtype back to the unrelaxed state,
7772 or return the value for the appropriate branch. */
7773 return md_relax_table[fragP->fr_subtype].rlx_length;
7774 }
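/* Illustration of the non-relaxable path above: a conditional jump
   to an externally visible or weak ELF symbol cannot be relaxed, so
   its 0x7N byte is rewritten here and now to 0x0f,0x8N with a 4-byte
   (or, with CODE16, 2-byte) pc-relative fix, and frag_wane freezes
   the frag.  */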
7775
7776 /* Called after relax() is finished.
7777
7778 In: Address of frag.
7779 fr_type == rs_machine_dependent.
7780 fr_subtype is what the address relaxed to.
7781
7782 Out: Any fixSs and constants are set up.
7783 Caller will turn frag into a ".space 0". */
7784
7785 void
7786 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7787 fragS *fragP)
7788 {
7789 unsigned char *opcode;
7790 unsigned char *where_to_put_displacement = NULL;
7791 offsetT target_address;
7792 offsetT opcode_address;
7793 unsigned int extension = 0;
7794 offsetT displacement_from_opcode_start;
7795
7796 opcode = (unsigned char *) fragP->fr_opcode;
7797
7798 /* Address we want to reach in file space. */
7799 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7800
7801 /* Address opcode resides at in file space. */
7802 opcode_address = fragP->fr_address + fragP->fr_fix;
7803
7804 /* Displacement from opcode start to fill into instruction. */
7805 displacement_from_opcode_start = target_address - opcode_address;
7806
7807 if ((fragP->fr_subtype & BIG) == 0)
7808 {
7809 /* Don't have to change opcode. */
7810 extension = 1; /* 1 opcode + 1 displacement */
7811 where_to_put_displacement = &opcode[1];
7812 }
7813 else
7814 {
7815 if (no_cond_jump_promotion
7816 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7817 as_warn_where (fragP->fr_file, fragP->fr_line,
7818 _("long jump required"));
7819
7820 switch (fragP->fr_subtype)
7821 {
7822 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7823 extension = 4; /* 1 opcode + 4 displacement */
7824 opcode[0] = 0xe9;
7825 where_to_put_displacement = &opcode[1];
7826 break;
7827
7828 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7829 extension = 2; /* 1 opcode + 2 displacement */
7830 opcode[0] = 0xe9;
7831 where_to_put_displacement = &opcode[1];
7832 break;
7833
7834 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7835 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7836 extension = 5; /* 2 opcode + 4 displacement */
7837 opcode[1] = opcode[0] + 0x10;
7838 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7839 where_to_put_displacement = &opcode[2];
7840 break;
7841
7842 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7843 extension = 3; /* 2 opcode + 2 displacement */
7844 opcode[1] = opcode[0] + 0x10;
7845 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7846 where_to_put_displacement = &opcode[2];
7847 break;
7848
7849 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7850 extension = 4;
7851 opcode[0] ^= 1;
7852 opcode[1] = 3;
7853 opcode[2] = 0xe9;
7854 where_to_put_displacement = &opcode[3];
7855 break;
7856
7857 default:
7858 BAD_CASE (fragP->fr_subtype);
7859 break;
7860 }
7861 }
7862
7863 /* If the size is less than four we are sure that the operand fits,
7864 but if it's 4, then it could be that the displacement is larger
7865 than +/- 2GB. */
7866 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7867 && object_64bit
7868 && ((addressT) (displacement_from_opcode_start - extension
7869 + ((addressT) 1 << 31))
7870 > (((addressT) 2 << 31) - 1)))
7871 {
7872 as_bad_where (fragP->fr_file, fragP->fr_line,
7873 _("jump target out of range"));
7874 /* Make us emit 0. */
7875 displacement_from_opcode_start = extension;
7876 }
7877 /* Now put displacement after opcode. */
7878 md_number_to_chars ((char *) where_to_put_displacement,
7879 (valueT) (displacement_from_opcode_start - extension),
7880 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7881 fragP->fr_fix += extension;
7882 }
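/* Worked example (hypothetical distances): a `jne' frag that relaxed
   to ENCODE_RELAX_STATE (COND_JUMP, BIG) has its 0x75 turned into
   0x0f,0x85 with extension 5 (one extra opcode byte plus a 4-byte
   displacement); if displacement_from_opcode_start works out to
   0x200, the value stored is 0x200 - 5 = 0x1fb, i.e. the
   displacement counted from the end of the widened instruction.  */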
7883 \f
7884 /* Apply a fixup (fixP) to segment data, once it has been determined
7885 by our caller that we have all the info we need to fix it up.
7886
7887 Parameter valP is the pointer to the value of the bits.
7888
7889 On the 386, immediates, displacements, and data pointers are all in
7890 the same (little-endian) format, so we don't need to care about which
7891 we are handling. */
7892
7893 void
7894 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7895 {
7896 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7897 valueT value = *valP;
7898
7899 #if !defined (TE_Mach)
7900 if (fixP->fx_pcrel)
7901 {
7902 switch (fixP->fx_r_type)
7903 {
7904 default:
7905 break;
7906
7907 case BFD_RELOC_64:
7908 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7909 break;
7910 case BFD_RELOC_32:
7911 case BFD_RELOC_X86_64_32S:
7912 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7913 break;
7914 case BFD_RELOC_16:
7915 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7916 break;
7917 case BFD_RELOC_8:
7918 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7919 break;
7920 }
7921 }
7922
7923 if (fixP->fx_addsy != NULL
7924 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7925 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7926 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7927 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7928 && !use_rela_relocations)
7929 {
7930 /* This is a hack. There should be a better way to handle this.
7931 This covers for the fact that bfd_install_relocation will
7932 subtract the current location (for partial_inplace, PC relative
7933 relocations); see more below. */
7934 #ifndef OBJ_AOUT
7935 if (IS_ELF
7936 #ifdef TE_PE
7937 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7938 #endif
7939 )
7940 value += fixP->fx_where + fixP->fx_frag->fr_address;
7941 #endif
7942 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7943 if (IS_ELF)
7944 {
7945 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7946
7947 if ((sym_seg == seg
7948 || (symbol_section_p (fixP->fx_addsy)
7949 && sym_seg != absolute_section))
7950 && !generic_force_reloc (fixP))
7951 {
7952 /* Yes, we add the values in twice. This is because
7953 bfd_install_relocation subtracts them out again. I think
7954 bfd_install_relocation is broken, but I don't dare change
7955 it. FIXME. */
7956 value += fixP->fx_where + fixP->fx_frag->fr_address;
7957 }
7958 }
7959 #endif
7960 #if defined (OBJ_COFF) && defined (TE_PE)
7961 /* For some reason, the PE format does not store a
7962 section address offset for a PC relative symbol. */
7963 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7964 || S_IS_WEAK (fixP->fx_addsy))
7965 value += md_pcrel_from (fixP);
7966 #endif
7967 }
7968 #if defined (OBJ_COFF) && defined (TE_PE)
7969 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7970 {
7971 value -= S_GET_VALUE (fixP->fx_addsy);
7972 }
7973 #endif
7974
7975 /* Fix a few things - the dynamic linker expects certain values here,
7976 and we must not disappoint it. */
7977 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7978 if (IS_ELF && fixP->fx_addsy)
7979 switch (fixP->fx_r_type)
7980 {
7981 case BFD_RELOC_386_PLT32:
7982 case BFD_RELOC_X86_64_PLT32:
7983 /* Make the jump instruction point to the address of the operand. At
7984 runtime we merely add the offset to the actual PLT entry. */
7985 value = -4;
7986 break;
7987
7988 case BFD_RELOC_386_TLS_GD:
7989 case BFD_RELOC_386_TLS_LDM:
7990 case BFD_RELOC_386_TLS_IE_32:
7991 case BFD_RELOC_386_TLS_IE:
7992 case BFD_RELOC_386_TLS_GOTIE:
7993 case BFD_RELOC_386_TLS_GOTDESC:
7994 case BFD_RELOC_X86_64_TLSGD:
7995 case BFD_RELOC_X86_64_TLSLD:
7996 case BFD_RELOC_X86_64_GOTTPOFF:
7997 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7998 value = 0; /* Fully resolved at runtime. No addend. */
7999 /* Fallthrough */
8000 case BFD_RELOC_386_TLS_LE:
8001 case BFD_RELOC_386_TLS_LDO_32:
8002 case BFD_RELOC_386_TLS_LE_32:
8003 case BFD_RELOC_X86_64_DTPOFF32:
8004 case BFD_RELOC_X86_64_DTPOFF64:
8005 case BFD_RELOC_X86_64_TPOFF32:
8006 case BFD_RELOC_X86_64_TPOFF64:
8007 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8008 break;
8009
8010 case BFD_RELOC_386_TLS_DESC_CALL:
8011 case BFD_RELOC_X86_64_TLSDESC_CALL:
8012 value = 0; /* Fully resolved at runtime. No addend. */
8013 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8014 fixP->fx_done = 0;
8015 return;
8016
8017 case BFD_RELOC_386_GOT32:
8018 case BFD_RELOC_X86_64_GOT32:
8019 value = 0; /* Fully resolved at runtime. No addend. */
8020 break;
8021
8022 case BFD_RELOC_VTABLE_INHERIT:
8023 case BFD_RELOC_VTABLE_ENTRY:
8024 fixP->fx_done = 0;
8025 return;
8026
8027 default:
8028 break;
8029 }
8030 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8031 *valP = value;
8032 #endif /* !defined (TE_Mach) */
8033
8034 /* Are we finished with this relocation now? */
8035 if (fixP->fx_addsy == NULL)
8036 fixP->fx_done = 1;
8037 #if defined (OBJ_COFF) && defined (TE_PE)
8038 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8039 {
8040 fixP->fx_done = 0;
8041 /* Remember value for tc_gen_reloc. */
8042 fixP->fx_addnumber = value;
8043 /* Clear out the frag for now. */
8044 value = 0;
8045 }
8046 #endif
8047 else if (use_rela_relocations)
8048 {
8049 fixP->fx_no_overflow = 1;
8050 /* Remember value for tc_gen_reloc. */
8051 fixP->fx_addnumber = value;
8052 value = 0;
8053 }
8054
8055 md_number_to_chars (p, value, fixP->fx_size);
8056 }
8057 \f
8058 char *
8059 md_atof (int type, char *litP, int *sizeP)
8060 {
8061   /* The 386 is little-endian, so this outputs the LITTLENUMs in
8062      little-endian order.  */
8063 return ieee_md_atof (type, litP, sizeP, FALSE);
8064 }
8065 \f
8066 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8067
8068 static char *
8069 output_invalid (int c)
8070 {
8071 if (ISPRINT (c))
8072 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8073 "'%c'", c);
8074 else
8075 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8076 "(0x%x)", (unsigned char) c);
8077 return output_invalid_buf;
8078 }
8079
8080 /* REG_STRING starts *before* REGISTER_PREFIX. */
8081
8082 static const reg_entry *
8083 parse_real_register (char *reg_string, char **end_op)
8084 {
8085 char *s = reg_string;
8086 char *p;
8087 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8088 const reg_entry *r;
8089
8090 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8091 if (*s == REGISTER_PREFIX)
8092 ++s;
8093
8094 if (is_space_char (*s))
8095 ++s;
8096
8097 p = reg_name_given;
8098 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8099 {
8100 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8101 return (const reg_entry *) NULL;
8102 s++;
8103 }
8104
8105 /* For naked regs, make sure that we are not dealing with an identifier.
8106 This prevents confusing an identifier like `eax_var' with register
8107 `eax'. */
8108 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8109 return (const reg_entry *) NULL;
8110
8111 *end_op = s;
8112
8113 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8114
8115 /* Handle floating point regs, allowing spaces in the (i) part. */
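  /* E.g. "%st", "%st(3)" and "%st ( 3 )" are all accepted here; the stack
     index must be a single digit between 0 and 7.  */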
8116 if (r == i386_regtab /* %st is first entry of table */)
8117 {
8118 if (is_space_char (*s))
8119 ++s;
8120 if (*s == '(')
8121 {
8122 ++s;
8123 if (is_space_char (*s))
8124 ++s;
8125 if (*s >= '0' && *s <= '7')
8126 {
8127 int fpr = *s - '0';
8128 ++s;
8129 if (is_space_char (*s))
8130 ++s;
8131 if (*s == ')')
8132 {
8133 *end_op = s + 1;
8134 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8135 know (r);
8136 return r + fpr;
8137 }
8138 }
8139 /* We have "%st(" then garbage. */
8140 return (const reg_entry *) NULL;
8141 }
8142 }
8143
8144 if (r == NULL || allow_pseudo_reg)
8145 return r;
8146
8147 if (operand_type_all_zero (&r->reg_type))
8148 return (const reg_entry *) NULL;
8149
8150 if ((r->reg_type.bitfield.reg32
8151 || r->reg_type.bitfield.sreg3
8152 || r->reg_type.bitfield.control
8153 || r->reg_type.bitfield.debug
8154 || r->reg_type.bitfield.test)
8155 && !cpu_arch_flags.bitfield.cpui386)
8156 return (const reg_entry *) NULL;
8157
8158 if (r->reg_type.bitfield.floatreg
8159 && !cpu_arch_flags.bitfield.cpu8087
8160 && !cpu_arch_flags.bitfield.cpu287
8161 && !cpu_arch_flags.bitfield.cpu387)
8162 return (const reg_entry *) NULL;
8163
8164 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8165 return (const reg_entry *) NULL;
8166
8167 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8168 return (const reg_entry *) NULL;
8169
8170 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8171 return (const reg_entry *) NULL;
8172
8173   /* Don't allow the fake index registers unless allow_index_reg is nonzero.  */
8174 if (!allow_index_reg
8175 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8176 return (const reg_entry *) NULL;
8177
8178 if (((r->reg_flags & (RegRex64 | RegRex))
8179 || r->reg_type.bitfield.reg64)
8180 && (!cpu_arch_flags.bitfield.cpulm
8181 || !operand_type_equal (&r->reg_type, &control))
8182 && flag_code != CODE_64BIT)
8183 return (const reg_entry *) NULL;
8184
8185 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8186 return (const reg_entry *) NULL;
8187
8188 return r;
8189 }
8190
8191 /* REG_STRING starts *before* REGISTER_PREFIX. */
8192
8193 static const reg_entry *
8194 parse_register (char *reg_string, char **end_op)
8195 {
8196 const reg_entry *r;
8197
8198 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8199 r = parse_real_register (reg_string, end_op);
8200 else
8201 r = NULL;
8202 if (!r)
8203 {
8204 char *save = input_line_pointer;
8205 char c;
8206 symbolS *symbolP;
8207
8208 input_line_pointer = reg_string;
8209 c = get_symbol_end ();
8210 symbolP = symbol_find (reg_string);
8211 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8212 {
8213 const expressionS *e = symbol_get_value_expression (symbolP);
8214
8215 know (e->X_op == O_register);
8216 know (e->X_add_number >= 0
8217 && (valueT) e->X_add_number < i386_regtab_size);
8218 r = i386_regtab + e->X_add_number;
8219 *end_op = input_line_pointer;
8220 }
8221 *input_line_pointer = c;
8222 input_line_pointer = save;
8223 }
8224 return r;
8225 }
8226
8227 int
8228 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8229 {
8230 const reg_entry *r;
8231 char *end = input_line_pointer;
8232
8233 *end = *nextcharP;
8234 r = parse_register (name, &input_line_pointer);
8235 if (r && end <= input_line_pointer)
8236 {
8237 *nextcharP = *input_line_pointer;
8238 *input_line_pointer = 0;
8239 e->X_op = O_register;
8240 e->X_add_number = r - i386_regtab;
8241 return 1;
8242 }
8243 input_line_pointer = end;
8244 *end = 0;
8245 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8246 }
8247
8248 void
8249 md_operand (expressionS *e)
8250 {
8251 char *end;
8252 const reg_entry *r;
8253
8254 switch (*input_line_pointer)
8255 {
8256 case REGISTER_PREFIX:
8257 r = parse_real_register (input_line_pointer, &end);
8258 if (r)
8259 {
8260 e->X_op = O_register;
8261 e->X_add_number = r - i386_regtab;
8262 input_line_pointer = end;
8263 }
8264 break;
8265
8266 case '[':
8267 gas_assert (intel_syntax);
8268 end = input_line_pointer++;
8269 expression (e);
8270 if (*input_line_pointer == ']')
8271 {
8272 ++input_line_pointer;
8273 e->X_op_symbol = make_expr_symbol (e);
8274 e->X_add_symbol = NULL;
8275 e->X_add_number = 0;
8276 e->X_op = O_index;
8277 }
8278 else
8279 {
8280 e->X_op = O_absent;
8281 input_line_pointer = end;
8282 }
8283 break;
8284 }
8285 }
8286
8287 \f
8288 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8289 const char *md_shortopts = "kVQ:sqn";
8290 #else
8291 const char *md_shortopts = "qn";
8292 #endif
8293
8294 #define OPTION_32 (OPTION_MD_BASE + 0)
8295 #define OPTION_64 (OPTION_MD_BASE + 1)
8296 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8297 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8298 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8299 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8300 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8301 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8302 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8303 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8304 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8305 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8306 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8307 #define OPTION_X32 (OPTION_MD_BASE + 13)
8308
8309 struct option md_longopts[] =
8310 {
8311 {"32", no_argument, NULL, OPTION_32},
8312 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8313 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8314 {"64", no_argument, NULL, OPTION_64},
8315 #endif
8316 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8317 {"x32", no_argument, NULL, OPTION_X32},
8318 #endif
8319 {"divide", no_argument, NULL, OPTION_DIVIDE},
8320 {"march", required_argument, NULL, OPTION_MARCH},
8321 {"mtune", required_argument, NULL, OPTION_MTUNE},
8322 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8323 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8324 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8325 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8326 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8327 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8328 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8329 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8330 {NULL, no_argument, NULL, 0}
8331 };
8332 size_t md_longopts_size = sizeof (md_longopts);
8333
8334 int
8335 md_parse_option (int c, char *arg)
8336 {
8337 unsigned int j;
8338 char *arch, *next;
8339
8340 switch (c)
8341 {
8342 case 'n':
8343 optimize_align_code = 0;
8344 break;
8345
8346 case 'q':
8347 quiet_warnings = 1;
8348 break;
8349
8350 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8351 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8352 should be emitted or not. FIXME: Not implemented. */
8353 case 'Q':
8354 break;
8355
8356 /* -V: SVR4 argument to print version ID. */
8357 case 'V':
8358 print_version_id ();
8359 break;
8360
8361 /* -k: Ignore for FreeBSD compatibility. */
8362 case 'k':
8363 break;
8364
8365 case 's':
8366 /* -s: On i386 Solaris, this tells the native assembler to use
8367 .stab instead of .stab.excl. We always use .stab anyhow. */
8368 break;
8369 #endif
8370 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8371 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8372 case OPTION_64:
8373 {
8374 const char **list, **l;
8375
8376 list = bfd_target_list ();
8377 for (l = list; *l != NULL; l++)
8378 if (CONST_STRNEQ (*l, "elf64-x86-64")
8379 || strcmp (*l, "coff-x86-64") == 0
8380 || strcmp (*l, "pe-x86-64") == 0
8381 || strcmp (*l, "pei-x86-64") == 0
8382 || strcmp (*l, "mach-o-x86-64") == 0)
8383 {
8384 default_arch = "x86_64";
8385 break;
8386 }
8387 if (*l == NULL)
8388 as_fatal (_("no compiled in support for x86_64"));
8389 free (list);
8390 }
8391 break;
8392 #endif
8393
8394 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8395 case OPTION_X32:
8396 if (IS_ELF)
8397 {
8398 const char **list, **l;
8399
8400 list = bfd_target_list ();
8401 for (l = list; *l != NULL; l++)
8402 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8403 {
8404 default_arch = "x86_64:32";
8405 break;
8406 }
8407 if (*l == NULL)
8408 as_fatal (_("no compiled in support for 32bit x86_64"));
8409 free (list);
8410 }
8411 else
8412 as_fatal (_("32bit x86_64 is only supported for ELF"));
8413 break;
8414 #endif
8415
8416 case OPTION_32:
8417 default_arch = "i386";
8418 break;
8419
8420 case OPTION_DIVIDE:
8421 #ifdef SVR4_COMMENT_CHARS
8422 {
8423 char *n, *t;
8424 const char *s;
8425
8426 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8427 t = n;
8428 for (s = i386_comment_chars; *s != '\0'; s++)
8429 if (*s != '/')
8430 *t++ = *s;
8431 *t = '\0';
8432 i386_comment_chars = n;
8433 }
8434 #endif
8435 break;
8436
8437 case OPTION_MARCH:
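      /* For example (illustrative), "-march=core2+avx" selects the core2
	 processor and then enables the AVX extension on top of it; the
	 "+ext" items are processed left to right.  */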
8438 arch = xstrdup (arg);
8439 do
8440 {
8441 if (*arch == '.')
8442 as_fatal (_("invalid -march= option: `%s'"), arg);
8443 next = strchr (arch, '+');
8444 if (next)
8445 *next++ = '\0';
8446 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8447 {
8448 if (strcmp (arch, cpu_arch [j].name) == 0)
8449 {
8450 /* Processor. */
8451 if (! cpu_arch[j].flags.bitfield.cpui386)
8452 continue;
8453
8454 cpu_arch_name = cpu_arch[j].name;
8455 cpu_sub_arch_name = NULL;
8456 cpu_arch_flags = cpu_arch[j].flags;
8457 cpu_arch_isa = cpu_arch[j].type;
8458 cpu_arch_isa_flags = cpu_arch[j].flags;
8459 if (!cpu_arch_tune_set)
8460 {
8461 cpu_arch_tune = cpu_arch_isa;
8462 cpu_arch_tune_flags = cpu_arch_isa_flags;
8463 }
8464 break;
8465 }
8466 else if (*cpu_arch [j].name == '.'
8467 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8468 {
8469 	      /* ISA extension.  */
8470 i386_cpu_flags flags;
8471
8472 if (!cpu_arch[j].negated)
8473 flags = cpu_flags_or (cpu_arch_flags,
8474 cpu_arch[j].flags);
8475 else
8476 flags = cpu_flags_and_not (cpu_arch_flags,
8477 cpu_arch[j].flags);
8478 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8479 {
8480 if (cpu_sub_arch_name)
8481 {
8482 char *name = cpu_sub_arch_name;
8483 cpu_sub_arch_name = concat (name,
8484 cpu_arch[j].name,
8485 (const char *) NULL);
8486 free (name);
8487 }
8488 else
8489 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8490 cpu_arch_flags = flags;
8491 cpu_arch_isa_flags = flags;
8492 }
8493 break;
8494 }
8495 }
8496
8497 if (j >= ARRAY_SIZE (cpu_arch))
8498 as_fatal (_("invalid -march= option: `%s'"), arg);
8499
8500 arch = next;
8501 }
8502       while (next != NULL);
8503 break;
8504
8505 case OPTION_MTUNE:
8506 if (*arg == '.')
8507 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8508 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8509 {
8510 if (strcmp (arg, cpu_arch [j].name) == 0)
8511 {
8512 cpu_arch_tune_set = 1;
8513 cpu_arch_tune = cpu_arch [j].type;
8514 cpu_arch_tune_flags = cpu_arch[j].flags;
8515 break;
8516 }
8517 }
8518 if (j >= ARRAY_SIZE (cpu_arch))
8519 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8520 break;
8521
8522 case OPTION_MMNEMONIC:
8523 if (strcasecmp (arg, "att") == 0)
8524 intel_mnemonic = 0;
8525 else if (strcasecmp (arg, "intel") == 0)
8526 intel_mnemonic = 1;
8527 else
8528 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8529 break;
8530
8531 case OPTION_MSYNTAX:
8532 if (strcasecmp (arg, "att") == 0)
8533 intel_syntax = 0;
8534 else if (strcasecmp (arg, "intel") == 0)
8535 intel_syntax = 1;
8536 else
8537 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8538 break;
8539
8540 case OPTION_MINDEX_REG:
8541 allow_index_reg = 1;
8542 break;
8543
8544 case OPTION_MNAKED_REG:
8545 allow_naked_reg = 1;
8546 break;
8547
8548 case OPTION_MOLD_GCC:
8549 old_gcc = 1;
8550 break;
8551
8552 case OPTION_MSSE2AVX:
8553 sse2avx = 1;
8554 break;
8555
8556 case OPTION_MSSE_CHECK:
8557 if (strcasecmp (arg, "error") == 0)
8558 sse_check = sse_check_error;
8559 else if (strcasecmp (arg, "warning") == 0)
8560 sse_check = sse_check_warning;
8561 else if (strcasecmp (arg, "none") == 0)
8562 sse_check = sse_check_none;
8563 else
8564 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8565 break;
8566
8567 case OPTION_MAVXSCALAR:
8568 if (strcasecmp (arg, "128") == 0)
8569 avxscalar = vex128;
8570 else if (strcasecmp (arg, "256") == 0)
8571 avxscalar = vex256;
8572 else
8573 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8574 break;
8575
8576 default:
8577 return 0;
8578 }
8579 return 1;
8580 }
8581
8582 #define MESSAGE_TEMPLATE \
8583 " "
8584
8585 static void
8586 show_arch (FILE *stream, int ext, int check)
8587 {
8588 static char message[] = MESSAGE_TEMPLATE;
8589 char *start = message + 27;
8590 char *p;
8591 int size = sizeof (MESSAGE_TEMPLATE);
8592 int left;
8593 const char *name;
8594 int len;
8595 unsigned int j;
8596
8597 p = start;
8598 left = size - (start - message);
8599 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8600 {
8601 /* Should it be skipped? */
8602 if (cpu_arch [j].skip)
8603 continue;
8604
8605 name = cpu_arch [j].name;
8606 len = cpu_arch [j].len;
8607 if (*name == '.')
8608 {
8609 /* It is an extension. Skip if we aren't asked to show it. */
8610 if (ext)
8611 {
8612 name++;
8613 len--;
8614 }
8615 else
8616 continue;
8617 }
8618 else if (ext)
8619 {
8620 	  /* It is a processor.  Skip it if we only show extensions.  */
8621 continue;
8622 }
8623 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8624 {
8625 /* It is an impossible processor - skip. */
8626 continue;
8627 }
8628
8629 /* Reserve 2 spaces for ", " or ",\0" */
8630 left -= len + 2;
8631
8632 /* Check if there is any room. */
8633 if (left >= 0)
8634 {
8635 if (p != start)
8636 {
8637 *p++ = ',';
8638 *p++ = ' ';
8639 }
8640 p = mempcpy (p, name, len);
8641 }
8642 else
8643 {
8644 /* Output the current message now and start a new one. */
8645 *p++ = ',';
8646 *p = '\0';
8647 fprintf (stream, "%s\n", message);
8648 p = start;
8649 left = size - (start - message) - len - 2;
8650
8651 gas_assert (left >= 0);
8652
8653 p = mempcpy (p, name, len);
8654 }
8655 }
8656
8657 *p = '\0';
8658 fprintf (stream, "%s\n", message);
8659 }
8660
8661 void
8662 md_show_usage (FILE *stream)
8663 {
8664 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8665 fprintf (stream, _("\
8666 -Q ignored\n\
8667 -V print assembler version number\n\
8668 -k ignored\n"));
8669 #endif
8670 fprintf (stream, _("\
8671 -n Do not optimize code alignment\n\
8672 -q quieten some warnings\n"));
8673 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8674 fprintf (stream, _("\
8675 -s ignored\n"));
8676 #endif
8677 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8678 || defined (TE_PE) || defined (TE_PEP))
8679 fprintf (stream, _("\
8680 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8681 #endif
8682 #ifdef SVR4_COMMENT_CHARS
8683 fprintf (stream, _("\
8684 --divide do not treat `/' as a comment character\n"));
8685 #else
8686 fprintf (stream, _("\
8687 --divide ignored\n"));
8688 #endif
8689 fprintf (stream, _("\
8690 -march=CPU[,+EXTENSION...]\n\
8691 generate code for CPU and EXTENSION, CPU is one of:\n"));
8692 show_arch (stream, 0, 1);
8693 fprintf (stream, _("\
8694 EXTENSION is combination of:\n"));
8695 show_arch (stream, 1, 0);
8696 fprintf (stream, _("\
8697 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8698 show_arch (stream, 0, 0);
8699 fprintf (stream, _("\
8700 -msse2avx encode SSE instructions with VEX prefix\n"));
8701 fprintf (stream, _("\
8702 -msse-check=[none|error|warning]\n\
8703 check SSE instructions\n"));
8704 fprintf (stream, _("\
8705 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8706 length\n"));
8707 fprintf (stream, _("\
8708 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8709 fprintf (stream, _("\
8710 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8711 fprintf (stream, _("\
8712 -mindex-reg support pseudo index registers\n"));
8713 fprintf (stream, _("\
8714 -mnaked-reg don't require `%%' prefix for registers\n"));
8715 fprintf (stream, _("\
8716 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8717 }
8718
8719 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8720 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8721 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8722
8723 /* Pick the target format to use. */
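/* With ELF output, for instance, "--64" yields ELF_TARGET_FORMAT64
   (typically elf64-x86-64), "--x32" yields ELF_TARGET_FORMAT32
   (elf32-x86-64) and "--32" falls back to ELF_TARGET_FORMAT
   (e.g. elf32-i386).  */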
8724
8725 const char *
8726 i386_target_format (void)
8727 {
8728 if (!strncmp (default_arch, "x86_64", 6))
8729 {
8730 update_code_flag (CODE_64BIT, 1);
8731 if (default_arch[6] == '\0')
8732 x86_elf_abi = X86_64_ABI;
8733 else
8734 x86_elf_abi = X86_64_X32_ABI;
8735 }
8736 else if (!strcmp (default_arch, "i386"))
8737 update_code_flag (CODE_32BIT, 1);
8738 else
8739 as_fatal (_("unknown architecture"));
8740
8741 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8742 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8743 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8744 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8745
8746 switch (OUTPUT_FLAVOR)
8747 {
8748 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8749 case bfd_target_aout_flavour:
8750 return AOUT_TARGET_FORMAT;
8751 #endif
8752 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8753 # if defined (TE_PE) || defined (TE_PEP)
8754 case bfd_target_coff_flavour:
8755 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8756 # elif defined (TE_GO32)
8757 case bfd_target_coff_flavour:
8758 return "coff-go32";
8759 # else
8760 case bfd_target_coff_flavour:
8761 return "coff-i386";
8762 # endif
8763 #endif
8764 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8765 case bfd_target_elf_flavour:
8766 {
8767 const char *format;
8768
8769 switch (x86_elf_abi)
8770 {
8771 default:
8772 format = ELF_TARGET_FORMAT;
8773 break;
8774 case X86_64_ABI:
8775 use_rela_relocations = 1;
8776 object_64bit = 1;
8777 format = ELF_TARGET_FORMAT64;
8778 break;
8779 case X86_64_X32_ABI:
8780 use_rela_relocations = 1;
8781 object_64bit = 1;
8782 disallow_64bit_reloc = 1;
8783 format = ELF_TARGET_FORMAT32;
8784 break;
8785 }
8786 if (cpu_arch_isa == PROCESSOR_L1OM)
8787 {
8788 if (x86_elf_abi != X86_64_ABI)
8789 as_fatal (_("Intel L1OM is 64bit only"));
8790 return ELF_TARGET_L1OM_FORMAT;
8791 }
8792 if (cpu_arch_isa == PROCESSOR_K1OM)
8793 {
8794 if (x86_elf_abi != X86_64_ABI)
8795 as_fatal (_("Intel K1OM is 64bit only"));
8796 return ELF_TARGET_K1OM_FORMAT;
8797 }
8798 else
8799 return format;
8800 }
8801 #endif
8802 #if defined (OBJ_MACH_O)
8803 case bfd_target_mach_o_flavour:
8804 if (flag_code == CODE_64BIT)
8805 {
8806 use_rela_relocations = 1;
8807 object_64bit = 1;
8808 return "mach-o-x86-64";
8809 }
8810 else
8811 return "mach-o-i386";
8812 #endif
8813 default:
8814 abort ();
8815 return NULL;
8816 }
8817 }
8818
8819 #endif /* OBJ_MAYBE_ more than one */
8820
8821 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8822 void
8823 i386_elf_emit_arch_note (void)
8824 {
8825 if (IS_ELF && cpu_arch_name != NULL)
8826 {
8827 char *p;
8828 asection *seg = now_seg;
8829 subsegT subseg = now_subseg;
8830 Elf_Internal_Note i_note;
8831 Elf_External_Note e_note;
8832 asection *note_secp;
8833 int len;
8834
8835 /* Create the .note section. */
8836 note_secp = subseg_new (".note", 0);
8837 bfd_set_section_flags (stdoutput,
8838 note_secp,
8839 SEC_HAS_CONTENTS | SEC_READONLY);
8840
8841 /* Process the arch string. */
8842 len = strlen (cpu_arch_name);
8843
8844 i_note.namesz = len + 1;
8845 i_note.descsz = 0;
8846 i_note.type = NT_ARCH;
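      /* Lay the note out as namesz, descsz and type (4 bytes each),
	 followed by the NUL-terminated architecture name, padded to a
	 4-byte boundary.  */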
8847 p = frag_more (sizeof (e_note.namesz));
8848 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8849 p = frag_more (sizeof (e_note.descsz));
8850 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8851 p = frag_more (sizeof (e_note.type));
8852 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8853 p = frag_more (len + 1);
8854 strcpy (p, cpu_arch_name);
8855
8856 frag_align (2, 0, 0);
8857
8858 subseg_set (seg, subseg);
8859 }
8860 }
8861 #endif
8862 \f
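/* The first reference to _GLOBAL_OFFSET_TABLE_ creates the special
   GOT_symbol; every later reference returns that same symbol.  */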
8863 symbolS *
8864 md_undefined_symbol (char *name)
8865 {
8866 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8867 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8868 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8869 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8870 {
8871 if (!GOT_symbol)
8872 {
8873 if (symbol_find (name))
8874 as_bad (_("GOT already in symbol table"));
8875 GOT_symbol = symbol_new (name, undefined_section,
8876 (valueT) 0, &zero_address_frag);
8877 	}
8878 return GOT_symbol;
8879 }
8880 return 0;
8881 }
8882
8883 /* Round up a section size to the appropriate boundary. */
8884
8885 valueT
8886 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8887 {
8888 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8889 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8890 {
8891 /* For a.out, force the section size to be aligned. If we don't do
8892 this, BFD will align it for us, but it will not write out the
8893 final bytes of the section. This may be a bug in BFD, but it is
8894 easier to fix it here since that is how the other a.out targets
8895 work. */
8896 int align;
8897
8898 align = bfd_get_section_alignment (stdoutput, segment);
8899 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8900 }
8901 #endif
8902
8903 return size;
8904 }
8905
8906 /* On the i386, PC-relative offsets are relative to the start of the
8907 next instruction. That is, the address of the offset, plus its
8908 size, since the offset is always the last part of the insn. */
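/* For example, a 4-byte pc-relative displacement written at address 0x100
   is resolved relative to 0x104, the address of the following instruction
   (illustrative figures).  */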
8909
8910 long
8911 md_pcrel_from (fixS *fixP)
8912 {
8913 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8914 }
8915
8916 #ifndef I386COFF
8917
8918 static void
8919 s_bss (int ignore ATTRIBUTE_UNUSED)
8920 {
8921 int temp;
8922
8923 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8924 if (IS_ELF)
8925 obj_elf_section_change_hook ();
8926 #endif
8927 temp = get_absolute_expression ();
8928 subseg_set (bss_section, (subsegT) temp);
8929 demand_empty_rest_of_line ();
8930 }
8931
8932 #endif
8933
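/* Rewrite an expression of the form `sym - _GLOBAL_OFFSET_TABLE_' as a
   GOT-relative relocation: GOTPCREL for a 64-bit pc-relative reference,
   otherwise GOTOFF (or GOTOFF64 for 64-bit objects).  */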
8934 void
8935 i386_validate_fix (fixS *fixp)
8936 {
8937 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8938 {
8939 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8940 {
8941 if (!object_64bit)
8942 abort ();
8943 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8944 }
8945 else
8946 {
8947 if (!object_64bit)
8948 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8949 else
8950 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8951 }
8952 fixp->fx_subsy = 0;
8953 }
8954 }
8955
8956 arelent *
8957 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8958 {
8959 arelent *rel;
8960 bfd_reloc_code_real_type code;
8961
8962 switch (fixp->fx_r_type)
8963 {
8964 case BFD_RELOC_X86_64_PLT32:
8965 case BFD_RELOC_X86_64_GOT32:
8966 case BFD_RELOC_X86_64_GOTPCREL:
8967 case BFD_RELOC_386_PLT32:
8968 case BFD_RELOC_386_GOT32:
8969 case BFD_RELOC_386_GOTOFF:
8970 case BFD_RELOC_386_GOTPC:
8971 case BFD_RELOC_386_TLS_GD:
8972 case BFD_RELOC_386_TLS_LDM:
8973 case BFD_RELOC_386_TLS_LDO_32:
8974 case BFD_RELOC_386_TLS_IE_32:
8975 case BFD_RELOC_386_TLS_IE:
8976 case BFD_RELOC_386_TLS_GOTIE:
8977 case BFD_RELOC_386_TLS_LE_32:
8978 case BFD_RELOC_386_TLS_LE:
8979 case BFD_RELOC_386_TLS_GOTDESC:
8980 case BFD_RELOC_386_TLS_DESC_CALL:
8981 case BFD_RELOC_X86_64_TLSGD:
8982 case BFD_RELOC_X86_64_TLSLD:
8983 case BFD_RELOC_X86_64_DTPOFF32:
8984 case BFD_RELOC_X86_64_DTPOFF64:
8985 case BFD_RELOC_X86_64_GOTTPOFF:
8986 case BFD_RELOC_X86_64_TPOFF32:
8987 case BFD_RELOC_X86_64_TPOFF64:
8988 case BFD_RELOC_X86_64_GOTOFF64:
8989 case BFD_RELOC_X86_64_GOTPC32:
8990 case BFD_RELOC_X86_64_GOT64:
8991 case BFD_RELOC_X86_64_GOTPCREL64:
8992 case BFD_RELOC_X86_64_GOTPC64:
8993 case BFD_RELOC_X86_64_GOTPLT64:
8994 case BFD_RELOC_X86_64_PLTOFF64:
8995 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8996 case BFD_RELOC_X86_64_TLSDESC_CALL:
8997 case BFD_RELOC_RVA:
8998 case BFD_RELOC_VTABLE_ENTRY:
8999 case BFD_RELOC_VTABLE_INHERIT:
9000 #ifdef TE_PE
9001 case BFD_RELOC_32_SECREL:
9002 #endif
9003 code = fixp->fx_r_type;
9004 break;
9005 case BFD_RELOC_X86_64_32S:
9006 if (!fixp->fx_pcrel)
9007 {
9008 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9009 code = fixp->fx_r_type;
9010 break;
9011 }
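      /* Fall through.  */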
9012 default:
9013 if (fixp->fx_pcrel)
9014 {
9015 switch (fixp->fx_size)
9016 {
9017 default:
9018 as_bad_where (fixp->fx_file, fixp->fx_line,
9019 _("can not do %d byte pc-relative relocation"),
9020 fixp->fx_size);
9021 code = BFD_RELOC_32_PCREL;
9022 break;
9023 case 1: code = BFD_RELOC_8_PCREL; break;
9024 case 2: code = BFD_RELOC_16_PCREL; break;
9025 case 4: code = BFD_RELOC_32_PCREL; break;
9026 #ifdef BFD64
9027 case 8: code = BFD_RELOC_64_PCREL; break;
9028 #endif
9029 }
9030 }
9031 else
9032 {
9033 switch (fixp->fx_size)
9034 {
9035 default:
9036 as_bad_where (fixp->fx_file, fixp->fx_line,
9037 _("can not do %d byte relocation"),
9038 fixp->fx_size);
9039 code = BFD_RELOC_32;
9040 break;
9041 case 1: code = BFD_RELOC_8; break;
9042 case 2: code = BFD_RELOC_16; break;
9043 case 4: code = BFD_RELOC_32; break;
9044 #ifdef BFD64
9045 case 8: code = BFD_RELOC_64; break;
9046 #endif
9047 }
9048 }
9049 break;
9050 }
9051
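  /* A 32-bit reference whose symbol is _GLOBAL_OFFSET_TABLE_ itself is
     converted into the corresponding GOT-pc relocation so the linker can
     compute the distance to the start of the GOT.  */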
9052 if ((code == BFD_RELOC_32
9053 || code == BFD_RELOC_32_PCREL
9054 || code == BFD_RELOC_X86_64_32S)
9055 && GOT_symbol
9056 && fixp->fx_addsy == GOT_symbol)
9057 {
9058 if (!object_64bit)
9059 code = BFD_RELOC_386_GOTPC;
9060 else
9061 code = BFD_RELOC_X86_64_GOTPC32;
9062 }
9063 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9064 && GOT_symbol
9065 && fixp->fx_addsy == GOT_symbol)
9066 {
9067 code = BFD_RELOC_X86_64_GOTPC64;
9068 }
9069
9070 rel = (arelent *) xmalloc (sizeof (arelent));
9071 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9072 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9073
9074 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9075
9076 if (!use_rela_relocations)
9077 {
9078 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9079 vtable entry to be used in the relocation's section offset. */
9080 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9081 rel->address = fixp->fx_offset;
9082 #if defined (OBJ_COFF) && defined (TE_PE)
9083 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9084 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9085 else
9086 #endif
9087 rel->addend = 0;
9088 }
9089   /* Use rela relocations in 64-bit mode.  */
9090 else
9091 {
9092 if (disallow_64bit_reloc)
9093 switch (code)
9094 {
9095 case BFD_RELOC_X86_64_DTPOFF64:
9096 case BFD_RELOC_X86_64_TPOFF64:
9097 case BFD_RELOC_64_PCREL:
9098 case BFD_RELOC_X86_64_GOTOFF64:
9099 case BFD_RELOC_X86_64_GOT64:
9100 case BFD_RELOC_X86_64_GOTPCREL64:
9101 case BFD_RELOC_X86_64_GOTPC64:
9102 case BFD_RELOC_X86_64_GOTPLT64:
9103 case BFD_RELOC_X86_64_PLTOFF64:
9104 as_bad_where (fixp->fx_file, fixp->fx_line,
9105 _("cannot represent relocation type %s in x32 mode"),
9106 bfd_get_reloc_code_name (code));
9107 break;
9108 default:
9109 break;
9110 }
9111
9112 if (!fixp->fx_pcrel)
9113 rel->addend = fixp->fx_offset;
9114 else
9115 switch (code)
9116 {
9117 case BFD_RELOC_X86_64_PLT32:
9118 case BFD_RELOC_X86_64_GOT32:
9119 case BFD_RELOC_X86_64_GOTPCREL:
9120 case BFD_RELOC_X86_64_TLSGD:
9121 case BFD_RELOC_X86_64_TLSLD:
9122 case BFD_RELOC_X86_64_GOTTPOFF:
9123 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9124 case BFD_RELOC_X86_64_TLSDESC_CALL:
9125 rel->addend = fixp->fx_offset - fixp->fx_size;
9126 break;
9127 default:
9128 rel->addend = (section->vma
9129 - fixp->fx_size
9130 + fixp->fx_addnumber
9131 + md_pcrel_from (fixp));
9132 break;
9133 }
9134 }
9135
9136 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9137 if (rel->howto == NULL)
9138 {
9139 as_bad_where (fixp->fx_file, fixp->fx_line,
9140 _("cannot represent relocation type %s"),
9141 bfd_get_reloc_code_name (code));
9142 /* Set howto to a garbage value so that we can keep going. */
9143 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9144 gas_assert (rel->howto != NULL);
9145 }
9146
9147 return rel;
9148 }
9149
9150 #include "tc-i386-intel.c"
9151
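/* Parse a register name such as "eax" or "rsp" from the input line and
   turn EXP into the corresponding DWARF2 register number for the current
   code size.  */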
9152 void
9153 tc_x86_parse_to_dw2regnum (expressionS *exp)
9154 {
9155 int saved_naked_reg;
9156 char saved_register_dot;
9157
9158 saved_naked_reg = allow_naked_reg;
9159 allow_naked_reg = 1;
9160 saved_register_dot = register_chars['.'];
9161 register_chars['.'] = '.';
9162 allow_pseudo_reg = 1;
9163 expression_and_evaluate (exp);
9164 allow_pseudo_reg = 0;
9165 register_chars['.'] = saved_register_dot;
9166 allow_naked_reg = saved_naked_reg;
9167
9168 if (exp->X_op == O_register && exp->X_add_number >= 0)
9169 {
9170 if ((addressT) exp->X_add_number < i386_regtab_size)
9171 {
9172 exp->X_op = O_constant;
9173 exp->X_add_number = i386_regtab[exp->X_add_number]
9174 .dw2_regnum[flag_code >> 1];
9175 }
9176 else
9177 exp->X_op = O_illegal;
9178 }
9179 }
9180
9181 void
9182 tc_x86_frame_initial_instructions (void)
9183 {
9184 static unsigned int sp_regno[2];
9185
9186 if (!sp_regno[flag_code >> 1])
9187 {
9188 char *saved_input = input_line_pointer;
9189 char sp[][4] = {"esp", "rsp"};
9190 expressionS exp;
9191
9192 input_line_pointer = sp[flag_code >> 1];
9193 tc_x86_parse_to_dw2regnum (&exp);
9194 gas_assert (exp.X_op == O_constant);
9195 sp_regno[flag_code >> 1] = exp.X_add_number;
9196 input_line_pointer = saved_input;
9197 }
9198
9199 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9200 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9201 }
9202
9203 int
9204 x86_dwarf2_addr_size (void)
9205 {
9206 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9207 if (x86_elf_abi == X86_64_X32_ABI)
9208 return 4;
9209 #endif
9210 return bfd_arch_bits_per_address (stdoutput) / 8;
9211 }
9212
9213 int
9214 i386_elf_section_type (const char *str, size_t len)
9215 {
9216 if (flag_code == CODE_64BIT
9217 && len == sizeof ("unwind") - 1
9218 && strncmp (str, "unwind", 6) == 0)
9219 return SHT_X86_64_UNWIND;
9220
9221 return -1;
9222 }
9223
9224 #ifdef TE_SOLARIS
9225 void
9226 i386_solaris_fix_up_eh_frame (segT sec)
9227 {
9228 if (flag_code == CODE_64BIT)
9229 elf_section_type (sec) = SHT_X86_64_UNWIND;
9230 }
9231 #endif
9232
9233 #ifdef TE_PE
9234 void
9235 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9236 {
9237 expressionS exp;
9238
9239 exp.X_op = O_secrel;
9240 exp.X_add_symbol = symbol;
9241 exp.X_add_number = 0;
9242 emit_expr (&exp, size);
9243 }
9244 #endif
9245
9246 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9247 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
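/* E.g. (illustrative) `.section .ldata,"awl",@progbits' uses the `l'
   flag letter to request SHF_X86_64_LARGE.  */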
9248
9249 bfd_vma
9250 x86_64_section_letter (int letter, char **ptr_msg)
9251 {
9252 if (flag_code == CODE_64BIT)
9253 {
9254 if (letter == 'l')
9255 return SHF_X86_64_LARGE;
9256
9257 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9258 }
9259 else
9260 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9261 return -1;
9262 }
9263
9264 bfd_vma
9265 x86_64_section_word (char *str, size_t len)
9266 {
9267 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9268 return SHF_X86_64_LARGE;
9269
9270 return -1;
9271 }
9272
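/* Handle the .largecomm directive: behave like .comm, except that in
   64-bit mode the symbol goes into the large common section (local
   symbols into .lbss) so it need not fit the small data model.  */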
9273 static void
9274 handle_large_common (int small ATTRIBUTE_UNUSED)
9275 {
9276 if (flag_code != CODE_64BIT)
9277 {
9278 s_comm_internal (0, elf_common_parse);
9279 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9280 }
9281 else
9282 {
9283 static segT lbss_section;
9284 asection *saved_com_section_ptr = elf_com_section_ptr;
9285 asection *saved_bss_section = bss_section;
9286
9287 if (lbss_section == NULL)
9288 {
9289 flagword applicable;
9290 segT seg = now_seg;
9291 subsegT subseg = now_subseg;
9292
9293 /* The .lbss section is for local .largecomm symbols. */
9294 lbss_section = subseg_new (".lbss", 0);
9295 applicable = bfd_applicable_section_flags (stdoutput);
9296 bfd_set_section_flags (stdoutput, lbss_section,
9297 applicable & SEC_ALLOC);
9298 seg_info (lbss_section)->bss = 1;
9299
9300 subseg_set (seg, subseg);
9301 }
9302
9303 elf_com_section_ptr = &_bfd_elf_large_com_section;
9304 bss_section = lbss_section;
9305
9306 s_comm_internal (0, elf_common_parse);
9307
9308 elf_com_section_ptr = saved_com_section_ptr;
9309 bss_section = saved_bss_section;
9310 }
9311 }
9312 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */