1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define LOCK_PREFIX 5
70 #define REX_PREFIX 6 /* must come last. */
71 #define MAX_PREFIXES 7 /* max prefixes per opcode */
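/* For example, when both an operand-size override and a LOCK prefix are
   needed, 0x66 is recorded in i.prefix[DATA_PREFIX] and 0xf0 in
   i.prefix[LOCK_PREFIX]; the bytes are then written out in slot order,
   with any REX prefix (64-bit code only) emitted last, immediately
   before the opcode.  */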
72
73 /* we define the syntax here (modulo base,index,scale syntax) */
74 #define REGISTER_PREFIX '%'
75 #define IMMEDIATE_PREFIX '$'
76 #define ABSOLUTE_PREFIX '*'
77
78 /* these are the instruction mnemonic suffixes in AT&T syntax or
79 memory operand size in Intel syntax. */
80 #define WORD_MNEM_SUFFIX 'w'
81 #define BYTE_MNEM_SUFFIX 'b'
82 #define SHORT_MNEM_SUFFIX 's'
83 #define LONG_MNEM_SUFFIX 'l'
84 #define QWORD_MNEM_SUFFIX 'q'
85 #define XMMWORD_MNEM_SUFFIX 'x'
86 #define YMMWORD_MNEM_SUFFIX 'y'
87 /* Intel Syntax.  Use a non-ascii letter since it never appears
88 in instructions. */
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
90
91 #define END_OF_INSN '\0'
92
93 /*
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
98 END.
99 */
100 typedef struct
101 {
102 const insn_template *start;
103 const insn_template *end;
104 }
105 templates;
106
107 /* 386 operand encoding bytes: see 386 book for details of this. */
108 typedef struct
109 {
110 unsigned int regmem; /* codes register or memory operand */
111 unsigned int reg; /* codes register operand (or extended opcode) */
112 unsigned int mode; /* how to interpret regmem & reg */
113 }
114 modrm_byte;
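/* For example, `movl %eax, %ebx' (opcode 0x89 /r) is encoded with
   mode 3 (register direct), reg 0 (%eax, the source) and regmem 3
   (%ebx, the destination), i.e. the ModRM byte
   (3 << 6) | (0 << 3) | 3 = 0xc3, giving the byte sequence 89 c3.  */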
115
116 /* x86-64 extension prefix. */
117 typedef int rex_byte;
118
119 /* 386 opcode byte to code indirect addressing. */
120 typedef struct
121 {
122 unsigned base;
123 unsigned index;
124 unsigned scale;
125 }
126 sib_byte;
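/* For example, the memory operand `(%eax,%ebx,4)' uses base 0 (%eax),
   index 3 (%ebx) and scale 2 (log2 of 4), i.e. the SIB byte
   (2 << 6) | (3 << 3) | 0 = 0x98.  */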
127
128 /* x86 arch names, types and features */
129 typedef struct
130 {
131 const char *name; /* arch name */
132 unsigned int len; /* arch string length */
133 enum processor_type type; /* arch type */
134 i386_cpu_flags flags; /* cpu feature flags */
135 unsigned int skip; /* show_arch should skip this. */
136 unsigned int negated; /* turn off indicated flags. */
137 }
138 arch_entry;
139
140 static void update_code_flag (int, int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_sse_check (int);
147 static void set_cpu_arch (int);
148 #ifdef TE_PE
149 static void pe_directive_secrel (int);
150 #endif
151 static void signed_cons (int);
152 static char *output_invalid (int c);
153 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
154 const char *);
155 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
156 const char *);
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS *);
160 static int i386_intel_parse_name (const char *, expressionS *);
161 static const reg_entry *parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (int, int);
166 static void optimize_imm (void);
167 static void optimize_disp (void);
168 static const insn_template *match_template (void);
169 static int check_string (void);
170 static int process_suffix (void);
171 static int check_byte_reg (void);
172 static int check_long_reg (void);
173 static int check_qword_reg (void);
174 static int check_word_reg (void);
175 static int finalize_imm (void);
176 static int process_operands (void);
177 static const seg_entry *build_modrm_byte (void);
178 static void output_insn (void);
179 static void output_imm (fragS *, offsetT);
180 static void output_disp (fragS *, offsetT);
181 #ifndef I386COFF
182 static void s_bss (int);
183 #endif
184 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185 static void handle_large_common (int small ATTRIBUTE_UNUSED);
186 #endif
187
188 static const char *default_arch = DEFAULT_ARCH;
189
190 /* VEX prefix. */
191 typedef struct
192 {
193 /* VEX prefix is either 2 byte or 3 byte. */
194 unsigned char bytes[3];
195 unsigned int length;
196 /* Destination or source register specifier. */
197 const reg_entry *register_specifier;
198 } vex_prefix;
199
200 /* 'md_assemble ()' gathers together information and puts it into an
201 i386_insn. */
202
203 union i386_op
204 {
205 expressionS *disps;
206 expressionS *imms;
207 const reg_entry *regs;
208 };
209
210 enum i386_error
211 {
212 operand_size_mismatch,
213 operand_type_mismatch,
214 register_type_mismatch,
215 number_of_operands_mismatch,
216 invalid_instruction_suffix,
217 bad_imm4,
218 old_gcc_only,
219 unsupported_with_intel_mnemonic,
220 unsupported_syntax,
221 unsupported,
222 invalid_vsib_address,
223 unsupported_vector_index_register
224 };
225
226 struct _i386_insn
227 {
228 /* TM holds the template for the insn we're currently assembling.  */
229 insn_template tm;
230
231 /* SUFFIX holds the instruction size suffix for byte, word, dword
232 or qword, if given. */
233 char suffix;
234
235 /* OPERANDS gives the number of given operands. */
236 unsigned int operands;
237
238 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
239 of given register, displacement, memory operands and immediate
240 operands. */
241 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
242
243 /* TYPES [i] is the type (see above #defines) which tells us how to
244 use OP[i] for the corresponding operand. */
245 i386_operand_type types[MAX_OPERANDS];
246
247 /* Displacement expression, immediate expression, or register for each
248 operand. */
249 union i386_op op[MAX_OPERANDS];
250
251 /* Flags for operands. */
252 unsigned int flags[MAX_OPERANDS];
253 #define Operand_PCrel 1
254
255 /* Relocation type for operand */
256 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
257
258 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
259 the base index byte below. */
260 const reg_entry *base_reg;
261 const reg_entry *index_reg;
262 unsigned int log2_scale_factor;
263
264 /* SEG gives the seg_entries of this insn. They are zero unless
265 explicit segment overrides are given. */
266 const seg_entry *seg[2];
267
268 /* PREFIX holds all the given prefix opcodes (usually null).
269 PREFIXES is the number of prefix opcodes. */
270 unsigned int prefixes;
271 unsigned char prefix[MAX_PREFIXES];
272
273 /* RM and SIB are the modrm byte and the sib byte where the
274 addressing modes of this insn are encoded. */
275 modrm_byte rm;
276 rex_byte rex;
277 sib_byte sib;
278 vex_prefix vex;
279
280 /* Swap operand in encoding. */
281 unsigned int swap_operand;
282
283 /* Prefer 8bit or 32bit displacement in encoding. */
284 enum
285 {
286 disp_encoding_default = 0,
287 disp_encoding_8bit,
288 disp_encoding_32bit
289 } disp_encoding;
290
291 /* Error message. */
292 enum i386_error error;
293 };
294
295 typedef struct _i386_insn i386_insn;
296
297 /* List of chars besides those in app.c:symbol_chars that can start an
298 operand. Used to prevent the scrubber eating vital white-space. */
299 const char extra_symbol_chars[] = "*%-(["
300 #ifdef LEX_AT
301 "@"
302 #endif
303 #ifdef LEX_QM
304 "?"
305 #endif
306 ;
307
308 #if (defined (TE_I386AIX) \
309 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
310 && !defined (TE_GNU) \
311 && !defined (TE_LINUX) \
312 && !defined (TE_NACL) \
313 && !defined (TE_NETWARE) \
314 && !defined (TE_FreeBSD) \
315 && !defined (TE_DragonFly) \
316 && !defined (TE_NetBSD)))
317 /* This array holds the chars that always start a comment. If the
318 pre-processor is disabled, these aren't very useful. The option
319 --divide will remove '/' from this list. */
320 const char *i386_comment_chars = "#/";
321 #define SVR4_COMMENT_CHARS 1
322 #define PREFIX_SEPARATOR '\\'
323
324 #else
325 const char *i386_comment_chars = "#";
326 #define PREFIX_SEPARATOR '/'
327 #endif
328
329 /* This array holds the chars that only start a comment at the beginning of
330 a line. If the line seems to have the form '# 123 filename'
331 .line and .file directives will appear in the pre-processed output.
332 Note that input_file.c hand checks for '#' at the beginning of the
333 first line of the input file. This is because the compiler outputs
334 #NO_APP at the beginning of its output.
335 Also note that comments started like this one will always work if
336 '/' isn't otherwise defined. */
337 const char line_comment_chars[] = "#/";
338
339 const char line_separator_chars[] = ";";
340
341 /* Chars that can be used to separate mant from exp in floating point
342 nums. */
343 const char EXP_CHARS[] = "eE";
344
345 /* Chars that mean this number is a floating point constant
346 As in 0f12.456
347 or 0d1.2345e12. */
348 const char FLT_CHARS[] = "fFdDxX";
349
350 /* Tables for lexical analysis. */
351 static char mnemonic_chars[256];
352 static char register_chars[256];
353 static char operand_chars[256];
354 static char identifier_chars[256];
355 static char digit_chars[256];
356
357 /* Lexical macros. */
358 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
359 #define is_operand_char(x) (operand_chars[(unsigned char) x])
360 #define is_register_char(x) (register_chars[(unsigned char) x])
361 #define is_space_char(x) ((x) == ' ')
362 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
363 #define is_digit_char(x) (digit_chars[(unsigned char) x])
364
365 /* All non-digit non-letter characters that may occur in an operand. */
366 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
367
368 /* md_assemble() always leaves the strings it's passed unaltered. To
369 effect this we maintain a stack of saved characters that we've smashed
370 with '\0's (indicating end of strings for various sub-fields of the
371 assembler instruction). */
372 static char save_stack[32];
373 static char *save_stack_p;
374 #define END_STRING_AND_SAVE(s) \
375 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
376 #define RESTORE_END_STRING(s) \
377 do { *(s) = *--save_stack_p; } while (0)
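/* Typical use: END_STRING_AND_SAVE (l) temporarily NUL-terminates the
   token ending at L so it can be handed to string or hash lookups, and
   RESTORE_END_STRING (l) puts the saved character back afterwards.
   Saves and restores must nest, since all callers share SAVE_STACK.  */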
378
379 /* The instruction we're assembling. */
380 static i386_insn i;
381
382 /* Possible templates for current insn. */
383 static const templates *current_templates;
384
385 /* Per instruction expressionS buffers: max displacements & immediates. */
386 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
387 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
388
389 /* Current operand we are working on. */
390 static int this_operand = -1;
391
392 /* We support 16-, 32- and 64-bit code; code16gcc is a variant of
393 16-bit code.  The FLAG_CODE variable is used to distinguish these.  */
394
395 enum flag_code {
396 CODE_32BIT,
397 CODE_16BIT,
398 CODE_64BIT };
399
400 static enum flag_code flag_code;
401 static unsigned int object_64bit;
402 static unsigned int disallow_64bit_reloc;
403 static int use_rela_relocations = 0;
404
405 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
406 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
407 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
408
409 /* The ELF ABI to use. */
410 enum x86_elf_abi
411 {
412 I386_ABI,
413 X86_64_ABI,
414 X86_64_X32_ABI
415 };
416
417 static enum x86_elf_abi x86_elf_abi = I386_ABI;
418 #endif
419
420 /* The names used to print error messages. */
421 static const char *flag_code_names[] =
422 {
423 "32",
424 "16",
425 "64"
426 };
427
428 /* 1 for intel syntax,
429 0 if att syntax. */
430 static int intel_syntax = 0;
431
432 /* 1 for intel mnemonic,
433 0 if att mnemonic. */
434 static int intel_mnemonic = !SYSV386_COMPAT;
435
436 /* 1 to support old (<= 2.8.1) versions of gcc.  */
437 static int old_gcc = OLDGCC_COMPAT;
438
439 /* 1 if pseudo registers are permitted. */
440 static int allow_pseudo_reg = 0;
441
442 /* 1 if register prefix % not required. */
443 static int allow_naked_reg = 0;
444
445 /* 1 if pseudo index register, eiz/riz, is allowed.  */
446 static int allow_index_reg = 0;
447
448 static enum
449 {
450 sse_check_none = 0,
451 sse_check_warning,
452 sse_check_error
453 }
454 sse_check;
455
456 /* Register prefix used for error message. */
457 static const char *register_prefix = "%";
458
459 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
460 leave, push, and pop instructions so that gcc has the same stack
461 frame as in 32 bit mode. */
462 static char stackop_size = '\0';
463
464 /* Non-zero to optimize code alignment. */
465 int optimize_align_code = 1;
466
467 /* Non-zero to quieten some warnings. */
468 static int quiet_warnings = 0;
469
470 /* CPU name. */
471 static const char *cpu_arch_name = NULL;
472 static char *cpu_sub_arch_name = NULL;
473
474 /* CPU feature flags. */
475 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
476
477 /* Nonzero if a cpu we are generating instructions for has been selected.  */
478 static int cpu_arch_tune_set = 0;
479
480 /* Cpu we are generating instructions for. */
481 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
482
483 /* CPU feature flags of cpu we are generating instructions for. */
484 static i386_cpu_flags cpu_arch_tune_flags;
485
486 /* CPU instruction set architecture used. */
487 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
488
489 /* CPU feature flags of instruction set architecture used. */
490 i386_cpu_flags cpu_arch_isa_flags;
491
492 /* If set, conditional jumps are not automatically promoted to handle
493 offsets larger than a byte.  */
494 static unsigned int no_cond_jump_promotion = 0;
495
496 /* Encode SSE instructions with VEX prefix. */
497 static unsigned int sse2avx;
498
499 /* Encode scalar AVX instructions with specific vector length. */
500 static enum
501 {
502 vex128 = 0,
503 vex256
504 } avxscalar;
505
506 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
507 static symbolS *GOT_symbol;
508
509 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
510 unsigned int x86_dwarf2_return_column;
511
512 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
513 int x86_cie_data_alignment;
514
515 /* Interface to relax_segment.
516 There are 3 major relax states for 386 jump insns because the
517 different types of jumps add different sizes to frags when we're
518 figuring out what sort of jump to choose to reach a given label. */
519
520 /* Types. */
521 #define UNCOND_JUMP 0
522 #define COND_JUMP 1
523 #define COND_JUMP86 2
524
525 /* Sizes. */
526 #define CODE16 1
527 #define SMALL 0
528 #define SMALL16 (SMALL | CODE16)
529 #define BIG 2
530 #define BIG16 (BIG | CODE16)
531
532 #ifndef INLINE
533 #ifdef __GNUC__
534 #define INLINE __inline__
535 #else
536 #define INLINE
537 #endif
538 #endif
539
540 #define ENCODE_RELAX_STATE(type, size) \
541 ((relax_substateT) (((type) << 2) | (size)))
542 #define TYPE_FROM_RELAX_STATE(s) \
543 ((s) >> 2)
544 #define DISP_SIZE_FROM_RELAX_STATE(s) \
545 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
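/* Worked example: ENCODE_RELAX_STATE (COND_JUMP, SMALL) is
   (1 << 2) | 0 = 4; promoting it to ENCODE_RELAX_STATE (COND_JUMP, BIG)
   gives (1 << 2) | 2 = 6, for which DISP_SIZE_FROM_RELAX_STATE yields a
   4 byte displacement while TYPE_FROM_RELAX_STATE still recovers
   COND_JUMP from either state.  */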
546
547 /* This table is used by relax_frag to promote short jumps to long
548 ones where necessary. SMALL (short) jumps may be promoted to BIG
549 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
550 don't allow a short jump in a 32 bit code segment to be promoted to
551 a 16 bit offset jump because it's slower (requires data size
552 prefix), and doesn't work, unless the destination is in the bottom
553 64k of the code segment (The top 16 bits of eip are zeroed). */
554
555 const relax_typeS md_relax_table[] =
556 {
557 /* The fields are:
558 1) most positive reach of this state,
559 2) most negative reach of this state,
560 3) how many bytes this mode will have in the variable part of the frag
561 4) which index into the table to try if we can't fit into this one. */
562
563 /* UNCOND_JUMP states. */
564 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
565 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
566 /* dword jmp adds 4 bytes to frag:
567 0 extra opcode bytes, 4 displacement bytes. */
568 {0, 0, 4, 0},
569 /* word jmp adds 2 bytes to frag:
570 0 extra opcode bytes, 2 displacement bytes. */
571 {0, 0, 2, 0},
572
573 /* COND_JUMP states. */
574 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
575 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
576 /* dword conditionals add 5 bytes to frag:
577 1 extra opcode byte, 4 displacement bytes. */
578 {0, 0, 5, 0},
579 /* word conditionals add 3 bytes to frag:
580 1 extra opcode byte, 2 displacement bytes. */
581 {0, 0, 3, 0},
582
583 /* COND_JUMP86 states. */
584 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
585 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
586 /* dword conditionals add 5 bytes to frag:
587 1 extra opcode byte, 4 displacement bytes. */
588 {0, 0, 5, 0},
589 /* word conditionals add 4 bytes to frag:
590 1 displacement byte and a 3 byte long branch insn. */
591 {0, 0, 4, 0}
592 };
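/* For example, a conditional jump to a label 300 bytes away starts in
   ENCODE_RELAX_STATE (COND_JUMP, SMALL); 300 exceeds the +128 reach of
   that state, so relax_frag steps to ENCODE_RELAX_STATE (COND_JUMP, BIG),
   whose variable part is 5 bytes (one extra opcode byte plus a 4 byte
   displacement) instead of 1.  */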
593
594 static const arch_entry cpu_arch[] =
595 {
596 /* Do not replace the first two entries - i386_target_format()
597 relies on them being there in this order. */
598 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
599 CPU_GENERIC32_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
601 CPU_GENERIC64_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
603 CPU_NONE_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
605 CPU_I186_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
607 CPU_I286_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
609 CPU_I386_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
611 CPU_I486_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
613 CPU_I586_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
615 CPU_I686_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
617 CPU_I586_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
619 CPU_PENTIUMPRO_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
621 CPU_P2_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
623 CPU_P3_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
625 CPU_P4_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
627 CPU_CORE_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
629 CPU_NOCONA_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
631 CPU_CORE_FLAGS, 1, 0 },
632 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
633 CPU_CORE_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
635 CPU_CORE2_FLAGS, 1, 0 },
636 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
637 CPU_CORE2_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
639 CPU_COREI7_FLAGS, 0, 0 },
640 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
641 CPU_L1OM_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
643 CPU_K1OM_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
645 CPU_K6_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
647 CPU_K6_2_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
649 CPU_ATHLON_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
651 CPU_K8_FLAGS, 1, 0 },
652 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
653 CPU_K8_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
655 CPU_K8_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
657 CPU_AMDFAM10_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
659 CPU_BDVER1_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
661 CPU_BDVER2_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
663 CPU_8087_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
665 CPU_287_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
667 CPU_387_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
669 CPU_ANY87_FLAGS, 0, 1 },
670 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
671 CPU_MMX_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
673 CPU_3DNOWA_FLAGS, 0, 1 },
674 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
675 CPU_SSE_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
677 CPU_SSE2_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
679 CPU_SSE3_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
681 CPU_SSSE3_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
683 CPU_SSE4_1_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
685 CPU_SSE4_2_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
687 CPU_SSE4_2_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
689 CPU_ANY_SSE_FLAGS, 0, 1 },
690 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
691 CPU_AVX_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
693 CPU_AVX2_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
695 CPU_ANY_AVX_FLAGS, 0, 1 },
696 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
697 CPU_VMX_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
699 CPU_VMFUNC_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
701 CPU_SMX_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
703 CPU_XSAVE_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
705 CPU_XSAVEOPT_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
707 CPU_AES_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
709 CPU_PCLMUL_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
711 CPU_PCLMUL_FLAGS, 1, 0 },
712 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
713 CPU_FSGSBASE_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
715 CPU_RDRND_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
717 CPU_F16C_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
719 CPU_BMI2_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
721 CPU_FMA_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
723 CPU_FMA4_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
725 CPU_XOP_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
727 CPU_LWP_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
729 CPU_MOVBE_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
731 CPU_EPT_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
733 CPU_LZCNT_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
735 CPU_INVPCID_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
737 CPU_CLFLUSH_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
739 CPU_NOP_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
741 CPU_SYSCALL_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
743 CPU_RDTSCP_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
745 CPU_3DNOW_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
747 CPU_3DNOWA_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
749 CPU_PADLOCK_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
751 CPU_SVME_FLAGS, 1, 0 },
752 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
753 CPU_SVME_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
755 CPU_SSE4A_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
757 CPU_ABM_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
759 CPU_BMI_FLAGS, 0, 0 },
760 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
761 CPU_TBM_FLAGS, 0, 0 },
762 };
763
764 #ifdef I386COFF
765 /* Like s_lcomm_internal in gas/read.c but the alignment string
766 is allowed to be optional. */
767
768 static symbolS *
769 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
770 {
771 addressT align = 0;
772
773 SKIP_WHITESPACE ();
774
775 if (needs_align
776 && *input_line_pointer == ',')
777 {
778 align = parse_align (needs_align - 1);
779
780 if (align == (addressT) -1)
781 return NULL;
782 }
783 else
784 {
785 if (size >= 8)
786 align = 3;
787 else if (size >= 4)
788 align = 2;
789 else if (size >= 2)
790 align = 1;
791 else
792 align = 0;
793 }
794
795 bss_alloc (symbolP, size, align);
796 return symbolP;
797 }
798
799 static void
800 pe_lcomm (int needs_align)
801 {
802 s_comm_internal (needs_align * 2, pe_lcomm_internal);
803 }
804 #endif
805
806 const pseudo_typeS md_pseudo_table[] =
807 {
808 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
809 {"align", s_align_bytes, 0},
810 #else
811 {"align", s_align_ptwo, 0},
812 #endif
813 {"arch", set_cpu_arch, 0},
814 #ifndef I386COFF
815 {"bss", s_bss, 0},
816 #else
817 {"lcomm", pe_lcomm, 1},
818 #endif
819 {"ffloat", float_cons, 'f'},
820 {"dfloat", float_cons, 'd'},
821 {"tfloat", float_cons, 'x'},
822 {"value", cons, 2},
823 {"slong", signed_cons, 4},
824 {"noopt", s_ignore, 0},
825 {"optim", s_ignore, 0},
826 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
827 {"code16", set_code_flag, CODE_16BIT},
828 {"code32", set_code_flag, CODE_32BIT},
829 {"code64", set_code_flag, CODE_64BIT},
830 {"intel_syntax", set_intel_syntax, 1},
831 {"att_syntax", set_intel_syntax, 0},
832 {"intel_mnemonic", set_intel_mnemonic, 1},
833 {"att_mnemonic", set_intel_mnemonic, 0},
834 {"allow_index_reg", set_allow_index_reg, 1},
835 {"disallow_index_reg", set_allow_index_reg, 0},
836 {"sse_check", set_sse_check, 0},
837 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
838 {"largecomm", handle_large_common, 0},
839 #else
840 {"file", (void (*) (int)) dwarf2_directive_file, 0},
841 {"loc", dwarf2_directive_loc, 0},
842 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
843 #endif
844 #ifdef TE_PE
845 {"secrel32", pe_directive_secrel, 0},
846 #endif
847 {0, 0, 0}
848 };
849
850 /* For interface with expression (). */
851 extern char *input_line_pointer;
852
853 /* Hash table for instruction mnemonic lookup. */
854 static struct hash_control *op_hash;
855
856 /* Hash table for register lookup. */
857 static struct hash_control *reg_hash;
858 \f
859 void
860 i386_align_code (fragS *fragP, int count)
861 {
862 /* Various efficient no-op patterns for aligning code labels.
863 Note: Don't try to assemble the instructions in the comments.
864 0L and 0w are not legal. */
865 static const char f32_1[] =
866 {0x90}; /* nop */
867 static const char f32_2[] =
868 {0x66,0x90}; /* xchg %ax,%ax */
869 static const char f32_3[] =
870 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
871 static const char f32_4[] =
872 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
873 static const char f32_5[] =
874 {0x90, /* nop */
875 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
876 static const char f32_6[] =
877 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
878 static const char f32_7[] =
879 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
880 static const char f32_8[] =
881 {0x90, /* nop */
882 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
883 static const char f32_9[] =
884 {0x89,0xf6, /* movl %esi,%esi */
885 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
886 static const char f32_10[] =
887 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
888 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
889 static const char f32_11[] =
890 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
891 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
892 static const char f32_12[] =
893 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
894 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
895 static const char f32_13[] =
896 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
897 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
898 static const char f32_14[] =
899 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
900 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
901 static const char f16_3[] =
902 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
903 static const char f16_4[] =
904 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
905 static const char f16_5[] =
906 {0x90, /* nop */
907 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
908 static const char f16_6[] =
909 {0x89,0xf6, /* mov %si,%si */
910 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
911 static const char f16_7[] =
912 {0x8d,0x74,0x00, /* lea 0(%si),%si */
913 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
914 static const char f16_8[] =
915 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
916 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
917 static const char jump_31[] =
918 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
919 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
920 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
921 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
922 static const char *const f32_patt[] = {
923 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
924 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
925 };
926 static const char *const f16_patt[] = {
927 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
928 };
929 /* nopl (%[re]ax) */
930 static const char alt_3[] =
931 {0x0f,0x1f,0x00};
932 /* nopl 0(%[re]ax) */
933 static const char alt_4[] =
934 {0x0f,0x1f,0x40,0x00};
935 /* nopl 0(%[re]ax,%[re]ax,1) */
936 static const char alt_5[] =
937 {0x0f,0x1f,0x44,0x00,0x00};
938 /* nopw 0(%[re]ax,%[re]ax,1) */
939 static const char alt_6[] =
940 {0x66,0x0f,0x1f,0x44,0x00,0x00};
941 /* nopl 0L(%[re]ax) */
942 static const char alt_7[] =
943 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
944 /* nopl 0L(%[re]ax,%[re]ax,1) */
945 static const char alt_8[] =
946 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
947 /* nopw 0L(%[re]ax,%[re]ax,1) */
948 static const char alt_9[] =
949 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
950 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
951 static const char alt_10[] =
952 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
953 /* data16
954 nopw %cs:0L(%[re]ax,%[re]ax,1) */
955 static const char alt_long_11[] =
956 {0x66,
957 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
958 /* data16
959 data16
960 nopw %cs:0L(%[re]ax,%[re]ax,1) */
961 static const char alt_long_12[] =
962 {0x66,
963 0x66,
964 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
965 /* data16
966 data16
967 data16
968 nopw %cs:0L(%[re]ax,%[re]ax,1) */
969 static const char alt_long_13[] =
970 {0x66,
971 0x66,
972 0x66,
973 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
974 /* data16
975 data16
976 data16
977 data16
978 nopw %cs:0L(%[re]ax,%[re]ax,1) */
979 static const char alt_long_14[] =
980 {0x66,
981 0x66,
982 0x66,
983 0x66,
984 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
985 /* data16
986 data16
987 data16
988 data16
989 data16
990 nopw %cs:0L(%[re]ax,%[re]ax,1) */
991 static const char alt_long_15[] =
992 {0x66,
993 0x66,
994 0x66,
995 0x66,
996 0x66,
997 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
998 /* nopl 0(%[re]ax,%[re]ax,1)
999 nopw 0(%[re]ax,%[re]ax,1) */
1000 static const char alt_short_11[] =
1001 {0x0f,0x1f,0x44,0x00,0x00,
1002 0x66,0x0f,0x1f,0x44,0x00,0x00};
1003 /* nopw 0(%[re]ax,%[re]ax,1)
1004 nopw 0(%[re]ax,%[re]ax,1) */
1005 static const char alt_short_12[] =
1006 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1007 0x66,0x0f,0x1f,0x44,0x00,0x00};
1008 /* nopw 0(%[re]ax,%[re]ax,1)
1009 nopl 0L(%[re]ax) */
1010 static const char alt_short_13[] =
1011 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1012 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1013 /* nopl 0L(%[re]ax)
1014 nopl 0L(%[re]ax) */
1015 static const char alt_short_14[] =
1016 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1017 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1018 /* nopl 0L(%[re]ax)
1019 nopl 0L(%[re]ax,%[re]ax,1) */
1020 static const char alt_short_15[] =
1021 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1022 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1023 static const char *const alt_short_patt[] = {
1024 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1025 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1026 alt_short_14, alt_short_15
1027 };
1028 static const char *const alt_long_patt[] = {
1029 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1030 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1031 alt_long_14, alt_long_15
1032 };
1033
1034 /* Only align for a positive non-zero boundary.  */
1035 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1036 return;
1037
1038 /* We need to decide which NOP sequence to use for 32bit and
1039 64bit. When -mtune= is used:
1040
1041 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1042 PROCESSOR_GENERIC32, f32_patt will be used.
1043 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1044 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1045 PROCESSOR_GENERIC64, alt_long_patt will be used.
1046 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1047 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1048 will be used.
1049
1050 When -mtune= isn't used, alt_long_patt will be used if
1051 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1052 be used.
1053
1054 When -march= or .arch is used, we can't use anything beyond
1055 cpu_arch_isa_flags. */
1056
1057 if (flag_code == CODE_16BIT)
1058 {
1059 if (count > 8)
1060 {
1061 memcpy (fragP->fr_literal + fragP->fr_fix,
1062 jump_31, count);
1063 /* Adjust jump offset. */
1064 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1065 }
1066 else
1067 memcpy (fragP->fr_literal + fragP->fr_fix,
1068 f16_patt[count - 1], count);
1069 }
1070 else
1071 {
1072 const char *const *patt = NULL;
1073
1074 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1075 {
1076 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1077 switch (cpu_arch_tune)
1078 {
1079 case PROCESSOR_UNKNOWN:
1080 /* We use cpu_arch_isa_flags to check if we SHOULD
1081 optimize with nops. */
1082 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1083 patt = alt_long_patt;
1084 else
1085 patt = f32_patt;
1086 break;
1087 case PROCESSOR_PENTIUM4:
1088 case PROCESSOR_NOCONA:
1089 case PROCESSOR_CORE:
1090 case PROCESSOR_CORE2:
1091 case PROCESSOR_COREI7:
1092 case PROCESSOR_L1OM:
1093 case PROCESSOR_K1OM:
1094 case PROCESSOR_GENERIC64:
1095 patt = alt_long_patt;
1096 break;
1097 case PROCESSOR_K6:
1098 case PROCESSOR_ATHLON:
1099 case PROCESSOR_K8:
1100 case PROCESSOR_AMDFAM10:
1101 case PROCESSOR_BD:
1102 patt = alt_short_patt;
1103 break;
1104 case PROCESSOR_I386:
1105 case PROCESSOR_I486:
1106 case PROCESSOR_PENTIUM:
1107 case PROCESSOR_PENTIUMPRO:
1108 case PROCESSOR_GENERIC32:
1109 patt = f32_patt;
1110 break;
1111 }
1112 }
1113 else
1114 {
1115 switch (fragP->tc_frag_data.tune)
1116 {
1117 case PROCESSOR_UNKNOWN:
1118 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1119 PROCESSOR_UNKNOWN. */
1120 abort ();
1121 break;
1122
1123 case PROCESSOR_I386:
1124 case PROCESSOR_I486:
1125 case PROCESSOR_PENTIUM:
1126 case PROCESSOR_K6:
1127 case PROCESSOR_ATHLON:
1128 case PROCESSOR_K8:
1129 case PROCESSOR_AMDFAM10:
1130 case PROCESSOR_BD:
1131 case PROCESSOR_GENERIC32:
1132 /* We use cpu_arch_isa_flags to check if we CAN optimize
1133 with nops. */
1134 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1135 patt = alt_short_patt;
1136 else
1137 patt = f32_patt;
1138 break;
1139 case PROCESSOR_PENTIUMPRO:
1140 case PROCESSOR_PENTIUM4:
1141 case PROCESSOR_NOCONA:
1142 case PROCESSOR_CORE:
1143 case PROCESSOR_CORE2:
1144 case PROCESSOR_COREI7:
1145 case PROCESSOR_L1OM:
1146 case PROCESSOR_K1OM:
1147 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1148 patt = alt_long_patt;
1149 else
1150 patt = f32_patt;
1151 break;
1152 case PROCESSOR_GENERIC64:
1153 patt = alt_long_patt;
1154 break;
1155 }
1156 }
1157
1158 if (patt == f32_patt)
1159 {
1160 /* If the padding is less than 15 bytes, we use the normal
1161 ones. Otherwise, we use a jump instruction and adjust
1162 its offset. */
1163 int limit;
1164
1165 /* For 64bit, the limit is 3 bytes. */
1166 if (flag_code == CODE_64BIT
1167 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1168 limit = 3;
1169 else
1170 limit = 15;
1171 if (count < limit)
1172 memcpy (fragP->fr_literal + fragP->fr_fix,
1173 patt[count - 1], count);
1174 else
1175 {
1176 memcpy (fragP->fr_literal + fragP->fr_fix,
1177 jump_31, count);
1178 /* Adjust jump offset. */
1179 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1180 }
1181 }
1182 else
1183 {
1184 /* Maximum length of an instruction is 15 bytes.  If the
1185 padding is greater than 15 bytes and we don't use a jump,
1186 we have to break it into smaller pieces. */
1187 int padding = count;
1188 while (padding > 15)
1189 {
1190 padding -= 15;
1191 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1192 patt [14], 15);
1193 }
1194
1195 if (padding)
1196 memcpy (fragP->fr_literal + fragP->fr_fix,
1197 patt [padding - 1], padding);
1198 }
1199 }
1200 fragP->fr_var = count;
1201 }
1202
1203 static INLINE int
1204 operand_type_all_zero (const union i386_operand_type *x)
1205 {
1206 switch (ARRAY_SIZE(x->array))
1207 {
1208 case 3:
1209 if (x->array[2])
1210 return 0;
1211 case 2:
1212 if (x->array[1])
1213 return 0;
1214 case 1:
1215 return !x->array[0];
1216 default:
1217 abort ();
1218 }
1219 }
1220
1221 static INLINE void
1222 operand_type_set (union i386_operand_type *x, unsigned int v)
1223 {
1224 switch (ARRAY_SIZE(x->array))
1225 {
1226 case 3:
1227 x->array[2] = v;
1228 case 2:
1229 x->array[1] = v;
1230 case 1:
1231 x->array[0] = v;
1232 break;
1233 default:
1234 abort ();
1235 }
1236 }
1237
1238 static INLINE int
1239 operand_type_equal (const union i386_operand_type *x,
1240 const union i386_operand_type *y)
1241 {
1242 switch (ARRAY_SIZE(x->array))
1243 {
1244 case 3:
1245 if (x->array[2] != y->array[2])
1246 return 0;
1247 case 2:
1248 if (x->array[1] != y->array[1])
1249 return 0;
1250 case 1:
1251 return x->array[0] == y->array[0];
1252 break;
1253 default:
1254 abort ();
1255 }
1256 }
1257
1258 static INLINE int
1259 cpu_flags_all_zero (const union i386_cpu_flags *x)
1260 {
1261 switch (ARRAY_SIZE(x->array))
1262 {
1263 case 3:
1264 if (x->array[2])
1265 return 0;
1266 case 2:
1267 if (x->array[1])
1268 return 0;
1269 case 1:
1270 return !x->array[0];
1271 default:
1272 abort ();
1273 }
1274 }
1275
1276 static INLINE void
1277 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1278 {
1279 switch (ARRAY_SIZE(x->array))
1280 {
1281 case 3:
1282 x->array[2] = v;
1283 case 2:
1284 x->array[1] = v;
1285 case 1:
1286 x->array[0] = v;
1287 break;
1288 default:
1289 abort ();
1290 }
1291 }
1292
1293 static INLINE int
1294 cpu_flags_equal (const union i386_cpu_flags *x,
1295 const union i386_cpu_flags *y)
1296 {
1297 switch (ARRAY_SIZE(x->array))
1298 {
1299 case 3:
1300 if (x->array[2] != y->array[2])
1301 return 0;
1302 case 2:
1303 if (x->array[1] != y->array[1])
1304 return 0;
1305 case 1:
1306 return x->array[0] == y->array[0];
1307 break;
1308 default:
1309 abort ();
1310 }
1311 }
1312
1313 static INLINE int
1314 cpu_flags_check_cpu64 (i386_cpu_flags f)
1315 {
1316 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1317 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1318 }
1319
1320 static INLINE i386_cpu_flags
1321 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1322 {
1323 switch (ARRAY_SIZE (x.array))
1324 {
1325 case 3:
1326 x.array [2] &= y.array [2];
1327 case 2:
1328 x.array [1] &= y.array [1];
1329 case 1:
1330 x.array [0] &= y.array [0];
1331 break;
1332 default:
1333 abort ();
1334 }
1335 return x;
1336 }
1337
1338 static INLINE i386_cpu_flags
1339 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1340 {
1341 switch (ARRAY_SIZE (x.array))
1342 {
1343 case 3:
1344 x.array [2] |= y.array [2];
1345 case 2:
1346 x.array [1] |= y.array [1];
1347 case 1:
1348 x.array [0] |= y.array [0];
1349 break;
1350 default:
1351 abort ();
1352 }
1353 return x;
1354 }
1355
1356 static INLINE i386_cpu_flags
1357 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1358 {
1359 switch (ARRAY_SIZE (x.array))
1360 {
1361 case 3:
1362 x.array [2] &= ~y.array [2];
1363 case 2:
1364 x.array [1] &= ~y.array [1];
1365 case 1:
1366 x.array [0] &= ~y.array [0];
1367 break;
1368 default:
1369 abort ();
1370 }
1371 return x;
1372 }
1373
1374 #define CPU_FLAGS_ARCH_MATCH 0x1
1375 #define CPU_FLAGS_64BIT_MATCH 0x2
1376 #define CPU_FLAGS_AES_MATCH 0x4
1377 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1378 #define CPU_FLAGS_AVX_MATCH 0x10
1379
1380 #define CPU_FLAGS_32BIT_MATCH \
1381 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1382 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1383 #define CPU_FLAGS_PERFECT_MATCH \
1384 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
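/* For example, a template with no CPU requirement beyond the base
   architecture matches in either mode: cpu_flags_check_cpu64 succeeds
   (CPU_FLAGS_64BIT_MATCH) and the remaining flags are all zero
   (CPU_FLAGS_32BIT_MATCH), so cpu_flags_match below returns
   CPU_FLAGS_PERFECT_MATCH.  */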
1385
1386 /* Return CPU flags match bits. */
1387
1388 static int
1389 cpu_flags_match (const insn_template *t)
1390 {
1391 i386_cpu_flags x = t->cpu_flags;
1392 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1393
1394 x.bitfield.cpu64 = 0;
1395 x.bitfield.cpuno64 = 0;
1396
1397 if (cpu_flags_all_zero (&x))
1398 {
1399 /* This instruction is available on all archs. */
1400 match |= CPU_FLAGS_32BIT_MATCH;
1401 }
1402 else
1403 {
1404 /* This instruction is available only on some archs. */
1405 i386_cpu_flags cpu = cpu_arch_flags;
1406
1407 cpu.bitfield.cpu64 = 0;
1408 cpu.bitfield.cpuno64 = 0;
1409 cpu = cpu_flags_and (x, cpu);
1410 if (!cpu_flags_all_zero (&cpu))
1411 {
1412 if (x.bitfield.cpuavx)
1413 {
1414 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1415 if (cpu.bitfield.cpuavx)
1416 {
1417 /* Check SSE2AVX. */
1418 if (!t->opcode_modifier.sse2avx || sse2avx)
1419 {
1420 match |= (CPU_FLAGS_ARCH_MATCH
1421 | CPU_FLAGS_AVX_MATCH);
1422 /* Check AES. */
1423 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1424 match |= CPU_FLAGS_AES_MATCH;
1425 /* Check PCLMUL. */
1426 if (!x.bitfield.cpupclmul
1427 || cpu.bitfield.cpupclmul)
1428 match |= CPU_FLAGS_PCLMUL_MATCH;
1429 }
1430 }
1431 else
1432 match |= CPU_FLAGS_ARCH_MATCH;
1433 }
1434 else
1435 match |= CPU_FLAGS_32BIT_MATCH;
1436 }
1437 }
1438 return match;
1439 }
1440
1441 static INLINE i386_operand_type
1442 operand_type_and (i386_operand_type x, i386_operand_type y)
1443 {
1444 switch (ARRAY_SIZE (x.array))
1445 {
1446 case 3:
1447 x.array [2] &= y.array [2];
1448 case 2:
1449 x.array [1] &= y.array [1];
1450 case 1:
1451 x.array [0] &= y.array [0];
1452 break;
1453 default:
1454 abort ();
1455 }
1456 return x;
1457 }
1458
1459 static INLINE i386_operand_type
1460 operand_type_or (i386_operand_type x, i386_operand_type y)
1461 {
1462 switch (ARRAY_SIZE (x.array))
1463 {
1464 case 3:
1465 x.array [2] |= y.array [2];
1466 case 2:
1467 x.array [1] |= y.array [1];
1468 case 1:
1469 x.array [0] |= y.array [0];
1470 break;
1471 default:
1472 abort ();
1473 }
1474 return x;
1475 }
1476
1477 static INLINE i386_operand_type
1478 operand_type_xor (i386_operand_type x, i386_operand_type y)
1479 {
1480 switch (ARRAY_SIZE (x.array))
1481 {
1482 case 3:
1483 x.array [2] ^= y.array [2];
1484 case 2:
1485 x.array [1] ^= y.array [1];
1486 case 1:
1487 x.array [0] ^= y.array [0];
1488 break;
1489 default:
1490 abort ();
1491 }
1492 return x;
1493 }
1494
1495 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1496 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1497 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1498 static const i386_operand_type inoutportreg
1499 = OPERAND_TYPE_INOUTPORTREG;
1500 static const i386_operand_type reg16_inoutportreg
1501 = OPERAND_TYPE_REG16_INOUTPORTREG;
1502 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1503 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1504 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1505 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1506 static const i386_operand_type anydisp
1507 = OPERAND_TYPE_ANYDISP;
1508 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1509 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1510 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1511 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1512 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1513 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1514 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1515 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1516 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1517 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1518 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1519 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1520
1521 enum operand_type
1522 {
1523 reg,
1524 imm,
1525 disp,
1526 anymem
1527 };
1528
1529 static INLINE int
1530 operand_type_check (i386_operand_type t, enum operand_type c)
1531 {
1532 switch (c)
1533 {
1534 case reg:
1535 return (t.bitfield.reg8
1536 || t.bitfield.reg16
1537 || t.bitfield.reg32
1538 || t.bitfield.reg64);
1539
1540 case imm:
1541 return (t.bitfield.imm8
1542 || t.bitfield.imm8s
1543 || t.bitfield.imm16
1544 || t.bitfield.imm32
1545 || t.bitfield.imm32s
1546 || t.bitfield.imm64);
1547
1548 case disp:
1549 return (t.bitfield.disp8
1550 || t.bitfield.disp16
1551 || t.bitfield.disp32
1552 || t.bitfield.disp32s
1553 || t.bitfield.disp64);
1554
1555 case anymem:
1556 return (t.bitfield.disp8
1557 || t.bitfield.disp16
1558 || t.bitfield.disp32
1559 || t.bitfield.disp32s
1560 || t.bitfield.disp64
1561 || t.bitfield.baseindex);
1562
1563 default:
1564 abort ();
1565 }
1566
1567 return 0;
1568 }
1569
1570 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1571 operand J for instruction template T. */
1572
1573 static INLINE int
1574 match_reg_size (const insn_template *t, unsigned int j)
1575 {
1576 return !((i.types[j].bitfield.byte
1577 && !t->operand_types[j].bitfield.byte)
1578 || (i.types[j].bitfield.word
1579 && !t->operand_types[j].bitfield.word)
1580 || (i.types[j].bitfield.dword
1581 && !t->operand_types[j].bitfield.dword)
1582 || (i.types[j].bitfield.qword
1583 && !t->operand_types[j].bitfield.qword));
1584 }
1585
1586 /* Return 1 if there is no conflict in any size on operand J for
1587 instruction template T. */
1588
1589 static INLINE int
1590 match_mem_size (const insn_template *t, unsigned int j)
1591 {
1592 return (match_reg_size (t, j)
1593 && !((i.types[j].bitfield.unspecified
1594 && !t->operand_types[j].bitfield.unspecified)
1595 || (i.types[j].bitfield.fword
1596 && !t->operand_types[j].bitfield.fword)
1597 || (i.types[j].bitfield.tbyte
1598 && !t->operand_types[j].bitfield.tbyte)
1599 || (i.types[j].bitfield.xmmword
1600 && !t->operand_types[j].bitfield.xmmword)
1601 || (i.types[j].bitfield.ymmword
1602 && !t->operand_types[j].bitfield.ymmword)));
1603 }
1604
1605 /* Return 1 if there is no size conflict on any operands for
1606 instruction template T. */
1607
1608 static INLINE int
1609 operand_size_match (const insn_template *t)
1610 {
1611 unsigned int j;
1612 int match = 1;
1613
1614 /* Don't check jump instructions. */
1615 if (t->opcode_modifier.jump
1616 || t->opcode_modifier.jumpbyte
1617 || t->opcode_modifier.jumpdword
1618 || t->opcode_modifier.jumpintersegment)
1619 return match;
1620
1621 /* Check memory and accumulator operand size. */
1622 for (j = 0; j < i.operands; j++)
1623 {
1624 if (t->operand_types[j].bitfield.anysize)
1625 continue;
1626
1627 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1628 {
1629 match = 0;
1630 break;
1631 }
1632
1633 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1634 {
1635 match = 0;
1636 break;
1637 }
1638 }
1639
1640 if (match)
1641 return match;
1642 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1643 {
1644 mismatch:
1645 i.error = operand_size_mismatch;
1646 return 0;
1647 }
1648
1649 /* Check reverse. */
1650 gas_assert (i.operands == 2);
1651
1652 match = 1;
1653 for (j = 0; j < 2; j++)
1654 {
1655 if (t->operand_types[j].bitfield.acc
1656 && !match_reg_size (t, j ? 0 : 1))
1657 goto mismatch;
1658
1659 if (i.types[j].bitfield.mem
1660 && !match_mem_size (t, j ? 0 : 1))
1661 goto mismatch;
1662 }
1663
1664 return match;
1665 }
1666
1667 static INLINE int
1668 operand_type_match (i386_operand_type overlap,
1669 i386_operand_type given)
1670 {
1671 i386_operand_type temp = overlap;
1672
1673 temp.bitfield.jumpabsolute = 0;
1674 temp.bitfield.unspecified = 0;
1675 temp.bitfield.byte = 0;
1676 temp.bitfield.word = 0;
1677 temp.bitfield.dword = 0;
1678 temp.bitfield.fword = 0;
1679 temp.bitfield.qword = 0;
1680 temp.bitfield.tbyte = 0;
1681 temp.bitfield.xmmword = 0;
1682 temp.bitfield.ymmword = 0;
1683 if (operand_type_all_zero (&temp))
1684 goto mismatch;
1685
1686 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1687 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1688 return 1;
1689
1690 mismatch:
1691 i.error = operand_type_mismatch;
1692 return 0;
1693 }
1694
1695 /* If given types g0 and g1 are registers they must be of the same type
1696 unless the expected operand type register overlap is null.
1697 Note that Acc in a template matches every size of reg. */
1698
1699 static INLINE int
1700 operand_type_register_match (i386_operand_type m0,
1701 i386_operand_type g0,
1702 i386_operand_type t0,
1703 i386_operand_type m1,
1704 i386_operand_type g1,
1705 i386_operand_type t1)
1706 {
1707 if (!operand_type_check (g0, reg))
1708 return 1;
1709
1710 if (!operand_type_check (g1, reg))
1711 return 1;
1712
1713 if (g0.bitfield.reg8 == g1.bitfield.reg8
1714 && g0.bitfield.reg16 == g1.bitfield.reg16
1715 && g0.bitfield.reg32 == g1.bitfield.reg32
1716 && g0.bitfield.reg64 == g1.bitfield.reg64)
1717 return 1;
1718
1719 if (m0.bitfield.acc)
1720 {
1721 t0.bitfield.reg8 = 1;
1722 t0.bitfield.reg16 = 1;
1723 t0.bitfield.reg32 = 1;
1724 t0.bitfield.reg64 = 1;
1725 }
1726
1727 if (m1.bitfield.acc)
1728 {
1729 t1.bitfield.reg8 = 1;
1730 t1.bitfield.reg16 = 1;
1731 t1.bitfield.reg32 = 1;
1732 t1.bitfield.reg64 = 1;
1733 }
1734
1735 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1736 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1737 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1738 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1739 return 1;
1740
1741 i.error = register_type_mismatch;
1742
1743 return 0;
1744 }
1745
1746 static INLINE unsigned int
1747 mode_from_disp_size (i386_operand_type t)
1748 {
1749 if (t.bitfield.disp8)
1750 return 1;
1751 else if (t.bitfield.disp16
1752 || t.bitfield.disp32
1753 || t.bitfield.disp32s)
1754 return 2;
1755 else
1756 return 0;
1757 }
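/* The value returned above is the ModRM mode field to use for a
   base-register address with the given displacement size: 1 (mod 01)
   for an 8 bit displacement, 2 (mod 10) for a 16 or 32 bit one, and 0
   when no displacement bits are set.  */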
1758
1759 static INLINE int
1760 fits_in_signed_byte (offsetT num)
1761 {
1762 return (num >= -128) && (num <= 127);
1763 }
1764
1765 static INLINE int
1766 fits_in_unsigned_byte (offsetT num)
1767 {
1768 return (num & 0xff) == num;
1769 }
1770
1771 static INLINE int
1772 fits_in_unsigned_word (offsetT num)
1773 {
1774 return (num & 0xffff) == num;
1775 }
1776
1777 static INLINE int
1778 fits_in_signed_word (offsetT num)
1779 {
1780 return (-32768 <= num) && (num <= 32767);
1781 }
1782
1783 static INLINE int
1784 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1785 {
1786 #ifndef BFD64
1787 return 1;
1788 #else
1789 return (!(((offsetT) -1 << 31) & num)
1790 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1791 #endif
1792 } /* fits_in_signed_long() */
1793
1794 static INLINE int
1795 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1796 {
1797 #ifndef BFD64
1798 return 1;
1799 #else
1800 return (num & (((offsetT) 2 << 31) - 1)) == num;
1801 #endif
1802 } /* fits_in_unsigned_long() */
1803
1804 static INLINE int
1805 fits_in_imm4 (offsetT num)
1806 {
1807 return (num & 0xf) == num;
1808 }
1809
1810 static i386_operand_type
1811 smallest_imm_type (offsetT num)
1812 {
1813 i386_operand_type t;
1814
1815 operand_type_set (&t, 0);
1816 t.bitfield.imm64 = 1;
1817
1818 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1819 {
1820 /* This code is disabled on the 486 because all the Imm1 forms
1821 in the opcode table are slower on the i486. They're the
1822 versions with the implicitly specified single-position
1823 displacement, which has another syntax if you really want to
1824 use that form. */
1825 t.bitfield.imm1 = 1;
1826 t.bitfield.imm8 = 1;
1827 t.bitfield.imm8s = 1;
1828 t.bitfield.imm16 = 1;
1829 t.bitfield.imm32 = 1;
1830 t.bitfield.imm32s = 1;
1831 }
1832 else if (fits_in_signed_byte (num))
1833 {
1834 t.bitfield.imm8 = 1;
1835 t.bitfield.imm8s = 1;
1836 t.bitfield.imm16 = 1;
1837 t.bitfield.imm32 = 1;
1838 t.bitfield.imm32s = 1;
1839 }
1840 else if (fits_in_unsigned_byte (num))
1841 {
1842 t.bitfield.imm8 = 1;
1843 t.bitfield.imm16 = 1;
1844 t.bitfield.imm32 = 1;
1845 t.bitfield.imm32s = 1;
1846 }
1847 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1848 {
1849 t.bitfield.imm16 = 1;
1850 t.bitfield.imm32 = 1;
1851 t.bitfield.imm32s = 1;
1852 }
1853 else if (fits_in_signed_long (num))
1854 {
1855 t.bitfield.imm32 = 1;
1856 t.bitfield.imm32s = 1;
1857 }
1858 else if (fits_in_unsigned_long (num))
1859 t.bitfield.imm32 = 1;
1860
1861 return t;
1862 }
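/* For example, smallest_imm_type (200) leaves Imm64 set and adds Imm8,
   Imm16, Imm32 and Imm32S (200 fits in an unsigned but not a signed
   byte, so Imm8S stays clear), while smallest_imm_type (-1) also sets
   Imm8S.  */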
1863
1864 static offsetT
1865 offset_in_range (offsetT val, int size)
1866 {
1867 addressT mask;
1868
1869 switch (size)
1870 {
1871 case 1: mask = ((addressT) 1 << 8) - 1; break;
1872 case 2: mask = ((addressT) 1 << 16) - 1; break;
1873 case 4: mask = ((addressT) 2 << 31) - 1; break;
1874 #ifdef BFD64
1875 case 8: mask = ((addressT) 2 << 63) - 1; break;
1876 #endif
1877 default: abort ();
1878 }
1879
1880 #ifdef BFD64
1881 /* If BFD64, sign extend val for 32bit address mode. */
1882 if (flag_code != CODE_64BIT
1883 || i.prefix[ADDR_PREFIX])
1884 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1885 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1886 #endif
1887
1888 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1889 {
1890 char buf1[40], buf2[40];
1891
1892 sprint_value (buf1, val);
1893 sprint_value (buf2, val & mask);
1894 as_warn (_("%s shortened to %s"), buf1, buf2);
1895 }
1896 return val & mask;
1897 }
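/* For example, offset_in_range (0x12345, 2) masks the value with 0xffff,
   warns that it was shortened, and returns 0x2345; values that already
   fit (or that are just sign extensions of a fitting value) pass through
   silently.  */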
1898
1899 enum PREFIX_GROUP
1900 {
1901 PREFIX_EXIST = 0,
1902 PREFIX_LOCK,
1903 PREFIX_REP,
1904 PREFIX_OTHER
1905 };
1906
1907 /* Returns
1908 a. PREFIX_EXIST if attempting to add a prefix where one from the
1909 same class already exists.
1910 b. PREFIX_LOCK if lock prefix is added.
1911 c. PREFIX_REP if rep/repne prefix is added.
1912 d. PREFIX_OTHER if other prefix is added.
1913 */
1914
1915 static enum PREFIX_GROUP
1916 add_prefix (unsigned int prefix)
1917 {
1918 enum PREFIX_GROUP ret = PREFIX_OTHER;
1919 unsigned int q;
1920
1921 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1922 && flag_code == CODE_64BIT)
1923 {
1924 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1925 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1926 && (prefix & (REX_R | REX_X | REX_B))))
1927 ret = PREFIX_EXIST;
1928 q = REX_PREFIX;
1929 }
1930 else
1931 {
1932 switch (prefix)
1933 {
1934 default:
1935 abort ();
1936
1937 case CS_PREFIX_OPCODE:
1938 case DS_PREFIX_OPCODE:
1939 case ES_PREFIX_OPCODE:
1940 case FS_PREFIX_OPCODE:
1941 case GS_PREFIX_OPCODE:
1942 case SS_PREFIX_OPCODE:
1943 q = SEG_PREFIX;
1944 break;
1945
1946 case REPNE_PREFIX_OPCODE:
1947 case REPE_PREFIX_OPCODE:
1948 q = REP_PREFIX;
1949 ret = PREFIX_REP;
1950 break;
1951
1952 case LOCK_PREFIX_OPCODE:
1953 q = LOCK_PREFIX;
1954 ret = PREFIX_LOCK;
1955 break;
1956
1957 case FWAIT_OPCODE:
1958 q = WAIT_PREFIX;
1959 break;
1960
1961 case ADDR_PREFIX_OPCODE:
1962 q = ADDR_PREFIX;
1963 break;
1964
1965 case DATA_PREFIX_OPCODE:
1966 q = DATA_PREFIX;
1967 break;
1968 }
1969 if (i.prefix[q] != 0)
1970 ret = PREFIX_EXIST;
1971 }
1972
1973 if (ret)
1974 {
1975 if (!i.prefix[q])
1976 ++i.prefixes;
1977 i.prefix[q] |= prefix;
1978 }
1979 else
1980 as_bad (_("same type of prefix used twice"));
1981
1982 return ret;
1983 }
1984
1985 static void
1986 update_code_flag (int value, int check)
1987 {
1988 PRINTF_LIKE ((*as_error));
1989
1990 flag_code = (enum flag_code) value;
1991 if (flag_code == CODE_64BIT)
1992 {
1993 cpu_arch_flags.bitfield.cpu64 = 1;
1994 cpu_arch_flags.bitfield.cpuno64 = 0;
1995 }
1996 else
1997 {
1998 cpu_arch_flags.bitfield.cpu64 = 0;
1999 cpu_arch_flags.bitfield.cpuno64 = 1;
2000 }
2001 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2002 {
2003 if (check)
2004 as_error = as_fatal;
2005 else
2006 as_error = as_bad;
2007 (*as_error) (_("64bit mode not supported on `%s'."),
2008 cpu_arch_name ? cpu_arch_name : default_arch);
2009 }
2010 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2011 {
2012 if (check)
2013 as_error = as_fatal;
2014 else
2015 as_error = as_bad;
2016 (*as_error) (_("32bit mode not supported on `%s'."),
2017 cpu_arch_name ? cpu_arch_name : default_arch);
2018 }
2019 stackop_size = '\0';
2020 }
2021
2022 static void
2023 set_code_flag (int value)
2024 {
2025 update_code_flag (value, 0);
2026 }
2027
2028 static void
2029 set_16bit_gcc_code_flag (int new_code_flag)
2030 {
2031 flag_code = (enum flag_code) new_code_flag;
2032 if (flag_code != CODE_16BIT)
2033 abort ();
2034 cpu_arch_flags.bitfield.cpu64 = 0;
2035 cpu_arch_flags.bitfield.cpuno64 = 1;
2036 stackop_size = LONG_MNEM_SUFFIX;
2037 }
2038
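/* Handle the .intel_syntax and .att_syntax directives.  The optional
   "prefix"/"noprefix" argument controls whether registers need a '%'
   prefix; e.g. ".intel_syntax noprefix" accepts "mov eax, 1" whereas
   ".att_syntax prefix" requires "%eax".  */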
2039 static void
2040 set_intel_syntax (int syntax_flag)
2041 {
2042 /* Find out if register prefixing is specified. */
2043 int ask_naked_reg = 0;
2044
2045 SKIP_WHITESPACE ();
2046 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2047 {
2048 char *string = input_line_pointer;
2049 int e = get_symbol_end ();
2050
2051 if (strcmp (string, "prefix") == 0)
2052 ask_naked_reg = 1;
2053 else if (strcmp (string, "noprefix") == 0)
2054 ask_naked_reg = -1;
2055 else
2056 as_bad (_("bad argument to syntax directive."));
2057 *input_line_pointer = e;
2058 }
2059 demand_empty_rest_of_line ();
2060
2061 intel_syntax = syntax_flag;
2062
2063 if (ask_naked_reg == 0)
2064 allow_naked_reg = (intel_syntax
2065 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2066 else
2067 allow_naked_reg = (ask_naked_reg < 0);
2068
2069 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2070
2071 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2072 identifier_chars['$'] = intel_syntax ? '$' : 0;
2073 register_prefix = allow_naked_reg ? "" : "%";
2074 }
2075
2076 static void
2077 set_intel_mnemonic (int mnemonic_flag)
2078 {
2079 intel_mnemonic = mnemonic_flag;
2080 }
2081
2082 static void
2083 set_allow_index_reg (int flag)
2084 {
2085 allow_index_reg = flag;
2086 }
2087
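/* Handle the .sse_check directive.  The argument "none", "warning" or
   "error" selects whether use of SSE instructions (those that have
   AVX equivalents) is ignored, warned about or rejected.  */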
2088 static void
2089 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2090 {
2091 SKIP_WHITESPACE ();
2092
2093 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2094 {
2095 char *string = input_line_pointer;
2096 int e = get_symbol_end ();
2097
2098 if (strcmp (string, "none") == 0)
2099 sse_check = sse_check_none;
2100 else if (strcmp (string, "warning") == 0)
2101 sse_check = sse_check_warning;
2102 else if (strcmp (string, "error") == 0)
2103 sse_check = sse_check_error;
2104 else
2105 as_bad (_("bad argument to sse_check directive."));
2106 *input_line_pointer = e;
2107 }
2108 else
2109 as_bad (_("missing argument for sse_check directive"));
2110
2111 demand_empty_rest_of_line ();
2112 }
2113
2114 static void
2115 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2116 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2117 {
2118 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2119 static const char *arch;
2120
2121 /* Intel L1OM and K1OM are only supported on ELF.  */
2122 if (!IS_ELF)
2123 return;
2124
2125 if (!arch)
2126 {
2127 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2128 use default_arch. */
2129 arch = cpu_arch_name;
2130 if (!arch)
2131 arch = default_arch;
2132 }
2133
2134 /* If we are targeting Intel L1OM, we must enable it. */
2135 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2136 || new_flag.bitfield.cpul1om)
2137 return;
2138
2139 /* If we are targeting Intel K1OM, we must enable it. */
2140 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2141 || new_flag.bitfield.cpuk1om)
2142 return;
2143
2144 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2145 #endif
2146 }
2147
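/* Handle the .arch directive.  A plain architecture name such as
   "i686" replaces the current flags; a name starting with '.' such as
   ".sse4.1" adds an extension (or removes one for the ".no..."
   entries).  An optional ",jumps" or ",nojumps" modifier controls
   conditional jump promotion.  */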
2148 static void
2149 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2150 {
2151 SKIP_WHITESPACE ();
2152
2153 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2154 {
2155 char *string = input_line_pointer;
2156 int e = get_symbol_end ();
2157 unsigned int j;
2158 i386_cpu_flags flags;
2159
2160 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2161 {
2162 if (strcmp (string, cpu_arch[j].name) == 0)
2163 {
2164 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2165
2166 if (*string != '.')
2167 {
2168 cpu_arch_name = cpu_arch[j].name;
2169 cpu_sub_arch_name = NULL;
2170 cpu_arch_flags = cpu_arch[j].flags;
2171 if (flag_code == CODE_64BIT)
2172 {
2173 cpu_arch_flags.bitfield.cpu64 = 1;
2174 cpu_arch_flags.bitfield.cpuno64 = 0;
2175 }
2176 else
2177 {
2178 cpu_arch_flags.bitfield.cpu64 = 0;
2179 cpu_arch_flags.bitfield.cpuno64 = 1;
2180 }
2181 cpu_arch_isa = cpu_arch[j].type;
2182 cpu_arch_isa_flags = cpu_arch[j].flags;
2183 if (!cpu_arch_tune_set)
2184 {
2185 cpu_arch_tune = cpu_arch_isa;
2186 cpu_arch_tune_flags = cpu_arch_isa_flags;
2187 }
2188 break;
2189 }
2190
2191 if (!cpu_arch[j].negated)
2192 flags = cpu_flags_or (cpu_arch_flags,
2193 cpu_arch[j].flags);
2194 else
2195 flags = cpu_flags_and_not (cpu_arch_flags,
2196 cpu_arch[j].flags);
2197 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2198 {
2199 if (cpu_sub_arch_name)
2200 {
2201 char *name = cpu_sub_arch_name;
2202 cpu_sub_arch_name = concat (name,
2203 cpu_arch[j].name,
2204 (const char *) NULL);
2205 free (name);
2206 }
2207 else
2208 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2209 cpu_arch_flags = flags;
2210 cpu_arch_isa_flags = flags;
2211 }
2212 *input_line_pointer = e;
2213 demand_empty_rest_of_line ();
2214 return;
2215 }
2216 }
2217 if (j >= ARRAY_SIZE (cpu_arch))
2218 as_bad (_("no such architecture: `%s'"), string);
2219
2220 *input_line_pointer = e;
2221 }
2222 else
2223 as_bad (_("missing cpu architecture"));
2224
2225 no_cond_jump_promotion = 0;
2226 if (*input_line_pointer == ','
2227 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2228 {
2229 char *string = ++input_line_pointer;
2230 int e = get_symbol_end ();
2231
2232 if (strcmp (string, "nojumps") == 0)
2233 no_cond_jump_promotion = 1;
2234 else if (strcmp (string, "jumps") == 0)
2235 ;
2236 else
2237 as_bad (_("no such architecture modifier: `%s'"), string);
2238
2239 *input_line_pointer = e;
2240 }
2241
2242 demand_empty_rest_of_line ();
2243 }
2244
2245 enum bfd_architecture
2246 i386_arch (void)
2247 {
2248 if (cpu_arch_isa == PROCESSOR_L1OM)
2249 {
2250 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2251 || flag_code != CODE_64BIT)
2252 as_fatal (_("Intel L1OM is 64bit ELF only"));
2253 return bfd_arch_l1om;
2254 }
2255 else if (cpu_arch_isa == PROCESSOR_K1OM)
2256 {
2257 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2258 || flag_code != CODE_64BIT)
2259 as_fatal (_("Intel K1OM is 64bit ELF only"));
2260 return bfd_arch_k1om;
2261 }
2262 else
2263 return bfd_arch_i386;
2264 }
2265
2266 unsigned long
2267 i386_mach (void)
2268 {
2269 if (!strncmp (default_arch, "x86_64", 6))
2270 {
2271 if (cpu_arch_isa == PROCESSOR_L1OM)
2272 {
2273 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2274 || default_arch[6] != '\0')
2275 as_fatal (_("Intel L1OM is 64bit ELF only"));
2276 return bfd_mach_l1om;
2277 }
2278 else if (cpu_arch_isa == PROCESSOR_K1OM)
2279 {
2280 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2281 || default_arch[6] != '\0')
2282 as_fatal (_("Intel K1OM is 64bit ELF only"));
2283 return bfd_mach_k1om;
2284 }
2285 else if (default_arch[6] == '\0')
2286 return bfd_mach_x86_64;
2287 else
2288 return bfd_mach_x64_32;
2289 }
2290 else if (!strcmp (default_arch, "i386"))
2291 return bfd_mach_i386_i386;
2292 else
2293 as_fatal (_("unknown architecture"));
2294 }
2295 \f
2296 void
2297 md_begin (void)
2298 {
2299 const char *hash_err;
2300
2301 /* Initialize op_hash hash table. */
2302 op_hash = hash_new ();
2303
2304 {
2305 const insn_template *optab;
2306 templates *core_optab;
2307
2308 /* Setup for loop. */
2309 optab = i386_optab;
2310 core_optab = (templates *) xmalloc (sizeof (templates));
2311 core_optab->start = optab;
2312
2313 while (1)
2314 {
2315 ++optab;
2316 if (optab->name == NULL
2317 || strcmp (optab->name, (optab - 1)->name) != 0)
2318 {
2319 /* different name --> ship out current template list;
2320 add to hash table; & begin anew. */
2321 core_optab->end = optab;
2322 hash_err = hash_insert (op_hash,
2323 (optab - 1)->name,
2324 (void *) core_optab);
2325 if (hash_err)
2326 {
2327 as_fatal (_("internal Error: Can't hash %s: %s"),
2328 (optab - 1)->name,
2329 hash_err);
2330 }
2331 if (optab->name == NULL)
2332 break;
2333 core_optab = (templates *) xmalloc (sizeof (templates));
2334 core_optab->start = optab;
2335 }
2336 }
2337 }
2338
2339 /* Initialize reg_hash hash table. */
2340 reg_hash = hash_new ();
2341 {
2342 const reg_entry *regtab;
2343 unsigned int regtab_size = i386_regtab_size;
2344
2345 for (regtab = i386_regtab; regtab_size--; regtab++)
2346 {
2347 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2348 if (hash_err)
2349 as_fatal (_("internal Error: Can't hash %s: %s"),
2350 regtab->reg_name,
2351 hash_err);
2352 }
2353 }
2354
2355 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2356 {
2357 int c;
2358 char *p;
2359
2360 for (c = 0; c < 256; c++)
2361 {
2362 if (ISDIGIT (c))
2363 {
2364 digit_chars[c] = c;
2365 mnemonic_chars[c] = c;
2366 register_chars[c] = c;
2367 operand_chars[c] = c;
2368 }
2369 else if (ISLOWER (c))
2370 {
2371 mnemonic_chars[c] = c;
2372 register_chars[c] = c;
2373 operand_chars[c] = c;
2374 }
2375 else if (ISUPPER (c))
2376 {
2377 mnemonic_chars[c] = TOLOWER (c);
2378 register_chars[c] = mnemonic_chars[c];
2379 operand_chars[c] = c;
2380 }
2381
2382 if (ISALPHA (c) || ISDIGIT (c))
2383 identifier_chars[c] = c;
2384 else if (c >= 128)
2385 {
2386 identifier_chars[c] = c;
2387 operand_chars[c] = c;
2388 }
2389 }
2390
2391 #ifdef LEX_AT
2392 identifier_chars['@'] = '@';
2393 #endif
2394 #ifdef LEX_QM
2395 identifier_chars['?'] = '?';
2396 operand_chars['?'] = '?';
2397 #endif
2398 digit_chars['-'] = '-';
2399 mnemonic_chars['_'] = '_';
2400 mnemonic_chars['-'] = '-';
2401 mnemonic_chars['.'] = '.';
2402 identifier_chars['_'] = '_';
2403 identifier_chars['.'] = '.';
2404
2405 for (p = operand_special_chars; *p != '\0'; p++)
2406 operand_chars[(unsigned char) *p] = *p;
2407 }
2408
2409 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2410 if (IS_ELF)
2411 {
2412 record_alignment (text_section, 2);
2413 record_alignment (data_section, 2);
2414 record_alignment (bss_section, 2);
2415 }
2416 #endif
2417
2418 if (flag_code == CODE_64BIT)
2419 {
2420 #if defined (OBJ_COFF) && defined (TE_PE)
2421 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2422 ? 32 : 16);
2423 #else
2424 x86_dwarf2_return_column = 16;
2425 #endif
2426 x86_cie_data_alignment = -8;
2427 }
2428 else
2429 {
2430 x86_dwarf2_return_column = 8;
2431 x86_cie_data_alignment = -4;
2432 }
2433 }
2434
2435 void
2436 i386_print_statistics (FILE *file)
2437 {
2438 hash_print_statistics (file, "i386 opcode", op_hash);
2439 hash_print_statistics (file, "i386 register", reg_hash);
2440 }
2441 \f
2442 #ifdef DEBUG386
2443
2444 /* Debugging routines for md_assemble. */
2445 static void pte (insn_template *);
2446 static void pt (i386_operand_type);
2447 static void pe (expressionS *);
2448 static void ps (symbolS *);
2449
2450 static void
2451 pi (char *line, i386_insn *x)
2452 {
2453 unsigned int j;
2454
2455 fprintf (stdout, "%s: template ", line);
2456 pte (&x->tm);
2457 fprintf (stdout, " address: base %s index %s scale %x\n",
2458 x->base_reg ? x->base_reg->reg_name : "none",
2459 x->index_reg ? x->index_reg->reg_name : "none",
2460 x->log2_scale_factor);
2461 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2462 x->rm.mode, x->rm.reg, x->rm.regmem);
2463 fprintf (stdout, " sib: base %x index %x scale %x\n",
2464 x->sib.base, x->sib.index, x->sib.scale);
2465 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2466 (x->rex & REX_W) != 0,
2467 (x->rex & REX_R) != 0,
2468 (x->rex & REX_X) != 0,
2469 (x->rex & REX_B) != 0);
2470 for (j = 0; j < x->operands; j++)
2471 {
2472 fprintf (stdout, " #%d: ", j + 1);
2473 pt (x->types[j]);
2474 fprintf (stdout, "\n");
2475 if (x->types[j].bitfield.reg8
2476 || x->types[j].bitfield.reg16
2477 || x->types[j].bitfield.reg32
2478 || x->types[j].bitfield.reg64
2479 || x->types[j].bitfield.regmmx
2480 || x->types[j].bitfield.regxmm
2481 || x->types[j].bitfield.regymm
2482 || x->types[j].bitfield.sreg2
2483 || x->types[j].bitfield.sreg3
2484 || x->types[j].bitfield.control
2485 || x->types[j].bitfield.debug
2486 || x->types[j].bitfield.test)
2487 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2488 if (operand_type_check (x->types[j], imm))
2489 pe (x->op[j].imms);
2490 if (operand_type_check (x->types[j], disp))
2491 pe (x->op[j].disps);
2492 }
2493 }
2494
2495 static void
2496 pte (insn_template *t)
2497 {
2498 unsigned int j;
2499 fprintf (stdout, " %d operands ", t->operands);
2500 fprintf (stdout, "opcode %x ", t->base_opcode);
2501 if (t->extension_opcode != None)
2502 fprintf (stdout, "ext %x ", t->extension_opcode);
2503 if (t->opcode_modifier.d)
2504 fprintf (stdout, "D");
2505 if (t->opcode_modifier.w)
2506 fprintf (stdout, "W");
2507 fprintf (stdout, "\n");
2508 for (j = 0; j < t->operands; j++)
2509 {
2510 fprintf (stdout, " #%d type ", j + 1);
2511 pt (t->operand_types[j]);
2512 fprintf (stdout, "\n");
2513 }
2514 }
2515
2516 static void
2517 pe (expressionS *e)
2518 {
2519 fprintf (stdout, " operation %d\n", e->X_op);
2520 fprintf (stdout, " add_number %ld (%lx)\n",
2521 (long) e->X_add_number, (long) e->X_add_number);
2522 if (e->X_add_symbol)
2523 {
2524 fprintf (stdout, " add_symbol ");
2525 ps (e->X_add_symbol);
2526 fprintf (stdout, "\n");
2527 }
2528 if (e->X_op_symbol)
2529 {
2530 fprintf (stdout, " op_symbol ");
2531 ps (e->X_op_symbol);
2532 fprintf (stdout, "\n");
2533 }
2534 }
2535
2536 static void
2537 ps (symbolS *s)
2538 {
2539 fprintf (stdout, "%s type %s%s",
2540 S_GET_NAME (s),
2541 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2542 segment_name (S_GET_SEGMENT (s)));
2543 }
2544
2545 static struct type_name
2546 {
2547 i386_operand_type mask;
2548 const char *name;
2549 }
2550 const type_names[] =
2551 {
2552 { OPERAND_TYPE_REG8, "r8" },
2553 { OPERAND_TYPE_REG16, "r16" },
2554 { OPERAND_TYPE_REG32, "r32" },
2555 { OPERAND_TYPE_REG64, "r64" },
2556 { OPERAND_TYPE_IMM8, "i8" },
2557 { OPERAND_TYPE_IMM8S, "i8s" },
2558 { OPERAND_TYPE_IMM16, "i16" },
2559 { OPERAND_TYPE_IMM32, "i32" },
2560 { OPERAND_TYPE_IMM32S, "i32s" },
2561 { OPERAND_TYPE_IMM64, "i64" },
2562 { OPERAND_TYPE_IMM1, "i1" },
2563 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2564 { OPERAND_TYPE_DISP8, "d8" },
2565 { OPERAND_TYPE_DISP16, "d16" },
2566 { OPERAND_TYPE_DISP32, "d32" },
2567 { OPERAND_TYPE_DISP32S, "d32s" },
2568 { OPERAND_TYPE_DISP64, "d64" },
2569 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2570 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2571 { OPERAND_TYPE_CONTROL, "control reg" },
2572 { OPERAND_TYPE_TEST, "test reg" },
2573 { OPERAND_TYPE_DEBUG, "debug reg" },
2574 { OPERAND_TYPE_FLOATREG, "FReg" },
2575 { OPERAND_TYPE_FLOATACC, "FAcc" },
2576 { OPERAND_TYPE_SREG2, "SReg2" },
2577 { OPERAND_TYPE_SREG3, "SReg3" },
2578 { OPERAND_TYPE_ACC, "Acc" },
2579 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2580 { OPERAND_TYPE_REGMMX, "rMMX" },
2581 { OPERAND_TYPE_REGXMM, "rXMM" },
2582 { OPERAND_TYPE_REGYMM, "rYMM" },
2583 { OPERAND_TYPE_ESSEG, "es" },
2584 };
2585
2586 static void
2587 pt (i386_operand_type t)
2588 {
2589 unsigned int j;
2590 i386_operand_type a;
2591
2592 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2593 {
2594 a = operand_type_and (t, type_names[j].mask);
2595 if (!operand_type_all_zero (&a))
2596 fprintf (stdout, "%s, ", type_names[j].name);
2597 }
2598 fflush (stdout);
2599 }
2600
2601 #endif /* DEBUG386 */
2602 \f
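/* Translate a fixup of SIZE bytes with the given PCREL and SIGN into a
   BFD relocation code, e.g. (4, 1, 1, NO_RELOC) gives BFD_RELOC_32_PCREL
   and (4, 0, 1, NO_RELOC) gives BFD_RELOC_X86_64_32S.  When OTHER names
   a specific relocation it is validated (and widened to 8 bytes where
   needed) instead.  Return NO_RELOC on error.  */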
2603 static bfd_reloc_code_real_type
2604 reloc (unsigned int size,
2605 int pcrel,
2606 int sign,
2607 bfd_reloc_code_real_type other)
2608 {
2609 if (other != NO_RELOC)
2610 {
2611 reloc_howto_type *rel;
2612
2613 if (size == 8)
2614 switch (other)
2615 {
2616 case BFD_RELOC_X86_64_GOT32:
2617 return BFD_RELOC_X86_64_GOT64;
2618 break;
2619 case BFD_RELOC_X86_64_PLTOFF64:
2620 return BFD_RELOC_X86_64_PLTOFF64;
2621 break;
2622 case BFD_RELOC_X86_64_GOTPC32:
2623 other = BFD_RELOC_X86_64_GOTPC64;
2624 break;
2625 case BFD_RELOC_X86_64_GOTPCREL:
2626 other = BFD_RELOC_X86_64_GOTPCREL64;
2627 break;
2628 case BFD_RELOC_X86_64_TPOFF32:
2629 other = BFD_RELOC_X86_64_TPOFF64;
2630 break;
2631 case BFD_RELOC_X86_64_DTPOFF32:
2632 other = BFD_RELOC_X86_64_DTPOFF64;
2633 break;
2634 default:
2635 break;
2636 }
2637
2638 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2639 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2640 sign = -1;
2641
2642 rel = bfd_reloc_type_lookup (stdoutput, other);
2643 if (!rel)
2644 as_bad (_("unknown relocation (%u)"), other);
2645 else if (size != bfd_get_reloc_size (rel))
2646 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2647 bfd_get_reloc_size (rel),
2648 size);
2649 else if (pcrel && !rel->pc_relative)
2650 as_bad (_("non-pc-relative relocation for pc-relative field"));
2651 else if ((rel->complain_on_overflow == complain_overflow_signed
2652 && !sign)
2653 || (rel->complain_on_overflow == complain_overflow_unsigned
2654 && sign > 0))
2655 as_bad (_("relocated field and relocation type differ in signedness"));
2656 else
2657 return other;
2658 return NO_RELOC;
2659 }
2660
2661 if (pcrel)
2662 {
2663 if (!sign)
2664 as_bad (_("there are no unsigned pc-relative relocations"));
2665 switch (size)
2666 {
2667 case 1: return BFD_RELOC_8_PCREL;
2668 case 2: return BFD_RELOC_16_PCREL;
2669 case 4: return BFD_RELOC_32_PCREL;
2670 case 8: return BFD_RELOC_64_PCREL;
2671 }
2672 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2673 }
2674 else
2675 {
2676 if (sign > 0)
2677 switch (size)
2678 {
2679 case 4: return BFD_RELOC_X86_64_32S;
2680 }
2681 else
2682 switch (size)
2683 {
2684 case 1: return BFD_RELOC_8;
2685 case 2: return BFD_RELOC_16;
2686 case 4: return BFD_RELOC_32;
2687 case 8: return BFD_RELOC_64;
2688 }
2689 as_bad (_("cannot do %s %u byte relocation"),
2690 sign > 0 ? "signed" : "unsigned", size);
2691 }
2692
2693 return NO_RELOC;
2694 }
2695
2696 /* Here we decide which fixups can be adjusted to make them relative to
2697 the beginning of the section instead of the symbol. Basically we need
2698 to make sure that the dynamic relocations are done correctly, so in
2699 some cases we force the original symbol to be used. */
2700
2701 int
2702 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2703 {
2704 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2705 if (!IS_ELF)
2706 return 1;
2707
2708 /* Don't adjust pc-relative references to merge sections in 64-bit
2709 mode. */
2710 if (use_rela_relocations
2711 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2712 && fixP->fx_pcrel)
2713 return 0;
2714
2715 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2716 and changed later by validate_fix. */
2717 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2718 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2719 return 0;
2720
2721 /* adjust_reloc_syms doesn't know about the GOT. */
2722 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2723 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2724 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2725 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2726 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2727 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2728 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2729 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2730 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2731 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2732 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2733 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2734 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2735 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2736 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2737 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2738 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2739 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2740 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2741 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2742 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2743 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2744 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2745 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2746 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2747 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2748 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2749 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2750 return 0;
2751 #endif
2752 return 1;
2753 }
2754
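/* Classify MNEMONIC for Intel syntax suffix handling: return 0 for
   non-FPU mnemonics (including fxsave/fxrstor), 2 for integer FPU ops
   such as fiadd, 3 for control/state ops such as fldcw, fnsave or
   fstenv, and 1 for other FPU operations.  */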
2755 static int
2756 intel_float_operand (const char *mnemonic)
2757 {
2758 /* Note that the value returned is meaningful only for opcodes with (memory)
2759 operands, hence the code here is free to improperly handle opcodes that
2760 have no operands (for better performance and smaller code). */
2761
2762 if (mnemonic[0] != 'f')
2763 return 0; /* non-math */
2764
2765 switch (mnemonic[1])
2766 {
2767 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2768 the fs segment override prefix are not currently handled because no
2769 call path can make opcodes without operands get here.  */
2770 case 'i':
2771 return 2 /* integer op */;
2772 case 'l':
2773 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2774 return 3; /* fldcw/fldenv */
2775 break;
2776 case 'n':
2777 if (mnemonic[2] != 'o' /* fnop */)
2778 return 3; /* non-waiting control op */
2779 break;
2780 case 'r':
2781 if (mnemonic[2] == 's')
2782 return 3; /* frstor/frstpm */
2783 break;
2784 case 's':
2785 if (mnemonic[2] == 'a')
2786 return 3; /* fsave */
2787 if (mnemonic[2] == 't')
2788 {
2789 switch (mnemonic[3])
2790 {
2791 case 'c': /* fstcw */
2792 case 'd': /* fstdw */
2793 case 'e': /* fstenv */
2794 case 's': /* fsts[gw] */
2795 return 3;
2796 }
2797 }
2798 break;
2799 case 'x':
2800 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2801 return 0; /* fxsave/fxrstor are not really math ops */
2802 break;
2803 }
2804
2805 return 1;
2806 }
2807
2808 /* Build the VEX prefix. */
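/* The 2-byte form is C5 followed by [~R | vvvv | L | pp]; the 3-byte
   form is C4 (or 8F for XOP) followed by [~R ~X ~B | mmmmm] and
   [W | vvvv | L | pp], where vvvv is the inverted register specifier,
   L selects 256-bit vectors and pp encodes the implied 66/F3/F2
   prefix.  */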
2809
2810 static void
2811 build_vex_prefix (const insn_template *t)
2812 {
2813 unsigned int register_specifier;
2814 unsigned int implied_prefix;
2815 unsigned int vector_length;
2816
2817 /* Check register specifier. */
2818 if (i.vex.register_specifier)
2819 {
2820 register_specifier = i.vex.register_specifier->reg_num;
2821 if ((i.vex.register_specifier->reg_flags & RegRex))
2822 register_specifier += 8;
2823 register_specifier = ~register_specifier & 0xf;
2824 }
2825 else
2826 register_specifier = 0xf;
2827
2828 /* Use 2-byte VEX prefix by swapping destination and source
2829 operand. */
2830 if (!i.swap_operand
2831 && i.operands == i.reg_operands
2832 && i.tm.opcode_modifier.vexopcode == VEX0F
2833 && i.tm.opcode_modifier.s
2834 && i.rex == REX_B)
2835 {
2836 unsigned int xchg = i.operands - 1;
2837 union i386_op temp_op;
2838 i386_operand_type temp_type;
2839
2840 temp_type = i.types[xchg];
2841 i.types[xchg] = i.types[0];
2842 i.types[0] = temp_type;
2843 temp_op = i.op[xchg];
2844 i.op[xchg] = i.op[0];
2845 i.op[0] = temp_op;
2846
2847 gas_assert (i.rm.mode == 3);
2848
2849 i.rex = REX_R;
2850 xchg = i.rm.regmem;
2851 i.rm.regmem = i.rm.reg;
2852 i.rm.reg = xchg;
2853
2854 /* Use the next insn. */
2855 i.tm = t[1];
2856 }
2857
2858 if (i.tm.opcode_modifier.vex == VEXScalar)
2859 vector_length = avxscalar;
2860 else
2861 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2862
2863 switch ((i.tm.base_opcode >> 8) & 0xff)
2864 {
2865 case 0:
2866 implied_prefix = 0;
2867 break;
2868 case DATA_PREFIX_OPCODE:
2869 implied_prefix = 1;
2870 break;
2871 case REPE_PREFIX_OPCODE:
2872 implied_prefix = 2;
2873 break;
2874 case REPNE_PREFIX_OPCODE:
2875 implied_prefix = 3;
2876 break;
2877 default:
2878 abort ();
2879 }
2880
2881 /* Use 2-byte VEX prefix if possible. */
2882 if (i.tm.opcode_modifier.vexopcode == VEX0F
2883 && i.tm.opcode_modifier.vexw != VEXW1
2884 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2885 {
2886 /* 2-byte VEX prefix. */
2887 unsigned int r;
2888
2889 i.vex.length = 2;
2890 i.vex.bytes[0] = 0xc5;
2891
2892 /* Check the REX.R bit. */
2893 r = (i.rex & REX_R) ? 0 : 1;
2894 i.vex.bytes[1] = (r << 7
2895 | register_specifier << 3
2896 | vector_length << 2
2897 | implied_prefix);
2898 }
2899 else
2900 {
2901 /* 3-byte VEX prefix. */
2902 unsigned int m, w;
2903
2904 i.vex.length = 3;
2905
2906 switch (i.tm.opcode_modifier.vexopcode)
2907 {
2908 case VEX0F:
2909 m = 0x1;
2910 i.vex.bytes[0] = 0xc4;
2911 break;
2912 case VEX0F38:
2913 m = 0x2;
2914 i.vex.bytes[0] = 0xc4;
2915 break;
2916 case VEX0F3A:
2917 m = 0x3;
2918 i.vex.bytes[0] = 0xc4;
2919 break;
2920 case XOP08:
2921 m = 0x8;
2922 i.vex.bytes[0] = 0x8f;
2923 break;
2924 case XOP09:
2925 m = 0x9;
2926 i.vex.bytes[0] = 0x8f;
2927 break;
2928 case XOP0A:
2929 m = 0xa;
2930 i.vex.bytes[0] = 0x8f;
2931 break;
2932 default:
2933 abort ();
2934 }
2935
2936 /* The high 3 bits of the second VEX byte are the one's complement
2937 of RXB bits from REX. */
2938 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2939
2940 /* Check the REX.W bit. */
2941 w = (i.rex & REX_W) ? 1 : 0;
2942 if (i.tm.opcode_modifier.vexw)
2943 {
2944 if (w)
2945 abort ();
2946
2947 if (i.tm.opcode_modifier.vexw == VEXW1)
2948 w = 1;
2949 }
2950
2951 i.vex.bytes[2] = (w << 7
2952 | register_specifier << 3
2953 | vector_length << 2
2954 | implied_prefix);
2955 }
2956 }
2957
2958 static void
2959 process_immext (void)
2960 {
2961 expressionS *exp;
2962
2963 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2964 {
2965 /* SSE3 instructions have fixed register operands with an opcode
2966 suffix which is coded in the same place as an 8-bit immediate
2967 field would be. Here we check those operands and remove them
2968 afterwards. */
2969 unsigned int x;
2970
2971 for (x = 0; x < i.operands; x++)
2972 if (i.op[x].regs->reg_num != x)
2973 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2974 register_prefix, i.op[x].regs->reg_name, x + 1,
2975 i.tm.name);
2976
2977 i.operands = 0;
2978 }
2979
2980 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2981 which is coded in the same place as an 8-bit immediate field
2982 would be. Here we fake an 8-bit immediate operand from the
2983 opcode suffix stored in tm.extension_opcode.
2984
2985 AVX instructions also use this encoding, for some of
2986 3 argument instructions. */
2987
2988 gas_assert (i.imm_operands == 0
2989 && (i.operands <= 2
2990 || (i.tm.opcode_modifier.vex
2991 && i.operands <= 4)));
2992
2993 exp = &im_expressions[i.imm_operands++];
2994 i.op[i.operands].imms = exp;
2995 i.types[i.operands] = imm8;
2996 i.operands++;
2997 exp->X_op = O_constant;
2998 exp->X_add_number = i.tm.extension_opcode;
2999 i.tm.extension_opcode = None;
3000 }
3001
3002 /* This is the guts of the machine-dependent assembler. LINE points to a
3003 machine dependent instruction. This function is supposed to emit
3004 the frags/bytes it assembles to. */
3005
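/* The overall flow is: parse_insn and parse_operands fill in `i',
   optimize_imm/optimize_disp narrow constant operands, match_template
   picks the insn template, process_suffix/process_operands finalize
   sizes, prefixes and ModRM/SIB, and output_insn emits the bytes.  */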
3006 void
3007 md_assemble (char *line)
3008 {
3009 unsigned int j;
3010 char mnemonic[MAX_MNEM_SIZE];
3011 const insn_template *t;
3012
3013 /* Initialize globals. */
3014 memset (&i, '\0', sizeof (i));
3015 for (j = 0; j < MAX_OPERANDS; j++)
3016 i.reloc[j] = NO_RELOC;
3017 memset (disp_expressions, '\0', sizeof (disp_expressions));
3018 memset (im_expressions, '\0', sizeof (im_expressions));
3019 save_stack_p = save_stack;
3020
3021 /* First parse an instruction mnemonic & call i386_operand for the operands.
3022 We assume that the scrubber has arranged it so that line[0] is the valid
3023 start of a (possibly prefixed) mnemonic. */
3024
3025 line = parse_insn (line, mnemonic);
3026 if (line == NULL)
3027 return;
3028
3029 line = parse_operands (line, mnemonic);
3030 this_operand = -1;
3031 if (line == NULL)
3032 return;
3033
3034 /* Now we've parsed the mnemonic into a set of templates, and have the
3035 operands at hand. */
3036
3037 /* All intel opcodes have reversed operands except for "bound" and
3038 "enter". We also don't reverse intersegment "jmp" and "call"
3039 instructions with 2 immediate operands so that the immediate segment
3040 precedes the offset, as it does when in AT&T mode. */
3041 if (intel_syntax
3042 && i.operands > 1
3043 && (strcmp (mnemonic, "bound") != 0)
3044 && (strcmp (mnemonic, "invlpga") != 0)
3045 && !(operand_type_check (i.types[0], imm)
3046 && operand_type_check (i.types[1], imm)))
3047 swap_operands ();
3048
3049 /* The order of the immediates should be reversed
3050 for the 2-immediate extrq and insertq instructions.  */
3051 if (i.imm_operands == 2
3052 && (strcmp (mnemonic, "extrq") == 0
3053 || strcmp (mnemonic, "insertq") == 0))
3054 swap_2_operands (0, 1);
3055
3056 if (i.imm_operands)
3057 optimize_imm ();
3058
3059 /* Don't optimize displacement for movabs since it only takes 64bit
3060 displacement. */
3061 if (i.disp_operands
3062 && i.disp_encoding != disp_encoding_32bit
3063 && (flag_code != CODE_64BIT
3064 || strcmp (mnemonic, "movabs") != 0))
3065 optimize_disp ();
3066
3067 /* Next, we find a template that matches the given insn,
3068 making sure the overlap of the given operands types is consistent
3069 with the template operand types. */
3070
3071 if (!(t = match_template ()))
3072 return;
3073
3074 if (sse_check != sse_check_none
3075 && !i.tm.opcode_modifier.noavx
3076 && (i.tm.cpu_flags.bitfield.cpusse
3077 || i.tm.cpu_flags.bitfield.cpusse2
3078 || i.tm.cpu_flags.bitfield.cpusse3
3079 || i.tm.cpu_flags.bitfield.cpussse3
3080 || i.tm.cpu_flags.bitfield.cpusse4_1
3081 || i.tm.cpu_flags.bitfield.cpusse4_2))
3082 {
3083 (sse_check == sse_check_warning
3084 ? as_warn
3085 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3086 }
3087
3088 /* Zap movzx and movsx suffix. The suffix has been set from
3089 "word ptr" or "byte ptr" on the source operand in Intel syntax
3090 or extracted from mnemonic in AT&T syntax. But we'll use
3091 the destination register to choose the suffix for encoding. */
3092 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3093 {
3094 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3095 there is no suffix, the default will be byte extension. */
3096 if (i.reg_operands != 2
3097 && !i.suffix
3098 && intel_syntax)
3099 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3100
3101 i.suffix = 0;
3102 }
3103
3104 if (i.tm.opcode_modifier.fwait)
3105 if (!add_prefix (FWAIT_OPCODE))
3106 return;
3107
3108 /* Check for lock without a lockable instruction. Destination operand
3109 must be memory unless it is xchg (0x86). */
3110 if (i.prefix[LOCK_PREFIX]
3111 && (!i.tm.opcode_modifier.islockable
3112 || i.mem_operands == 0
3113 || (i.tm.base_opcode != 0x86
3114 && !operand_type_check (i.types[i.operands - 1], anymem))))
3115 {
3116 as_bad (_("expecting lockable instruction after `lock'"));
3117 return;
3118 }
3119
3120 /* Check string instruction segment overrides. */
3121 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3122 {
3123 if (!check_string ())
3124 return;
3125 i.disp_operands = 0;
3126 }
3127
3128 if (!process_suffix ())
3129 return;
3130
3131 /* Update operand types. */
3132 for (j = 0; j < i.operands; j++)
3133 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3134
3135 /* Make still unresolved immediate matches conform to size of immediate
3136 given in i.suffix. */
3137 if (!finalize_imm ())
3138 return;
3139
3140 if (i.types[0].bitfield.imm1)
3141 i.imm_operands = 0; /* kludge for shift insns. */
3142
3143 /* We only need to check those implicit registers for instructions
3144 with 3 operands or less. */
3145 if (i.operands <= 3)
3146 for (j = 0; j < i.operands; j++)
3147 if (i.types[j].bitfield.inoutportreg
3148 || i.types[j].bitfield.shiftcount
3149 || i.types[j].bitfield.acc
3150 || i.types[j].bitfield.floatacc)
3151 i.reg_operands--;
3152
3153 /* ImmExt should be processed after SSE2AVX. */
3154 if (!i.tm.opcode_modifier.sse2avx
3155 && i.tm.opcode_modifier.immext)
3156 process_immext ();
3157
3158 /* For insns with operands there are more diddles to do to the opcode. */
3159 if (i.operands)
3160 {
3161 if (!process_operands ())
3162 return;
3163 }
3164 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3165 {
3166 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3167 as_warn (_("translating to `%sp'"), i.tm.name);
3168 }
3169
3170 if (i.tm.opcode_modifier.vex)
3171 build_vex_prefix (t);
3172
3173 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3174 instructions may define INT_OPCODE as well, so avoid this corner
3175 case for those instructions that use MODRM. */
3176 if (i.tm.base_opcode == INT_OPCODE
3177 && !i.tm.opcode_modifier.modrm
3178 && i.op[0].imms->X_add_number == 3)
3179 {
3180 i.tm.base_opcode = INT3_OPCODE;
3181 i.imm_operands = 0;
3182 }
3183
3184 if ((i.tm.opcode_modifier.jump
3185 || i.tm.opcode_modifier.jumpbyte
3186 || i.tm.opcode_modifier.jumpdword)
3187 && i.op[0].disps->X_op == O_constant)
3188 {
3189 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3190 the absolute address given by the constant. Since ix86 jumps and
3191 calls are pc relative, we need to generate a reloc. */
3192 i.op[0].disps->X_add_symbol = &abs_symbol;
3193 i.op[0].disps->X_op = O_symbol;
3194 }
3195
3196 if (i.tm.opcode_modifier.rex64)
3197 i.rex |= REX_W;
3198
3199 /* For 8 bit registers we need an empty rex prefix. Also if the
3200 instruction already has a prefix, we need to convert old
3201 registers to new ones. */
3202
3203 if ((i.types[0].bitfield.reg8
3204 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3205 || (i.types[1].bitfield.reg8
3206 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3207 || ((i.types[0].bitfield.reg8
3208 || i.types[1].bitfield.reg8)
3209 && i.rex != 0))
3210 {
3211 int x;
3212
3213 i.rex |= REX_OPCODE;
3214 for (x = 0; x < 2; x++)
3215 {
3216 /* Look for 8 bit operand that uses old registers. */
3217 if (i.types[x].bitfield.reg8
3218 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3219 {
3220 /* If it is a high byte register (%ah/%ch/%dh/%bh), give up.  */
3221 if (i.op[x].regs->reg_num > 3)
3222 as_bad (_("can't encode register '%s%s' in an "
3223 "instruction requiring REX prefix."),
3224 register_prefix, i.op[x].regs->reg_name);
3225
3226 /* Otherwise it is equivalent to the extended register.
3227 Since the encoding doesn't change this is merely
3228 cosmetic cleanup for debug output. */
3229
3230 i.op[x].regs = i.op[x].regs + 8;
3231 }
3232 }
3233 }
3234
3235 if (i.rex != 0)
3236 add_prefix (REX_OPCODE | i.rex);
3237
3238 /* We are ready to output the insn. */
3239 output_insn ();
3240 }
3241
3242 static char *
3243 parse_insn (char *line, char *mnemonic)
3244 {
3245 char *l = line;
3246 char *token_start = l;
3247 char *mnem_p;
3248 int supported;
3249 const insn_template *t;
3250 char *dot_p = NULL;
3251
3252 /* Non-zero if we found a prefix only acceptable with string insns. */
3253 const char *expecting_string_instruction = NULL;
3254
3255 while (1)
3256 {
3257 mnem_p = mnemonic;
3258 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3259 {
3260 if (*mnem_p == '.')
3261 dot_p = mnem_p;
3262 mnem_p++;
3263 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3264 {
3265 as_bad (_("no such instruction: `%s'"), token_start);
3266 return NULL;
3267 }
3268 l++;
3269 }
3270 if (!is_space_char (*l)
3271 && *l != END_OF_INSN
3272 && (intel_syntax
3273 || (*l != PREFIX_SEPARATOR
3274 && *l != ',')))
3275 {
3276 as_bad (_("invalid character %s in mnemonic"),
3277 output_invalid (*l));
3278 return NULL;
3279 }
3280 if (token_start == l)
3281 {
3282 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3283 as_bad (_("expecting prefix; got nothing"));
3284 else
3285 as_bad (_("expecting mnemonic; got nothing"));
3286 return NULL;
3287 }
3288
3289 /* Look up instruction (or prefix) via hash table. */
3290 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3291
3292 if (*l != END_OF_INSN
3293 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3294 && current_templates
3295 && current_templates->start->opcode_modifier.isprefix)
3296 {
3297 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3298 {
3299 as_bad ((flag_code != CODE_64BIT
3300 ? _("`%s' is only supported in 64-bit mode")
3301 : _("`%s' is not supported in 64-bit mode")),
3302 current_templates->start->name);
3303 return NULL;
3304 }
3305 /* If we are in 16-bit mode, do not allow addr16 or data16.
3306 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3307 if ((current_templates->start->opcode_modifier.size16
3308 || current_templates->start->opcode_modifier.size32)
3309 && flag_code != CODE_64BIT
3310 && (current_templates->start->opcode_modifier.size32
3311 ^ (flag_code == CODE_16BIT)))
3312 {
3313 as_bad (_("redundant %s prefix"),
3314 current_templates->start->name);
3315 return NULL;
3316 }
3317 /* Add prefix, checking for repeated prefixes. */
3318 switch (add_prefix (current_templates->start->base_opcode))
3319 {
3320 case PREFIX_EXIST:
3321 return NULL;
3322 case PREFIX_REP:
3323 expecting_string_instruction = current_templates->start->name;
3324 break;
3325 default:
3326 break;
3327 }
3328 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3329 token_start = ++l;
3330 }
3331 else
3332 break;
3333 }
3334
3335 if (!current_templates)
3336 {
3337 /* Check if we should swap operand or force 32bit displacement in
3338 encoding. */
3339 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3340 i.swap_operand = 1;
3341 else if (mnem_p - 3 == dot_p
3342 && dot_p[1] == 'd'
3343 && dot_p[2] == '8')
3344 i.disp_encoding = disp_encoding_8bit;
3345 else if (mnem_p - 4 == dot_p
3346 && dot_p[1] == 'd'
3347 && dot_p[2] == '3'
3348 && dot_p[3] == '2')
3349 i.disp_encoding = disp_encoding_32bit;
3350 else
3351 goto check_suffix;
3352 mnem_p = dot_p;
3353 *dot_p = '\0';
3354 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3355 }
3356
3357 if (!current_templates)
3358 {
3359 check_suffix:
3360 /* See if we can get a match by trimming off a suffix. */
3361 switch (mnem_p[-1])
3362 {
3363 case WORD_MNEM_SUFFIX:
3364 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3365 i.suffix = SHORT_MNEM_SUFFIX;
3366 else
3367 case BYTE_MNEM_SUFFIX:
3368 case QWORD_MNEM_SUFFIX:
3369 i.suffix = mnem_p[-1];
3370 mnem_p[-1] = '\0';
3371 current_templates = (const templates *) hash_find (op_hash,
3372 mnemonic);
3373 break;
3374 case SHORT_MNEM_SUFFIX:
3375 case LONG_MNEM_SUFFIX:
3376 if (!intel_syntax)
3377 {
3378 i.suffix = mnem_p[-1];
3379 mnem_p[-1] = '\0';
3380 current_templates = (const templates *) hash_find (op_hash,
3381 mnemonic);
3382 }
3383 break;
3384
3385 /* Intel Syntax. */
3386 case 'd':
3387 if (intel_syntax)
3388 {
3389 if (intel_float_operand (mnemonic) == 1)
3390 i.suffix = SHORT_MNEM_SUFFIX;
3391 else
3392 i.suffix = LONG_MNEM_SUFFIX;
3393 mnem_p[-1] = '\0';
3394 current_templates = (const templates *) hash_find (op_hash,
3395 mnemonic);
3396 }
3397 break;
3398 }
3399 if (!current_templates)
3400 {
3401 as_bad (_("no such instruction: `%s'"), token_start);
3402 return NULL;
3403 }
3404 }
3405
3406 if (current_templates->start->opcode_modifier.jump
3407 || current_templates->start->opcode_modifier.jumpbyte)
3408 {
3409 /* Check for a branch hint. We allow ",pt" and ",pn" for
3410 predict taken and predict not taken respectively.
3411 I'm not sure that branch hints actually do anything on loop
3412 and jcxz insns (JumpByte) for current Pentium4 chips. They
3413 may work in the future and it doesn't hurt to accept them
3414 now. */
3415 if (l[0] == ',' && l[1] == 'p')
3416 {
3417 if (l[2] == 't')
3418 {
3419 if (!add_prefix (DS_PREFIX_OPCODE))
3420 return NULL;
3421 l += 3;
3422 }
3423 else if (l[2] == 'n')
3424 {
3425 if (!add_prefix (CS_PREFIX_OPCODE))
3426 return NULL;
3427 l += 3;
3428 }
3429 }
3430 }
3431 /* Any other comma loses. */
3432 if (*l == ',')
3433 {
3434 as_bad (_("invalid character %s in mnemonic"),
3435 output_invalid (*l));
3436 return NULL;
3437 }
3438
3439 /* Check if instruction is supported on specified architecture. */
3440 supported = 0;
3441 for (t = current_templates->start; t < current_templates->end; ++t)
3442 {
3443 supported |= cpu_flags_match (t);
3444 if (supported == CPU_FLAGS_PERFECT_MATCH)
3445 goto skip;
3446 }
3447
3448 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3449 {
3450 as_bad (flag_code == CODE_64BIT
3451 ? _("`%s' is not supported in 64-bit mode")
3452 : _("`%s' is only supported in 64-bit mode"),
3453 current_templates->start->name);
3454 return NULL;
3455 }
3456 if (supported != CPU_FLAGS_PERFECT_MATCH)
3457 {
3458 as_bad (_("`%s' is not supported on `%s%s'"),
3459 current_templates->start->name,
3460 cpu_arch_name ? cpu_arch_name : default_arch,
3461 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3462 return NULL;
3463 }
3464
3465 skip:
3466 if (!cpu_arch_flags.bitfield.cpui386
3467 && (flag_code != CODE_16BIT))
3468 {
3469 as_warn (_("use .code16 to ensure correct addressing mode"));
3470 }
3471
3472 /* Check for rep/repne without a string instruction. */
3473 if (expecting_string_instruction)
3474 {
3475 static templates override;
3476
3477 for (t = current_templates->start; t < current_templates->end; ++t)
3478 if (t->opcode_modifier.isstring)
3479 break;
3480 if (t >= current_templates->end)
3481 {
3482 as_bad (_("expecting string instruction after `%s'"),
3483 expecting_string_instruction);
3484 return NULL;
3485 }
3486 for (override.start = t; t < current_templates->end; ++t)
3487 if (!t->opcode_modifier.isstring)
3488 break;
3489 override.end = t;
3490 current_templates = &override;
3491 }
3492
3493 return l;
3494 }
3495
3496 static char *
3497 parse_operands (char *l, const char *mnemonic)
3498 {
3499 char *token_start;
3500
3501 /* 1 if operand is pending after ','. */
3502 unsigned int expecting_operand = 0;
3503
3504 /* Non-zero if operand parens not balanced. */
3505 unsigned int paren_not_balanced;
3506
3507 while (*l != END_OF_INSN)
3508 {
3509 /* Skip optional white space before operand. */
3510 if (is_space_char (*l))
3511 ++l;
3512 if (!is_operand_char (*l) && *l != END_OF_INSN)
3513 {
3514 as_bad (_("invalid character %s before operand %d"),
3515 output_invalid (*l),
3516 i.operands + 1);
3517 return NULL;
3518 }
3519 token_start = l; /* after white space */
3520 paren_not_balanced = 0;
3521 while (paren_not_balanced || *l != ',')
3522 {
3523 if (*l == END_OF_INSN)
3524 {
3525 if (paren_not_balanced)
3526 {
3527 if (!intel_syntax)
3528 as_bad (_("unbalanced parenthesis in operand %d."),
3529 i.operands + 1);
3530 else
3531 as_bad (_("unbalanced brackets in operand %d."),
3532 i.operands + 1);
3533 return NULL;
3534 }
3535 else
3536 break; /* we are done */
3537 }
3538 else if (!is_operand_char (*l) && !is_space_char (*l))
3539 {
3540 as_bad (_("invalid character %s in operand %d"),
3541 output_invalid (*l),
3542 i.operands + 1);
3543 return NULL;
3544 }
3545 if (!intel_syntax)
3546 {
3547 if (*l == '(')
3548 ++paren_not_balanced;
3549 if (*l == ')')
3550 --paren_not_balanced;
3551 }
3552 else
3553 {
3554 if (*l == '[')
3555 ++paren_not_balanced;
3556 if (*l == ']')
3557 --paren_not_balanced;
3558 }
3559 l++;
3560 }
3561 if (l != token_start)
3562 { /* Yes, we've read in another operand. */
3563 unsigned int operand_ok;
3564 this_operand = i.operands++;
3565 i.types[this_operand].bitfield.unspecified = 1;
3566 if (i.operands > MAX_OPERANDS)
3567 {
3568 as_bad (_("spurious operands; (%d operands/instruction max)"),
3569 MAX_OPERANDS);
3570 return NULL;
3571 }
3572 /* Now parse operand adding info to 'i' as we go along. */
3573 END_STRING_AND_SAVE (l);
3574
3575 if (intel_syntax)
3576 operand_ok =
3577 i386_intel_operand (token_start,
3578 intel_float_operand (mnemonic));
3579 else
3580 operand_ok = i386_att_operand (token_start);
3581
3582 RESTORE_END_STRING (l);
3583 if (!operand_ok)
3584 return NULL;
3585 }
3586 else
3587 {
3588 if (expecting_operand)
3589 {
3590 expecting_operand_after_comma:
3591 as_bad (_("expecting operand after ','; got nothing"));
3592 return NULL;
3593 }
3594 if (*l == ',')
3595 {
3596 as_bad (_("expecting operand before ','; got nothing"));
3597 return NULL;
3598 }
3599 }
3600
3601 /* Now *l must be either ',' or END_OF_INSN. */
3602 if (*l == ',')
3603 {
3604 if (*++l == END_OF_INSN)
3605 {
3606 /* We have skipped the comma; end of line follows, so complain.  */
3607 goto expecting_operand_after_comma;
3608 }
3609 expecting_operand = 1;
3610 }
3611 }
3612 return l;
3613 }
3614
3615 static void
3616 swap_2_operands (int xchg1, int xchg2)
3617 {
3618 union i386_op temp_op;
3619 i386_operand_type temp_type;
3620 enum bfd_reloc_code_real temp_reloc;
3621
3622 temp_type = i.types[xchg2];
3623 i.types[xchg2] = i.types[xchg1];
3624 i.types[xchg1] = temp_type;
3625 temp_op = i.op[xchg2];
3626 i.op[xchg2] = i.op[xchg1];
3627 i.op[xchg1] = temp_op;
3628 temp_reloc = i.reloc[xchg2];
3629 i.reloc[xchg2] = i.reloc[xchg1];
3630 i.reloc[xchg1] = temp_reloc;
3631 }
3632
3633 static void
3634 swap_operands (void)
3635 {
3636 switch (i.operands)
3637 {
3638 case 5:
3639 case 4:
3640 swap_2_operands (1, i.operands - 2);
3641 case 3:
3642 case 2:
3643 swap_2_operands (0, i.operands - 1);
3644 break;
3645 default:
3646 abort ();
3647 }
3648
3649 if (i.mem_operands == 2)
3650 {
3651 const seg_entry *temp_seg;
3652 temp_seg = i.seg[0];
3653 i.seg[0] = i.seg[1];
3654 i.seg[1] = temp_seg;
3655 }
3656 }
3657
3658 /* Try to ensure constant immediates are represented in the smallest
3659 opcode possible. */
3660 static void
3661 optimize_imm (void)
3662 {
3663 char guess_suffix = 0;
3664 int op;
3665
3666 if (i.suffix)
3667 guess_suffix = i.suffix;
3668 else if (i.reg_operands)
3669 {
3670 /* Figure out a suffix from the last register operand specified.
3671 We can't do this properly yet, i.e. excluding InOutPortReg,
3672 but the following works for instructions with immediates.
3673 In any case, we can't set i.suffix yet. */
3674 for (op = i.operands; --op >= 0;)
3675 if (i.types[op].bitfield.reg8)
3676 {
3677 guess_suffix = BYTE_MNEM_SUFFIX;
3678 break;
3679 }
3680 else if (i.types[op].bitfield.reg16)
3681 {
3682 guess_suffix = WORD_MNEM_SUFFIX;
3683 break;
3684 }
3685 else if (i.types[op].bitfield.reg32)
3686 {
3687 guess_suffix = LONG_MNEM_SUFFIX;
3688 break;
3689 }
3690 else if (i.types[op].bitfield.reg64)
3691 {
3692 guess_suffix = QWORD_MNEM_SUFFIX;
3693 break;
3694 }
3695 }
3696 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3697 guess_suffix = WORD_MNEM_SUFFIX;
3698
3699 for (op = i.operands; --op >= 0;)
3700 if (operand_type_check (i.types[op], imm))
3701 {
3702 switch (i.op[op].imms->X_op)
3703 {
3704 case O_constant:
3705 /* If a suffix is given, this operand may be shortened. */
3706 switch (guess_suffix)
3707 {
3708 case LONG_MNEM_SUFFIX:
3709 i.types[op].bitfield.imm32 = 1;
3710 i.types[op].bitfield.imm64 = 1;
3711 break;
3712 case WORD_MNEM_SUFFIX:
3713 i.types[op].bitfield.imm16 = 1;
3714 i.types[op].bitfield.imm32 = 1;
3715 i.types[op].bitfield.imm32s = 1;
3716 i.types[op].bitfield.imm64 = 1;
3717 break;
3718 case BYTE_MNEM_SUFFIX:
3719 i.types[op].bitfield.imm8 = 1;
3720 i.types[op].bitfield.imm8s = 1;
3721 i.types[op].bitfield.imm16 = 1;
3722 i.types[op].bitfield.imm32 = 1;
3723 i.types[op].bitfield.imm32s = 1;
3724 i.types[op].bitfield.imm64 = 1;
3725 break;
3726 }
3727
3728 /* If this operand is at most 16 bits, convert it
3729 to a signed 16 bit number before trying to see
3730 whether it will fit in an even smaller size.
3731 This allows a 16-bit operand such as $0xffe0 to
3732 be recognised as within Imm8S range. */
3733 if ((i.types[op].bitfield.imm16)
3734 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3735 {
3736 i.op[op].imms->X_add_number =
3737 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3738 }
3739 if ((i.types[op].bitfield.imm32)
3740 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3741 == 0))
3742 {
3743 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3744 ^ ((offsetT) 1 << 31))
3745 - ((offsetT) 1 << 31));
3746 }
3747 i.types[op]
3748 = operand_type_or (i.types[op],
3749 smallest_imm_type (i.op[op].imms->X_add_number));
3750
3751 /* We must avoid matching of Imm32 templates when 64bit
3752 only immediate is available. */
3753 if (guess_suffix == QWORD_MNEM_SUFFIX)
3754 i.types[op].bitfield.imm32 = 0;
3755 break;
3756
3757 case O_absent:
3758 case O_register:
3759 abort ();
3760
3761 /* Symbols and expressions. */
3762 default:
3763 /* Convert symbolic operand to proper sizes for matching, but don't
3764 prevent matching a set of insns that only supports sizes other
3765 than those matching the insn suffix. */
3766 {
3767 i386_operand_type mask, allowed;
3768 const insn_template *t;
3769
3770 operand_type_set (&mask, 0);
3771 operand_type_set (&allowed, 0);
3772
3773 for (t = current_templates->start;
3774 t < current_templates->end;
3775 ++t)
3776 allowed = operand_type_or (allowed,
3777 t->operand_types[op]);
3778 switch (guess_suffix)
3779 {
3780 case QWORD_MNEM_SUFFIX:
3781 mask.bitfield.imm64 = 1;
3782 mask.bitfield.imm32s = 1;
3783 break;
3784 case LONG_MNEM_SUFFIX:
3785 mask.bitfield.imm32 = 1;
3786 break;
3787 case WORD_MNEM_SUFFIX:
3788 mask.bitfield.imm16 = 1;
3789 break;
3790 case BYTE_MNEM_SUFFIX:
3791 mask.bitfield.imm8 = 1;
3792 break;
3793 default:
3794 break;
3795 }
3796 allowed = operand_type_and (mask, allowed);
3797 if (!operand_type_all_zero (&allowed))
3798 i.types[op] = operand_type_and (i.types[op], mask);
3799 }
3800 break;
3801 }
3802 }
3803 }
3804
3805 /* Try to use the smallest displacement type too. */
3806 static void
3807 optimize_disp (void)
3808 {
3809 int op;
3810
3811 for (op = i.operands; --op >= 0;)
3812 if (operand_type_check (i.types[op], disp))
3813 {
3814 if (i.op[op].disps->X_op == O_constant)
3815 {
3816 offsetT op_disp = i.op[op].disps->X_add_number;
3817
3818 if (i.types[op].bitfield.disp16
3819 && (op_disp & ~(offsetT) 0xffff) == 0)
3820 {
3821 /* If this operand is at most 16 bits, convert
3822 to a signed 16 bit number and don't use 64bit
3823 displacement. */
3824 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3825 i.types[op].bitfield.disp64 = 0;
3826 }
3827 if (i.types[op].bitfield.disp32
3828 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3829 {
3830 /* If this operand is at most 32 bits, convert
3831 to a signed 32 bit number and don't use 64bit
3832 displacement. */
3833 op_disp &= (((offsetT) 2 << 31) - 1);
3834 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3835 i.types[op].bitfield.disp64 = 0;
3836 }
3837 if (!op_disp && i.types[op].bitfield.baseindex)
3838 {
3839 i.types[op].bitfield.disp8 = 0;
3840 i.types[op].bitfield.disp16 = 0;
3841 i.types[op].bitfield.disp32 = 0;
3842 i.types[op].bitfield.disp32s = 0;
3843 i.types[op].bitfield.disp64 = 0;
3844 i.op[op].disps = 0;
3845 i.disp_operands--;
3846 }
3847 else if (flag_code == CODE_64BIT)
3848 {
3849 if (fits_in_signed_long (op_disp))
3850 {
3851 i.types[op].bitfield.disp64 = 0;
3852 i.types[op].bitfield.disp32s = 1;
3853 }
3854 if (i.prefix[ADDR_PREFIX]
3855 && fits_in_unsigned_long (op_disp))
3856 i.types[op].bitfield.disp32 = 1;
3857 }
3858 if ((i.types[op].bitfield.disp32
3859 || i.types[op].bitfield.disp32s
3860 || i.types[op].bitfield.disp16)
3861 && fits_in_signed_byte (op_disp))
3862 i.types[op].bitfield.disp8 = 1;
3863 }
3864 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3865 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3866 {
3867 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3868 i.op[op].disps, 0, i.reloc[op]);
3869 i.types[op].bitfield.disp8 = 0;
3870 i.types[op].bitfield.disp16 = 0;
3871 i.types[op].bitfield.disp32 = 0;
3872 i.types[op].bitfield.disp32s = 0;
3873 i.types[op].bitfield.disp64 = 0;
3874 }
3875 else
3876 /* We only support 64bit displacement on constants. */
3877 i.types[op].bitfield.disp64 = 0;
3878 }
3879 }
3880
3881 /* Check if operands are valid for the instruction. */
3882
3883 static int
3884 check_VecOperands (const insn_template *t)
3885 {
3886 /* Without VSIB byte, we can't have a vector register for index. */
3887 if (!t->opcode_modifier.vecsib
3888 && i.index_reg
3889 && (i.index_reg->reg_type.bitfield.regxmm
3890 || i.index_reg->reg_type.bitfield.regymm))
3891 {
3892 i.error = unsupported_vector_index_register;
3893 return 1;
3894 }
3895
3896 /* For VSIB byte, we need a vector register for index and no PC
3897 relative addressing is allowed. */
3898 if (t->opcode_modifier.vecsib
3899 && (!i.index_reg
3900 || !((t->opcode_modifier.vecsib == VecSIB128
3901 && i.index_reg->reg_type.bitfield.regxmm)
3902 || (t->opcode_modifier.vecsib == VecSIB256
3903 && i.index_reg->reg_type.bitfield.regymm))
3904 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3905 {
3906 i.error = invalid_vsib_address;
3907 return 1;
3908 }
3909
3910 return 0;
3911 }
3912
3913 /* Check if operands are valid for the instruction. Update VEX
3914 operand types. */
3915
3916 static int
3917 VEX_check_operands (const insn_template *t)
3918 {
3919 if (!t->opcode_modifier.vex)
3920 return 0;
3921
3922 /* Only check VEX_Imm4, which must be the first operand. */
3923 if (t->operand_types[0].bitfield.vec_imm4)
3924 {
3925 if (i.op[0].imms->X_op != O_constant
3926 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3927 {
3928 i.error = bad_imm4;
3929 return 1;
3930 }
3931
3932 /* Turn off Imm8 so that update_imm won't complain. */
3933 i.types[0] = vec_imm4;
3934 }
3935
3936 return 0;
3937 }
3938
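/* Search current_templates for a template whose operand count, CPU
   requirements, suffix and operand types are compatible with the
   parsed instruction in `i'.  Return the matching template, or NULL
   (with i.error recording the reason) if none matches.  */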
3939 static const insn_template *
3940 match_template (void)
3941 {
3942 /* Points to template once we've found it. */
3943 const insn_template *t;
3944 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3945 i386_operand_type overlap4;
3946 unsigned int found_reverse_match;
3947 i386_opcode_modifier suffix_check;
3948 i386_operand_type operand_types [MAX_OPERANDS];
3949 int addr_prefix_disp;
3950 unsigned int j;
3951 unsigned int found_cpu_match;
3952 unsigned int check_register;
3953
3954 #if MAX_OPERANDS != 5
3955 # error "MAX_OPERANDS must be 5."
3956 #endif
3957
3958 found_reverse_match = 0;
3959 addr_prefix_disp = -1;
3960
3961 memset (&suffix_check, 0, sizeof (suffix_check));
3962 if (i.suffix == BYTE_MNEM_SUFFIX)
3963 suffix_check.no_bsuf = 1;
3964 else if (i.suffix == WORD_MNEM_SUFFIX)
3965 suffix_check.no_wsuf = 1;
3966 else if (i.suffix == SHORT_MNEM_SUFFIX)
3967 suffix_check.no_ssuf = 1;
3968 else if (i.suffix == LONG_MNEM_SUFFIX)
3969 suffix_check.no_lsuf = 1;
3970 else if (i.suffix == QWORD_MNEM_SUFFIX)
3971 suffix_check.no_qsuf = 1;
3972 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3973 suffix_check.no_ldsuf = 1;
3974
3975 /* Must have right number of operands. */
3976 i.error = number_of_operands_mismatch;
3977
3978 for (t = current_templates->start; t < current_templates->end; t++)
3979 {
3980 addr_prefix_disp = -1;
3981
3982 if (i.operands != t->operands)
3983 continue;
3984
3985 /* Check processor support. */
3986 i.error = unsupported;
3987 found_cpu_match = (cpu_flags_match (t)
3988 == CPU_FLAGS_PERFECT_MATCH);
3989 if (!found_cpu_match)
3990 continue;
3991
3992 /* Check old gcc support. */
3993 i.error = old_gcc_only;
3994 if (!old_gcc && t->opcode_modifier.oldgcc)
3995 continue;
3996
3997 /* Check AT&T mnemonic. */
3998 i.error = unsupported_with_intel_mnemonic;
3999 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4000 continue;
4001
4002 /* Check AT&T/Intel syntax. */
4003 i.error = unsupported_syntax;
4004 if ((intel_syntax && t->opcode_modifier.attsyntax)
4005 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4006 continue;
4007
4008 /* Check the suffix, except for some instructions in intel mode. */
4009 i.error = invalid_instruction_suffix;
4010 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4011 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4012 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4013 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4014 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4015 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4016 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4017 continue;
4018
4019 if (!operand_size_match (t))
4020 continue;
4021
4022 for (j = 0; j < MAX_OPERANDS; j++)
4023 operand_types[j] = t->operand_types[j];
4024
4025 /* In general, don't allow 64-bit operands in 32-bit mode. */
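	  /* Note: operand_types[t->operands > 1] below refers to operand 1 when
	     the template has more than one operand, and to operand 0 otherwise.  */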
4026 if (i.suffix == QWORD_MNEM_SUFFIX
4027 && flag_code != CODE_64BIT
4028 && (intel_syntax
4029 ? (!t->opcode_modifier.ignoresize
4030 && !intel_float_operand (t->name))
4031 : intel_float_operand (t->name) != 2)
4032 && ((!operand_types[0].bitfield.regmmx
4033 && !operand_types[0].bitfield.regxmm
4034 && !operand_types[0].bitfield.regymm)
4035 || (!operand_types[t->operands > 1].bitfield.regmmx
4036 		  && !operand_types[t->operands > 1].bitfield.regxmm
4037 		  && !operand_types[t->operands > 1].bitfield.regymm))
4038 && (t->base_opcode != 0x0fc7
4039 || t->extension_opcode != 1 /* cmpxchg8b */))
4040 continue;
4041
4042 /* In general, don't allow 32-bit operands on pre-386. */
4043 else if (i.suffix == LONG_MNEM_SUFFIX
4044 && !cpu_arch_flags.bitfield.cpui386
4045 && (intel_syntax
4046 ? (!t->opcode_modifier.ignoresize
4047 && !intel_float_operand (t->name))
4048 : intel_float_operand (t->name) != 2)
4049 && ((!operand_types[0].bitfield.regmmx
4050 && !operand_types[0].bitfield.regxmm)
4051 || (!operand_types[t->operands > 1].bitfield.regmmx
4052 		       && !operand_types[t->operands > 1].bitfield.regxmm)))
4053 continue;
4054
4055 /* Do not verify operands when there are none. */
4056 else
4057 {
4058 if (!t->operands)
4059 /* We've found a match; break out of loop. */
4060 break;
4061 }
4062
4063 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4064 into Disp32/Disp16/Disp32 operand. */
4065 if (i.prefix[ADDR_PREFIX] != 0)
4066 {
4067 /* There should be only one Disp operand. */
4068 switch (flag_code)
4069 {
4070 case CODE_16BIT:
4071 for (j = 0; j < MAX_OPERANDS; j++)
4072 {
4073 if (operand_types[j].bitfield.disp16)
4074 {
4075 addr_prefix_disp = j;
4076 operand_types[j].bitfield.disp32 = 1;
4077 operand_types[j].bitfield.disp16 = 0;
4078 break;
4079 }
4080 }
4081 break;
4082 case CODE_32BIT:
4083 for (j = 0; j < MAX_OPERANDS; j++)
4084 {
4085 if (operand_types[j].bitfield.disp32)
4086 {
4087 addr_prefix_disp = j;
4088 operand_types[j].bitfield.disp32 = 0;
4089 operand_types[j].bitfield.disp16 = 1;
4090 break;
4091 }
4092 }
4093 break;
4094 case CODE_64BIT:
4095 for (j = 0; j < MAX_OPERANDS; j++)
4096 {
4097 if (operand_types[j].bitfield.disp64)
4098 {
4099 addr_prefix_disp = j;
4100 operand_types[j].bitfield.disp64 = 0;
4101 operand_types[j].bitfield.disp32 = 1;
4102 break;
4103 }
4104 }
4105 break;
4106 }
4107 }
4108
4109 /* We check register size if needed. */
4110 check_register = t->opcode_modifier.checkregsize;
4111 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4112 switch (t->operands)
4113 {
4114 case 1:
4115 if (!operand_type_match (overlap0, i.types[0]))
4116 continue;
4117 break;
4118 case 2:
4119 	  /* xchg %eax, %eax is a special case.  It is an alias for nop
4120 only in 32bit mode and we can use opcode 0x90. In 64bit
4121 mode, we can't use 0x90 for xchg %eax, %eax since it should
4122 zero-extend %eax to %rax. */
4123 if (flag_code == CODE_64BIT
4124 && t->base_opcode == 0x90
4125 && operand_type_equal (&i.types [0], &acc32)
4126 && operand_type_equal (&i.types [1], &acc32))
4127 continue;
4128 if (i.swap_operand)
4129 {
4130 	      /* If we swap operands in the encoding, we either match
4131 		 the next one or reverse the direction of the operands.  */
4132 if (t->opcode_modifier.s)
4133 continue;
4134 else if (t->opcode_modifier.d)
4135 goto check_reverse;
4136 }
4137
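	  /* Fall through.  */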
4138 case 3:
4139 	  /* If we swap operands in the encoding, we match the next one.  */
4140 if (i.swap_operand && t->opcode_modifier.s)
4141 continue;
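	  /* Fall through.  */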
4142 case 4:
4143 case 5:
4144 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4145 if (!operand_type_match (overlap0, i.types[0])
4146 || !operand_type_match (overlap1, i.types[1])
4147 || (check_register
4148 && !operand_type_register_match (overlap0, i.types[0],
4149 operand_types[0],
4150 overlap1, i.types[1],
4151 operand_types[1])))
4152 {
4153 /* Check if other direction is valid ... */
4154 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4155 continue;
4156
4157 check_reverse:
4158 /* Try reversing direction of operands. */
4159 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4160 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4161 if (!operand_type_match (overlap0, i.types[0])
4162 || !operand_type_match (overlap1, i.types[1])
4163 || (check_register
4164 && !operand_type_register_match (overlap0,
4165 i.types[0],
4166 operand_types[1],
4167 overlap1,
4168 i.types[1],
4169 operand_types[0])))
4170 {
4171 /* Does not match either direction. */
4172 continue;
4173 }
4174 /* found_reverse_match holds which of D or FloatDR
4175 we've found. */
4176 if (t->opcode_modifier.d)
4177 found_reverse_match = Opcode_D;
4178 else if (t->opcode_modifier.floatd)
4179 found_reverse_match = Opcode_FloatD;
4180 else
4181 found_reverse_match = 0;
4182 if (t->opcode_modifier.floatr)
4183 found_reverse_match |= Opcode_FloatR;
4184 }
4185 else
4186 {
4187 /* Found a forward 2 operand match here. */
4188 switch (t->operands)
4189 {
4190 case 5:
4191 overlap4 = operand_type_and (i.types[4],
4192 operand_types[4]);
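		  /* Fall through.  */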
4193 case 4:
4194 overlap3 = operand_type_and (i.types[3],
4195 operand_types[3]);
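		  /* Fall through.  */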
4196 case 3:
4197 overlap2 = operand_type_and (i.types[2],
4198 operand_types[2]);
4199 break;
4200 }
4201
4202 switch (t->operands)
4203 {
4204 case 5:
4205 if (!operand_type_match (overlap4, i.types[4])
4206 || !operand_type_register_match (overlap3,
4207 i.types[3],
4208 operand_types[3],
4209 overlap4,
4210 i.types[4],
4211 operand_types[4]))
4212 continue;
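		  /* Fall through.  */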
4213 case 4:
4214 if (!operand_type_match (overlap3, i.types[3])
4215 || (check_register
4216 && !operand_type_register_match (overlap2,
4217 i.types[2],
4218 operand_types[2],
4219 overlap3,
4220 i.types[3],
4221 operand_types[3])))
4222 continue;
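		  /* Fall through.  */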
4223 case 3:
4224 /* Here we make use of the fact that there are no
4225 reverse match 3 operand instructions, and all 3
4226 operand instructions only need to be checked for
4227 register consistency between operands 2 and 3. */
4228 if (!operand_type_match (overlap2, i.types[2])
4229 || (check_register
4230 && !operand_type_register_match (overlap1,
4231 i.types[1],
4232 operand_types[1],
4233 overlap2,
4234 i.types[2],
4235 operand_types[2])))
4236 continue;
4237 break;
4238 }
4239 }
4240 /* Found either forward/reverse 2, 3 or 4 operand match here:
4241 slip through to break. */
4242 }
4243 if (!found_cpu_match)
4244 {
4245 found_reverse_match = 0;
4246 continue;
4247 }
4248
4249 /* Check if vector operands are valid. */
4250 if (check_VecOperands (t))
4251 continue;
4252
4253 /* Check if VEX operands are valid. */
4254 if (VEX_check_operands (t))
4255 continue;
4256
4257 /* We've found a match; break out of loop. */
4258 break;
4259 }
4260
4261 if (t == current_templates->end)
4262 {
4263 /* We found no match. */
4264 const char *err_msg;
4265 switch (i.error)
4266 {
4267 default:
4268 abort ();
4269 case operand_size_mismatch:
4270 err_msg = _("operand size mismatch");
4271 break;
4272 case operand_type_mismatch:
4273 err_msg = _("operand type mismatch");
4274 break;
4275 case register_type_mismatch:
4276 err_msg = _("register type mismatch");
4277 break;
4278 case number_of_operands_mismatch:
4279 err_msg = _("number of operands mismatch");
4280 break;
4281 case invalid_instruction_suffix:
4282 err_msg = _("invalid instruction suffix");
4283 break;
4284 case bad_imm4:
4285 err_msg = _("Imm4 isn't the first operand");
4286 break;
4287 case old_gcc_only:
4288 err_msg = _("only supported with old gcc");
4289 break;
4290 case unsupported_with_intel_mnemonic:
4291 err_msg = _("unsupported with Intel mnemonic");
4292 break;
4293 case unsupported_syntax:
4294 err_msg = _("unsupported syntax");
4295 break;
4296 case unsupported:
4297 err_msg = _("unsupported");
4298 break;
4299 case invalid_vsib_address:
4300 err_msg = _("invalid VSIB address");
4301 break;
4302 case unsupported_vector_index_register:
4303 err_msg = _("unsupported vector index register");
4304 break;
4305 }
4306 as_bad (_("%s for `%s'"), err_msg,
4307 current_templates->start->name);
4308 return NULL;
4309 }
4310
4311 if (!quiet_warnings)
4312 {
4313 if (!intel_syntax
4314 && (i.types[0].bitfield.jumpabsolute
4315 != operand_types[0].bitfield.jumpabsolute))
4316 {
4317 as_warn (_("indirect %s without `*'"), t->name);
4318 }
4319
4320 if (t->opcode_modifier.isprefix
4321 && t->opcode_modifier.ignoresize)
4322 {
4323 /* Warn them that a data or address size prefix doesn't
4324 affect assembly of the next line of code. */
4325 as_warn (_("stand-alone `%s' prefix"), t->name);
4326 }
4327 }
4328
4329 /* Copy the template we found. */
4330 i.tm = *t;
4331
4332 if (addr_prefix_disp != -1)
4333 i.tm.operand_types[addr_prefix_disp]
4334 = operand_types[addr_prefix_disp];
4335
4336 if (found_reverse_match)
4337 {
4338 /* If we found a reverse match we must alter the opcode
4339 direction bit. found_reverse_match holds bits to change
4340 (different for int & float insns). */
4341
4342 i.tm.base_opcode ^= found_reverse_match;
4343
4344 i.tm.operand_types[0] = operand_types[1];
4345 i.tm.operand_types[1] = operand_types[0];
4346 }
4347
4348 return t;
4349 }
4350
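/* Check segment overrides on string instructions, and normalize where the
   override is stored so later code can treat all insns alike.  */
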
4351 static int
4352 check_string (void)
4353 {
4354 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4355 if (i.tm.operand_types[mem_op].bitfield.esseg)
4356 {
4357 if (i.seg[0] != NULL && i.seg[0] != &es)
4358 {
4359 as_bad (_("`%s' operand %d must use `%ses' segment"),
4360 i.tm.name,
4361 mem_op + 1,
4362 register_prefix);
4363 return 0;
4364 }
4365 /* There's only ever one segment override allowed per instruction.
4366 This instruction possibly has a legal segment override on the
4367 second operand, so copy the segment to where non-string
4368 instructions store it, allowing common code. */
4369 i.seg[0] = i.seg[1];
4370 }
4371 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4372 {
4373 if (i.seg[1] != NULL && i.seg[1] != &es)
4374 {
4375 as_bad (_("`%s' operand %d must use `%ses' segment"),
4376 i.tm.name,
4377 mem_op + 2,
4378 register_prefix);
4379 return 0;
4380 }
4381 }
4382 return 1;
4383 }
4384
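/* Determine the instruction mnemonic suffix and adjust the opcode,
   prefixes and REX bits to match the operand size.  */
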
4385 static int
4386 process_suffix (void)
4387 {
4388 /* If matched instruction specifies an explicit instruction mnemonic
4389 suffix, use it. */
4390 if (i.tm.opcode_modifier.size16)
4391 i.suffix = WORD_MNEM_SUFFIX;
4392 else if (i.tm.opcode_modifier.size32)
4393 i.suffix = LONG_MNEM_SUFFIX;
4394 else if (i.tm.opcode_modifier.size64)
4395 i.suffix = QWORD_MNEM_SUFFIX;
4396 else if (i.reg_operands)
4397 {
4398 /* If there's no instruction mnemonic suffix we try to invent one
4399 based on register operands. */
4400 if (!i.suffix)
4401 {
4402 	      /* We take i.suffix from the last register operand specified.
4403 Destination register type is more significant than source
4404 register type. crc32 in SSE4.2 prefers source register
4405 type. */
4406 if (i.tm.base_opcode == 0xf20f38f1)
4407 {
4408 if (i.types[0].bitfield.reg16)
4409 i.suffix = WORD_MNEM_SUFFIX;
4410 else if (i.types[0].bitfield.reg32)
4411 i.suffix = LONG_MNEM_SUFFIX;
4412 else if (i.types[0].bitfield.reg64)
4413 i.suffix = QWORD_MNEM_SUFFIX;
4414 }
4415 else if (i.tm.base_opcode == 0xf20f38f0)
4416 {
4417 if (i.types[0].bitfield.reg8)
4418 i.suffix = BYTE_MNEM_SUFFIX;
4419 }
4420
4421 if (!i.suffix)
4422 {
4423 int op;
4424
4425 if (i.tm.base_opcode == 0xf20f38f1
4426 || i.tm.base_opcode == 0xf20f38f0)
4427 {
4428 /* We have to know the operand size for crc32. */
4429 		  as_bad (_("ambiguous memory operand size for `%s'"),
4430 i.tm.name);
4431 return 0;
4432 }
4433
4434 for (op = i.operands; --op >= 0;)
4435 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4436 {
4437 if (i.types[op].bitfield.reg8)
4438 {
4439 i.suffix = BYTE_MNEM_SUFFIX;
4440 break;
4441 }
4442 else if (i.types[op].bitfield.reg16)
4443 {
4444 i.suffix = WORD_MNEM_SUFFIX;
4445 break;
4446 }
4447 else if (i.types[op].bitfield.reg32)
4448 {
4449 i.suffix = LONG_MNEM_SUFFIX;
4450 break;
4451 }
4452 else if (i.types[op].bitfield.reg64)
4453 {
4454 i.suffix = QWORD_MNEM_SUFFIX;
4455 break;
4456 }
4457 }
4458 }
4459 }
4460 else if (i.suffix == BYTE_MNEM_SUFFIX)
4461 {
4462 if (intel_syntax
4463 && i.tm.opcode_modifier.ignoresize
4464 && i.tm.opcode_modifier.no_bsuf)
4465 i.suffix = 0;
4466 else if (!check_byte_reg ())
4467 return 0;
4468 }
4469 else if (i.suffix == LONG_MNEM_SUFFIX)
4470 {
4471 if (intel_syntax
4472 && i.tm.opcode_modifier.ignoresize
4473 && i.tm.opcode_modifier.no_lsuf)
4474 i.suffix = 0;
4475 else if (!check_long_reg ())
4476 return 0;
4477 }
4478 else if (i.suffix == QWORD_MNEM_SUFFIX)
4479 {
4480 if (intel_syntax
4481 && i.tm.opcode_modifier.ignoresize
4482 && i.tm.opcode_modifier.no_qsuf)
4483 i.suffix = 0;
4484 else if (!check_qword_reg ())
4485 return 0;
4486 }
4487 else if (i.suffix == WORD_MNEM_SUFFIX)
4488 {
4489 if (intel_syntax
4490 && i.tm.opcode_modifier.ignoresize
4491 && i.tm.opcode_modifier.no_wsuf)
4492 i.suffix = 0;
4493 else if (!check_word_reg ())
4494 return 0;
4495 }
4496 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4497 || i.suffix == YMMWORD_MNEM_SUFFIX)
4498 {
4499 /* Skip if the instruction has x/y suffix. match_template
4500 should check if it is a valid suffix. */
4501 }
4502 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4503 /* Do nothing if the instruction is going to ignore the prefix. */
4504 ;
4505 else
4506 abort ();
4507 }
4508 else if (i.tm.opcode_modifier.defaultsize
4509 && !i.suffix
4510 /* exclude fldenv/frstor/fsave/fstenv */
4511 && i.tm.opcode_modifier.no_ssuf)
4512 {
4513 i.suffix = stackop_size;
4514 }
4515 else if (intel_syntax
4516 && !i.suffix
4517 && (i.tm.operand_types[0].bitfield.jumpabsolute
4518 || i.tm.opcode_modifier.jumpbyte
4519 || i.tm.opcode_modifier.jumpintersegment
4520 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4521 && i.tm.extension_opcode <= 3)))
4522 {
4523 switch (flag_code)
4524 {
4525 case CODE_64BIT:
4526 if (!i.tm.opcode_modifier.no_qsuf)
4527 {
4528 i.suffix = QWORD_MNEM_SUFFIX;
4529 break;
4530 }
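	  /* Fall through.  */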
4531 case CODE_32BIT:
4532 if (!i.tm.opcode_modifier.no_lsuf)
4533 i.suffix = LONG_MNEM_SUFFIX;
4534 break;
4535 case CODE_16BIT:
4536 if (!i.tm.opcode_modifier.no_wsuf)
4537 i.suffix = WORD_MNEM_SUFFIX;
4538 break;
4539 }
4540 }
4541
4542 if (!i.suffix)
4543 {
4544 if (!intel_syntax)
4545 {
4546 if (i.tm.opcode_modifier.w)
4547 {
4548 as_bad (_("no instruction mnemonic suffix given and "
4549 "no register operands; can't size instruction"));
4550 return 0;
4551 }
4552 }
4553 else
4554 {
4555 unsigned int suffixes;
4556
4557 suffixes = !i.tm.opcode_modifier.no_bsuf;
4558 if (!i.tm.opcode_modifier.no_wsuf)
4559 suffixes |= 1 << 1;
4560 if (!i.tm.opcode_modifier.no_lsuf)
4561 suffixes |= 1 << 2;
4562 if (!i.tm.opcode_modifier.no_ldsuf)
4563 suffixes |= 1 << 3;
4564 if (!i.tm.opcode_modifier.no_ssuf)
4565 suffixes |= 1 << 4;
4566 if (!i.tm.opcode_modifier.no_qsuf)
4567 suffixes |= 1 << 5;
4568
4569 	  /* There is more than one possible suffix match.  */
4570 if (i.tm.opcode_modifier.w
4571 || ((suffixes & (suffixes - 1))
4572 && !i.tm.opcode_modifier.defaultsize
4573 && !i.tm.opcode_modifier.ignoresize))
4574 {
4575 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4576 return 0;
4577 }
4578 }
4579 }
4580
4581 /* Change the opcode based on the operand size given by i.suffix;
4582 We don't need to change things for byte insns. */
4583
4584 if (i.suffix
4585 && i.suffix != BYTE_MNEM_SUFFIX
4586 && i.suffix != XMMWORD_MNEM_SUFFIX
4587 && i.suffix != YMMWORD_MNEM_SUFFIX)
4588 {
4589 /* It's not a byte, select word/dword operation. */
4590 if (i.tm.opcode_modifier.w)
4591 {
4592 if (i.tm.opcode_modifier.shortform)
4593 i.tm.base_opcode |= 8;
4594 else
4595 i.tm.base_opcode |= 1;
4596 }
4597
4598 /* Now select between word & dword operations via the operand
4599 size prefix, except for instructions that will ignore this
4600 prefix anyway. */
4601 if (i.tm.opcode_modifier.addrprefixop0)
4602 {
4603 /* The address size override prefix changes the size of the
4604 first operand. */
4605 if ((flag_code == CODE_32BIT
4606 && i.op->regs[0].reg_type.bitfield.reg16)
4607 || (flag_code != CODE_32BIT
4608 && i.op->regs[0].reg_type.bitfield.reg32))
4609 if (!add_prefix (ADDR_PREFIX_OPCODE))
4610 return 0;
4611 }
4612 else if (i.suffix != QWORD_MNEM_SUFFIX
4613 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4614 && !i.tm.opcode_modifier.ignoresize
4615 && !i.tm.opcode_modifier.floatmf
4616 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4617 || (flag_code == CODE_64BIT
4618 && i.tm.opcode_modifier.jumpbyte)))
4619 {
4620 unsigned int prefix = DATA_PREFIX_OPCODE;
4621
4622 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4623 prefix = ADDR_PREFIX_OPCODE;
4624
4625 if (!add_prefix (prefix))
4626 return 0;
4627 }
4628
4629 /* Set mode64 for an operand. */
4630 if (i.suffix == QWORD_MNEM_SUFFIX
4631 && flag_code == CODE_64BIT
4632 && !i.tm.opcode_modifier.norex64)
4633 {
4634 	      /* Special case for xchg %rax,%rax.  It is a NOP and doesn't
4635 need rex64. cmpxchg8b is also a special case. */
4636 if (! (i.operands == 2
4637 && i.tm.base_opcode == 0x90
4638 && i.tm.extension_opcode == None
4639 && operand_type_equal (&i.types [0], &acc64)
4640 && operand_type_equal (&i.types [1], &acc64))
4641 && ! (i.operands == 1
4642 && i.tm.base_opcode == 0xfc7
4643 && i.tm.extension_opcode == 1
4644 && !operand_type_check (i.types [0], reg)
4645 && operand_type_check (i.types [0], anymem)))
4646 i.rex |= REX_W;
4647 }
4648
4649 /* Size floating point instruction. */
4650 if (i.suffix == LONG_MNEM_SUFFIX)
4651 if (i.tm.opcode_modifier.floatmf)
4652 i.tm.base_opcode ^= 4;
4653 }
4654
4655 return 1;
4656 }
4657
4658 static int
4659 check_byte_reg (void)
4660 {
4661 int op;
4662
4663 for (op = i.operands; --op >= 0;)
4664 {
4665 /* If this is an eight bit register, it's OK. If it's the 16 or
4666 32 bit version of an eight bit register, we will just use the
4667 low portion, and that's OK too. */
4668 if (i.types[op].bitfield.reg8)
4669 continue;
4670
4671 /* crc32 doesn't generate this warning. */
4672 if (i.tm.base_opcode == 0xf20f38f0)
4673 continue;
4674
4675 if ((i.types[op].bitfield.reg16
4676 || i.types[op].bitfield.reg32
4677 || i.types[op].bitfield.reg64)
4678 && i.op[op].regs->reg_num < 4)
4679 {
4680 /* Prohibit these changes in the 64bit mode, since the
4681 lowering is more complicated. */
4682 if (flag_code == CODE_64BIT
4683 && !i.tm.operand_types[op].bitfield.inoutportreg)
4684 {
4685 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4686 register_prefix, i.op[op].regs->reg_name,
4687 i.suffix);
4688 return 0;
4689 }
4690 #if REGISTER_WARNINGS
4691 if (!quiet_warnings
4692 && !i.tm.operand_types[op].bitfield.inoutportreg)
4693 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4694 register_prefix,
4695 (i.op[op].regs + (i.types[op].bitfield.reg16
4696 ? REGNAM_AL - REGNAM_AX
4697 : REGNAM_AL - REGNAM_EAX))->reg_name,
4698 register_prefix,
4699 i.op[op].regs->reg_name,
4700 i.suffix);
4701 #endif
4702 continue;
4703 }
4704 /* Any other register is bad. */
4705 if (i.types[op].bitfield.reg16
4706 || i.types[op].bitfield.reg32
4707 || i.types[op].bitfield.reg64
4708 || i.types[op].bitfield.regmmx
4709 || i.types[op].bitfield.regxmm
4710 || i.types[op].bitfield.regymm
4711 || i.types[op].bitfield.sreg2
4712 || i.types[op].bitfield.sreg3
4713 || i.types[op].bitfield.control
4714 || i.types[op].bitfield.debug
4715 || i.types[op].bitfield.test
4716 || i.types[op].bitfield.floatreg
4717 || i.types[op].bitfield.floatacc)
4718 {
4719 as_bad (_("`%s%s' not allowed with `%s%c'"),
4720 register_prefix,
4721 i.op[op].regs->reg_name,
4722 i.tm.name,
4723 i.suffix);
4724 return 0;
4725 }
4726 }
4727 return 1;
4728 }
4729
4730 static int
4731 check_long_reg (void)
4732 {
4733 int op;
4734
4735 for (op = i.operands; --op >= 0;)
4736 /* Reject eight bit registers, except where the template requires
4737 them. (eg. movzb) */
4738 if (i.types[op].bitfield.reg8
4739 && (i.tm.operand_types[op].bitfield.reg16
4740 || i.tm.operand_types[op].bitfield.reg32
4741 || i.tm.operand_types[op].bitfield.acc))
4742 {
4743 as_bad (_("`%s%s' not allowed with `%s%c'"),
4744 register_prefix,
4745 i.op[op].regs->reg_name,
4746 i.tm.name,
4747 i.suffix);
4748 return 0;
4749 }
4750 /* Warn if the e prefix on a general reg is missing. */
4751 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4752 && i.types[op].bitfield.reg16
4753 && (i.tm.operand_types[op].bitfield.reg32
4754 || i.tm.operand_types[op].bitfield.acc))
4755 {
4756 /* Prohibit these changes in the 64bit mode, since the
4757 lowering is more complicated. */
4758 if (flag_code == CODE_64BIT)
4759 {
4760 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4761 register_prefix, i.op[op].regs->reg_name,
4762 i.suffix);
4763 return 0;
4764 }
4765 #if REGISTER_WARNINGS
4766 else
4767 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4768 register_prefix,
4769 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4770 register_prefix,
4771 i.op[op].regs->reg_name,
4772 i.suffix);
4773 #endif
4774 }
4775 /* Warn if the r prefix on a general reg is missing. */
4776 else if (i.types[op].bitfield.reg64
4777 && (i.tm.operand_types[op].bitfield.reg32
4778 || i.tm.operand_types[op].bitfield.acc))
4779 {
4780 if (intel_syntax
4781 && i.tm.opcode_modifier.toqword
4782 && !i.types[0].bitfield.regxmm)
4783 {
4784 /* Convert to QWORD. We want REX byte. */
4785 i.suffix = QWORD_MNEM_SUFFIX;
4786 }
4787 else
4788 {
4789 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4790 register_prefix, i.op[op].regs->reg_name,
4791 i.suffix);
4792 return 0;
4793 }
4794 }
4795 return 1;
4796 }
4797
4798 static int
4799 check_qword_reg (void)
4800 {
4801 int op;
4802
4803 for (op = i.operands; --op >= 0; )
4804 /* Reject eight bit registers, except where the template requires
4805 them. (eg. movzb) */
4806 if (i.types[op].bitfield.reg8
4807 && (i.tm.operand_types[op].bitfield.reg16
4808 || i.tm.operand_types[op].bitfield.reg32
4809 || i.tm.operand_types[op].bitfield.acc))
4810 {
4811 as_bad (_("`%s%s' not allowed with `%s%c'"),
4812 register_prefix,
4813 i.op[op].regs->reg_name,
4814 i.tm.name,
4815 i.suffix);
4816 return 0;
4817 }
4818 	    /* Reject a 16- or 32-bit register where a 64-bit register is expected.  */
4819 else if ((i.types[op].bitfield.reg16
4820 || i.types[op].bitfield.reg32)
4821 && (i.tm.operand_types[op].bitfield.reg32
4822 || i.tm.operand_types[op].bitfield.acc))
4823 {
4824 	      /* Intel syntax may convert templates marked ToDword to use a
4825 		 32-bit operand size; any other 16/32-bit register is an error.  */
4826 if (intel_syntax
4827 && i.tm.opcode_modifier.todword
4828 && !i.types[0].bitfield.regxmm)
4829 {
4830 /* Convert to DWORD. We don't want REX byte. */
4831 i.suffix = LONG_MNEM_SUFFIX;
4832 }
4833 else
4834 {
4835 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4836 register_prefix, i.op[op].regs->reg_name,
4837 i.suffix);
4838 return 0;
4839 }
4840 }
4841 return 1;
4842 }
4843
4844 static int
4845 check_word_reg (void)
4846 {
4847 int op;
4848 for (op = i.operands; --op >= 0;)
4849 /* Reject eight bit registers, except where the template requires
4850 them. (eg. movzb) */
4851 if (i.types[op].bitfield.reg8
4852 && (i.tm.operand_types[op].bitfield.reg16
4853 || i.tm.operand_types[op].bitfield.reg32
4854 || i.tm.operand_types[op].bitfield.acc))
4855 {
4856 as_bad (_("`%s%s' not allowed with `%s%c'"),
4857 register_prefix,
4858 i.op[op].regs->reg_name,
4859 i.tm.name,
4860 i.suffix);
4861 return 0;
4862 }
4863 /* Warn if the e prefix on a general reg is present. */
4864 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4865 && i.types[op].bitfield.reg32
4866 && (i.tm.operand_types[op].bitfield.reg16
4867 || i.tm.operand_types[op].bitfield.acc))
4868 {
4869 /* Prohibit these changes in the 64bit mode, since the
4870 lowering is more complicated. */
4871 if (flag_code == CODE_64BIT)
4872 {
4873 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4874 register_prefix, i.op[op].regs->reg_name,
4875 i.suffix);
4876 return 0;
4877 }
4878 #if REGISTER_WARNINGS
4879 	    else
4880 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4881 register_prefix,
4882 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4883 register_prefix,
4884 i.op[op].regs->reg_name,
4885 i.suffix);
4886 #endif
4887 }
4888 return 1;
4889 }
4890
4891 static int
4892 update_imm (unsigned int j)
4893 {
4894 i386_operand_type overlap = i.types[j];
4895 if ((overlap.bitfield.imm8
4896 || overlap.bitfield.imm8s
4897 || overlap.bitfield.imm16
4898 || overlap.bitfield.imm32
4899 || overlap.bitfield.imm32s
4900 || overlap.bitfield.imm64)
4901 && !operand_type_equal (&overlap, &imm8)
4902 && !operand_type_equal (&overlap, &imm8s)
4903 && !operand_type_equal (&overlap, &imm16)
4904 && !operand_type_equal (&overlap, &imm32)
4905 && !operand_type_equal (&overlap, &imm32s)
4906 && !operand_type_equal (&overlap, &imm64))
4907 {
4908 if (i.suffix)
4909 {
4910 i386_operand_type temp;
4911
4912 operand_type_set (&temp, 0);
4913 if (i.suffix == BYTE_MNEM_SUFFIX)
4914 {
4915 temp.bitfield.imm8 = overlap.bitfield.imm8;
4916 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4917 }
4918 else if (i.suffix == WORD_MNEM_SUFFIX)
4919 temp.bitfield.imm16 = overlap.bitfield.imm16;
4920 else if (i.suffix == QWORD_MNEM_SUFFIX)
4921 {
4922 temp.bitfield.imm64 = overlap.bitfield.imm64;
4923 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4924 }
4925 else
4926 temp.bitfield.imm32 = overlap.bitfield.imm32;
4927 overlap = temp;
4928 }
4929 else if (operand_type_equal (&overlap, &imm16_32_32s)
4930 || operand_type_equal (&overlap, &imm16_32)
4931 || operand_type_equal (&overlap, &imm16_32s))
4932 {
4933 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4934 overlap = imm16;
4935 else
4936 overlap = imm32s;
4937 }
4938 if (!operand_type_equal (&overlap, &imm8)
4939 && !operand_type_equal (&overlap, &imm8s)
4940 && !operand_type_equal (&overlap, &imm16)
4941 && !operand_type_equal (&overlap, &imm32)
4942 && !operand_type_equal (&overlap, &imm32s)
4943 && !operand_type_equal (&overlap, &imm64))
4944 {
4945 as_bad (_("no instruction mnemonic suffix given; "
4946 "can't determine immediate size"));
4947 return 0;
4948 }
4949 }
4950 i.types[j] = overlap;
4951
4952 return 1;
4953 }
4954
4955 static int
4956 finalize_imm (void)
4957 {
4958 unsigned int j, n;
4959
4960 /* Update the first 2 immediate operands. */
4961 n = i.operands > 2 ? 2 : i.operands;
4962 if (n)
4963 {
4964 for (j = 0; j < n; j++)
4965 if (update_imm (j) == 0)
4966 return 0;
4967
4968 	      /* The 3rd operand can't be an immediate operand.  */
4969 gas_assert (operand_type_check (i.types[2], imm) == 0);
4970 }
4971
4972 return 1;
4973 }
4974
4975 static int
4976 bad_implicit_operand (int xmm)
4977 {
4978 const char *ireg = xmm ? "xmm0" : "ymm0";
4979
4980 if (intel_syntax)
4981 as_bad (_("the last operand of `%s' must be `%s%s'"),
4982 i.tm.name, register_prefix, ireg);
4983 else
4984 as_bad (_("the first operand of `%s' must be `%s%s'"),
4985 i.tm.name, register_prefix, ireg);
4986 return 0;
4987 }
4988
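/* Finish processing operands: handle implicit xmm0/ymm0 operands, short
   form register encodings, build the ModRM byte when needed, and emit any
   required segment override prefix.  */
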
4989 static int
4990 process_operands (void)
4991 {
4992 /* Default segment register this instruction will use for memory
4993 accesses. 0 means unknown. This is only for optimizing out
4994 unnecessary segment overrides. */
4995 const seg_entry *default_seg = 0;
4996
4997 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4998 {
4999 unsigned int dupl = i.operands;
5000 unsigned int dest = dupl - 1;
5001 unsigned int j;
5002
5003 /* The destination must be an xmm register. */
5004 gas_assert (i.reg_operands
5005 && MAX_OPERANDS > dupl
5006 && operand_type_equal (&i.types[dest], &regxmm));
5007
5008 if (i.tm.opcode_modifier.firstxmm0)
5009 {
5010 /* The first operand is implicit and must be xmm0. */
5011 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5012 if (i.op[0].regs->reg_num != 0)
5013 return bad_implicit_operand (1);
5014
5015 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5016 {
5017 /* Keep xmm0 for instructions with VEX prefix and 3
5018 sources. */
5019 goto duplicate;
5020 }
5021 else
5022 {
5023 /* We remove the first xmm0 and keep the number of
5024 operands unchanged, which in fact duplicates the
5025 destination. */
5026 for (j = 1; j < i.operands; j++)
5027 {
5028 i.op[j - 1] = i.op[j];
5029 i.types[j - 1] = i.types[j];
5030 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5031 }
5032 }
5033 }
5034 else if (i.tm.opcode_modifier.implicit1stxmm0)
5035 {
5036 gas_assert ((MAX_OPERANDS - 1) > dupl
5037 && (i.tm.opcode_modifier.vexsources
5038 == VEX3SOURCES));
5039
5040 /* Add the implicit xmm0 for instructions with VEX prefix
5041 and 3 sources. */
5042 for (j = i.operands; j > 0; j--)
5043 {
5044 i.op[j] = i.op[j - 1];
5045 i.types[j] = i.types[j - 1];
5046 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5047 }
5048 i.op[0].regs
5049 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5050 i.types[0] = regxmm;
5051 i.tm.operand_types[0] = regxmm;
5052
5053 i.operands += 2;
5054 i.reg_operands += 2;
5055 i.tm.operands += 2;
5056
5057 dupl++;
5058 dest++;
5059 i.op[dupl] = i.op[dest];
5060 i.types[dupl] = i.types[dest];
5061 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5062 }
5063 else
5064 {
5065 duplicate:
5066 i.operands++;
5067 i.reg_operands++;
5068 i.tm.operands++;
5069
5070 i.op[dupl] = i.op[dest];
5071 i.types[dupl] = i.types[dest];
5072 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5073 }
5074
5075 if (i.tm.opcode_modifier.immext)
5076 process_immext ();
5077 }
5078 else if (i.tm.opcode_modifier.firstxmm0)
5079 {
5080 unsigned int j;
5081
5082 /* The first operand is implicit and must be xmm0/ymm0. */
5083 gas_assert (i.reg_operands
5084 && (operand_type_equal (&i.types[0], &regxmm)
5085 || operand_type_equal (&i.types[0], &regymm)));
5086 if (i.op[0].regs->reg_num != 0)
5087 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5088
5089 for (j = 1; j < i.operands; j++)
5090 {
5091 i.op[j - 1] = i.op[j];
5092 i.types[j - 1] = i.types[j];
5093
5094 /* We need to adjust fields in i.tm since they are used by
5095 build_modrm_byte. */
5096 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5097 }
5098
5099 i.operands--;
5100 i.reg_operands--;
5101 i.tm.operands--;
5102 }
5103 else if (i.tm.opcode_modifier.regkludge)
5104 {
5105 /* The imul $imm, %reg instruction is converted into
5106 imul $imm, %reg, %reg, and the clr %reg instruction
5107 is converted into xor %reg, %reg. */
5108
5109 unsigned int first_reg_op;
5110
5111 if (operand_type_check (i.types[0], reg))
5112 first_reg_op = 0;
5113 else
5114 first_reg_op = 1;
5115 /* Pretend we saw the extra register operand. */
5116 gas_assert (i.reg_operands == 1
5117 && i.op[first_reg_op + 1].regs == 0);
5118 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5119 i.types[first_reg_op + 1] = i.types[first_reg_op];
5120 i.operands++;
5121 i.reg_operands++;
5122 }
5123
5124 if (i.tm.opcode_modifier.shortform)
5125 {
5126 if (i.types[0].bitfield.sreg2
5127 || i.types[0].bitfield.sreg3)
5128 {
5129 if (i.tm.base_opcode == POP_SEG_SHORT
5130 && i.op[0].regs->reg_num == 1)
5131 {
5132 as_bad (_("you can't `pop %scs'"), register_prefix);
5133 return 0;
5134 }
5135 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5136 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5137 i.rex |= REX_B;
5138 }
5139 else
5140 {
5141 /* The register or float register operand is in operand
5142 0 or 1. */
5143 unsigned int op;
5144
5145 if (i.types[0].bitfield.floatreg
5146 || operand_type_check (i.types[0], reg))
5147 op = 0;
5148 else
5149 op = 1;
5150 /* Register goes in low 3 bits of opcode. */
5151 i.tm.base_opcode |= i.op[op].regs->reg_num;
5152 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5153 i.rex |= REX_B;
5154 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5155 {
5156 /* Warn about some common errors, but press on regardless.
5157 The first case can be generated by gcc (<= 2.8.1). */
5158 if (i.operands == 2)
5159 {
5160 /* Reversed arguments on faddp, fsubp, etc. */
5161 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5162 register_prefix, i.op[!intel_syntax].regs->reg_name,
5163 register_prefix, i.op[intel_syntax].regs->reg_name);
5164 }
5165 else
5166 {
5167 /* Extraneous `l' suffix on fp insn. */
5168 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5169 register_prefix, i.op[0].regs->reg_name);
5170 }
5171 }
5172 }
5173 }
5174 else if (i.tm.opcode_modifier.modrm)
5175 {
5176 /* The opcode is completed (modulo i.tm.extension_opcode which
5177 must be put into the modrm byte). Now, we make the modrm and
5178 index base bytes based on all the info we've collected. */
5179
5180 default_seg = build_modrm_byte ();
5181 }
5182 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5183 {
5184 default_seg = &ds;
5185 }
5186 else if (i.tm.opcode_modifier.isstring)
5187 {
5188 /* For the string instructions that allow a segment override
5189 on one of their operands, the default segment is ds. */
5190 default_seg = &ds;
5191 }
5192
5193 if (i.tm.base_opcode == 0x8d /* lea */
5194 && i.seg[0]
5195 && !quiet_warnings)
5196 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5197
5198 /* If a segment was explicitly specified, and the specified segment
5199 is not the default, use an opcode prefix to select it. If we
5200 never figured out what the default segment is, then default_seg
5201 will be zero at this point, and the specified segment prefix will
5202 always be used. */
5203 if ((i.seg[0]) && (i.seg[0] != default_seg))
5204 {
5205 if (!add_prefix (i.seg[0]->seg_prefix))
5206 return 0;
5207 }
5208 return 1;
5209 }
5210
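/* Build the ModRM byte (plus SIB byte and REX bits) from the operand
   information collected so far.  Returns the default segment register
   for any memory operand.  */
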
5211 static const seg_entry *
5212 build_modrm_byte (void)
5213 {
5214 const seg_entry *default_seg = 0;
5215 unsigned int source, dest;
5216 int vex_3_sources;
5217
5218 /* The first operand of instructions with VEX prefix and 3 sources
5219 must be VEX_Imm4. */
5220 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5221 if (vex_3_sources)
5222 {
5223 unsigned int nds, reg_slot;
5224 expressionS *exp;
5225
5226 if (i.tm.opcode_modifier.veximmext
5227 && i.tm.opcode_modifier.immext)
5228 {
5229 dest = i.operands - 2;
5230 gas_assert (dest == 3);
5231 }
5232 else
5233 dest = i.operands - 1;
5234 nds = dest - 1;
5235
5236 /* There are 2 kinds of instructions:
5237 1. 5 operands: 4 register operands or 3 register operands
5238 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5239 VexW0 or VexW1. The destination must be either XMM or YMM
5240 	     VexW0 or VexW1.  The destination must be either an XMM or a YMM
5241 2. 4 operands: 4 register operands or 3 register operands
5242 plus 1 memory operand, VexXDS, and VexImmExt */
5243 gas_assert ((i.reg_operands == 4
5244 || (i.reg_operands == 3 && i.mem_operands == 1))
5245 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5246 && (i.tm.opcode_modifier.veximmext
5247 || (i.imm_operands == 1
5248 && i.types[0].bitfield.vec_imm4
5249 && (i.tm.opcode_modifier.vexw == VEXW0
5250 || i.tm.opcode_modifier.vexw == VEXW1)
5251 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5252 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5253
5254 if (i.imm_operands == 0)
5255 {
5256 /* When there is no immediate operand, generate an 8bit
5257 immediate operand to encode the first operand. */
5258 exp = &im_expressions[i.imm_operands++];
5259 i.op[i.operands].imms = exp;
5260 i.types[i.operands] = imm8;
5261 i.operands++;
5262 /* If VexW1 is set, the first operand is the source and
5263 the second operand is encoded in the immediate operand. */
5264 if (i.tm.opcode_modifier.vexw == VEXW1)
5265 {
5266 source = 0;
5267 reg_slot = 1;
5268 }
5269 else
5270 {
5271 source = 1;
5272 reg_slot = 0;
5273 }
5274
5275 /* FMA swaps REG and NDS. */
5276 if (i.tm.cpu_flags.bitfield.cpufma)
5277 {
5278 unsigned int tmp;
5279 tmp = reg_slot;
5280 reg_slot = nds;
5281 nds = tmp;
5282 }
5283
5284 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5285 &regxmm)
5286 || operand_type_equal (&i.tm.operand_types[reg_slot],
5287 &regymm));
5288 exp->X_op = O_constant;
5289 exp->X_add_number
5290 = ((i.op[reg_slot].regs->reg_num
5291 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5292 << 4);
5293 }
5294 else
5295 {
5296 unsigned int imm_slot;
5297
5298 if (i.tm.opcode_modifier.vexw == VEXW0)
5299 {
5300 /* If VexW0 is set, the third operand is the source and
5301 the second operand is encoded in the immediate
5302 operand. */
5303 source = 2;
5304 reg_slot = 1;
5305 }
5306 else
5307 {
5308 /* VexW1 is set, the second operand is the source and
5309 the third operand is encoded in the immediate
5310 operand. */
5311 source = 1;
5312 reg_slot = 2;
5313 }
5314
5315 if (i.tm.opcode_modifier.immext)
5316 {
5317 	      /* When ImmExt is set, the immediate byte is the last
5318 operand. */
5319 imm_slot = i.operands - 1;
5320 source--;
5321 reg_slot--;
5322 }
5323 else
5324 {
5325 imm_slot = 0;
5326
5327 /* Turn on Imm8 so that output_imm will generate it. */
5328 i.types[imm_slot].bitfield.imm8 = 1;
5329 }
5330
5331 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5332 &regxmm)
5333 || operand_type_equal (&i.tm.operand_types[reg_slot],
5334 &regymm));
5335 i.op[imm_slot].imms->X_add_number
5336 |= ((i.op[reg_slot].regs->reg_num
5337 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5338 << 4);
5339 }
5340
5341 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5342 || operand_type_equal (&i.tm.operand_types[nds],
5343 &regymm));
5344 i.vex.register_specifier = i.op[nds].regs;
5345 }
5346 else
5347 source = dest = 0;
5348
5349 /* i.reg_operands MUST be the number of real register operands;
5350 implicit registers do not count. If there are 3 register
5351 	     operands, it must be an instruction with VexNDS.  For an
5352 	     instruction with VexNDD, the destination register is encoded
5353 	     in the VEX prefix.  If there are 4 register operands, it must be
5354 	     an instruction with a VEX prefix and 3 sources.  */
5355 if (i.mem_operands == 0
5356 && ((i.reg_operands == 2
5357 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5358 || (i.reg_operands == 3
5359 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5360 || (i.reg_operands == 4 && vex_3_sources)))
5361 {
5362 switch (i.operands)
5363 {
5364 case 2:
5365 source = 0;
5366 break;
5367 case 3:
5368 /* When there are 3 operands, one of them may be immediate,
5369 which may be the first or the last operand. Otherwise,
5370 the first operand must be shift count register (cl) or it
5371 is an instruction with VexNDS. */
5372 gas_assert (i.imm_operands == 1
5373 || (i.imm_operands == 0
5374 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5375 || i.types[0].bitfield.shiftcount)));
5376 if (operand_type_check (i.types[0], imm)
5377 || i.types[0].bitfield.shiftcount)
5378 source = 1;
5379 else
5380 source = 0;
5381 break;
5382 case 4:
5383 /* When there are 4 operands, the first two must be 8bit
5384 immediate operands. The source operand will be the 3rd
5385 one.
5386
5387 	     For instructions with VexNDS, if the first operand is
5388 an imm8, the source operand is the 2nd one. If the last
5389 operand is imm8, the source operand is the first one. */
5390 gas_assert ((i.imm_operands == 2
5391 && i.types[0].bitfield.imm8
5392 && i.types[1].bitfield.imm8)
5393 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5394 && i.imm_operands == 1
5395 && (i.types[0].bitfield.imm8
5396 || i.types[i.operands - 1].bitfield.imm8)));
5397 if (i.imm_operands == 2)
5398 source = 2;
5399 else
5400 {
5401 if (i.types[0].bitfield.imm8)
5402 source = 1;
5403 else
5404 source = 0;
5405 }
5406 break;
5407 case 5:
5408 break;
5409 default:
5410 abort ();
5411 }
5412
5413 if (!vex_3_sources)
5414 {
5415 dest = source + 1;
5416
5417 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5418 {
5419 /* For instructions with VexNDS, the register-only
5420 source operand must be 32/64bit integer, XMM or
5421 YMM register. It is encoded in VEX prefix. We
5422 need to clear RegMem bit before calling
5423 operand_type_equal. */
5424
5425 i386_operand_type op;
5426 unsigned int vvvv;
5427
5428 /* Check register-only source operand when two source
5429 operands are swapped. */
5430 if (!i.tm.operand_types[source].bitfield.baseindex
5431 && i.tm.operand_types[dest].bitfield.baseindex)
5432 {
5433 vvvv = source;
5434 source = dest;
5435 }
5436 else
5437 vvvv = dest;
5438
5439 op = i.tm.operand_types[vvvv];
5440 op.bitfield.regmem = 0;
5441 if ((dest + 1) >= i.operands
5442 || (op.bitfield.reg32 != 1
5443 		      && op.bitfield.reg64 != 1
5444 && !operand_type_equal (&op, &regxmm)
5445 && !operand_type_equal (&op, &regymm)))
5446 abort ();
5447 i.vex.register_specifier = i.op[vvvv].regs;
5448 dest++;
5449 }
5450 }
5451
5452 i.rm.mode = 3;
5453 	  /* One of the register operands will be encoded in the i.rm.reg
5454 	     field, the other in the combined i.rm.mode and i.rm.regmem
5455 fields. If no form of this instruction supports a memory
5456 destination operand, then we assume the source operand may
5457 sometimes be a memory operand and so we need to store the
5458 destination in the i.rm.reg field. */
5459 if (!i.tm.operand_types[dest].bitfield.regmem
5460 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5461 {
5462 i.rm.reg = i.op[dest].regs->reg_num;
5463 i.rm.regmem = i.op[source].regs->reg_num;
5464 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5465 i.rex |= REX_R;
5466 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5467 i.rex |= REX_B;
5468 }
5469 else
5470 {
5471 i.rm.reg = i.op[source].regs->reg_num;
5472 i.rm.regmem = i.op[dest].regs->reg_num;
5473 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5474 i.rex |= REX_B;
5475 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5476 i.rex |= REX_R;
5477 }
5478 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5479 {
5480 if (!i.types[0].bitfield.control
5481 && !i.types[1].bitfield.control)
5482 abort ();
5483 i.rex &= ~(REX_R | REX_B);
5484 add_prefix (LOCK_PREFIX_OPCODE);
5485 }
5486 }
5487 else
5488 { /* If it's not 2 reg operands... */
5489 unsigned int mem;
5490
5491 if (i.mem_operands)
5492 {
5493 unsigned int fake_zero_displacement = 0;
5494 unsigned int op;
5495
5496 for (op = 0; op < i.operands; op++)
5497 if (operand_type_check (i.types[op], anymem))
5498 break;
5499 gas_assert (op < i.operands);
5500
5501 if (i.tm.opcode_modifier.vecsib)
5502 {
5503 if (i.index_reg->reg_num == RegEiz
5504 || i.index_reg->reg_num == RegRiz)
5505 abort ();
5506
5507 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5508 if (!i.base_reg)
5509 {
5510 i.sib.base = NO_BASE_REGISTER;
5511 i.sib.scale = i.log2_scale_factor;
5512 i.types[op].bitfield.disp8 = 0;
5513 i.types[op].bitfield.disp16 = 0;
5514 i.types[op].bitfield.disp64 = 0;
5515 if (flag_code != CODE_64BIT)
5516 {
5517 /* Must be 32 bit */
5518 i.types[op].bitfield.disp32 = 1;
5519 i.types[op].bitfield.disp32s = 0;
5520 }
5521 else
5522 {
5523 i.types[op].bitfield.disp32 = 0;
5524 i.types[op].bitfield.disp32s = 1;
5525 }
5526 }
5527 i.sib.index = i.index_reg->reg_num;
5528 if ((i.index_reg->reg_flags & RegRex) != 0)
5529 i.rex |= REX_X;
5530 }
5531
5532 default_seg = &ds;
5533
5534 if (i.base_reg == 0)
5535 {
5536 i.rm.mode = 0;
5537 if (!i.disp_operands)
5538 {
5539 fake_zero_displacement = 1;
5540 /* Instructions with VSIB byte need 32bit displacement
5541 if there is no base register. */
5542 if (i.tm.opcode_modifier.vecsib)
5543 i.types[op].bitfield.disp32 = 1;
5544 }
5545 if (i.index_reg == 0)
5546 {
5547 gas_assert (!i.tm.opcode_modifier.vecsib);
5548 /* Operand is just <disp> */
5549 if (flag_code == CODE_64BIT)
5550 {
5551 			  /* 64bit mode replaces 32bit absolute
5552 			     addressing with RIP relative addressing;
5553 absolute addressing is encoded by one of the
5554 redundant SIB forms. */
5555 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5556 i.sib.base = NO_BASE_REGISTER;
5557 i.sib.index = NO_INDEX_REGISTER;
5558 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5559 ? disp32s : disp32);
5560 }
5561 else if ((flag_code == CODE_16BIT)
5562 ^ (i.prefix[ADDR_PREFIX] != 0))
5563 {
5564 i.rm.regmem = NO_BASE_REGISTER_16;
5565 i.types[op] = disp16;
5566 }
5567 else
5568 {
5569 i.rm.regmem = NO_BASE_REGISTER;
5570 i.types[op] = disp32;
5571 }
5572 }
5573 else if (!i.tm.opcode_modifier.vecsib)
5574 {
5575 /* !i.base_reg && i.index_reg */
5576 if (i.index_reg->reg_num == RegEiz
5577 || i.index_reg->reg_num == RegRiz)
5578 i.sib.index = NO_INDEX_REGISTER;
5579 else
5580 i.sib.index = i.index_reg->reg_num;
5581 i.sib.base = NO_BASE_REGISTER;
5582 i.sib.scale = i.log2_scale_factor;
5583 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5584 i.types[op].bitfield.disp8 = 0;
5585 i.types[op].bitfield.disp16 = 0;
5586 i.types[op].bitfield.disp64 = 0;
5587 if (flag_code != CODE_64BIT)
5588 {
5589 /* Must be 32 bit */
5590 i.types[op].bitfield.disp32 = 1;
5591 i.types[op].bitfield.disp32s = 0;
5592 }
5593 else
5594 {
5595 i.types[op].bitfield.disp32 = 0;
5596 i.types[op].bitfield.disp32s = 1;
5597 }
5598 if ((i.index_reg->reg_flags & RegRex) != 0)
5599 i.rex |= REX_X;
5600 }
5601 }
5602 /* RIP addressing for 64bit mode. */
5603 else if (i.base_reg->reg_num == RegRip ||
5604 i.base_reg->reg_num == RegEip)
5605 {
5606 gas_assert (!i.tm.opcode_modifier.vecsib);
5607 i.rm.regmem = NO_BASE_REGISTER;
5608 i.types[op].bitfield.disp8 = 0;
5609 i.types[op].bitfield.disp16 = 0;
5610 i.types[op].bitfield.disp32 = 0;
5611 i.types[op].bitfield.disp32s = 1;
5612 i.types[op].bitfield.disp64 = 0;
5613 i.flags[op] |= Operand_PCrel;
5614 if (! i.disp_operands)
5615 fake_zero_displacement = 1;
5616 }
5617 else if (i.base_reg->reg_type.bitfield.reg16)
5618 {
5619 gas_assert (!i.tm.opcode_modifier.vecsib);
5620 switch (i.base_reg->reg_num)
5621 {
5622 case 3: /* (%bx) */
5623 if (i.index_reg == 0)
5624 i.rm.regmem = 7;
5625 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5626 i.rm.regmem = i.index_reg->reg_num - 6;
5627 break;
5628 case 5: /* (%bp) */
5629 default_seg = &ss;
5630 if (i.index_reg == 0)
5631 {
5632 i.rm.regmem = 6;
5633 if (operand_type_check (i.types[op], disp) == 0)
5634 {
5635 /* fake (%bp) into 0(%bp) */
5636 i.types[op].bitfield.disp8 = 1;
5637 fake_zero_displacement = 1;
5638 }
5639 }
5640 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5641 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5642 break;
5643 default: /* (%si) -> 4 or (%di) -> 5 */
5644 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5645 }
5646 i.rm.mode = mode_from_disp_size (i.types[op]);
5647 }
5648 else /* i.base_reg and 32/64 bit mode */
5649 {
5650 if (flag_code == CODE_64BIT
5651 && operand_type_check (i.types[op], disp))
5652 {
5653 i386_operand_type temp;
5654 operand_type_set (&temp, 0);
5655 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5656 i.types[op] = temp;
5657 if (i.prefix[ADDR_PREFIX] == 0)
5658 i.types[op].bitfield.disp32s = 1;
5659 else
5660 i.types[op].bitfield.disp32 = 1;
5661 }
5662
5663 if (!i.tm.opcode_modifier.vecsib)
5664 i.rm.regmem = i.base_reg->reg_num;
5665 if ((i.base_reg->reg_flags & RegRex) != 0)
5666 i.rex |= REX_B;
5667 i.sib.base = i.base_reg->reg_num;
5668 /* x86-64 ignores REX prefix bit here to avoid decoder
5669 complications. */
5670 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5671 {
5672 default_seg = &ss;
5673 if (i.disp_operands == 0)
5674 {
5675 fake_zero_displacement = 1;
5676 i.types[op].bitfield.disp8 = 1;
5677 }
5678 }
5679 else if (i.base_reg->reg_num == ESP_REG_NUM)
5680 {
5681 default_seg = &ss;
5682 }
5683 i.sib.scale = i.log2_scale_factor;
5684 if (i.index_reg == 0)
5685 {
5686 gas_assert (!i.tm.opcode_modifier.vecsib);
5687 /* <disp>(%esp) becomes two byte modrm with no index
5688 register. We've already stored the code for esp
5689 		     in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5690 Any base register besides %esp will not use the
5691 extra modrm byte. */
5692 i.sib.index = NO_INDEX_REGISTER;
5693 }
5694 else if (!i.tm.opcode_modifier.vecsib)
5695 {
5696 if (i.index_reg->reg_num == RegEiz
5697 || i.index_reg->reg_num == RegRiz)
5698 i.sib.index = NO_INDEX_REGISTER;
5699 else
5700 i.sib.index = i.index_reg->reg_num;
5701 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5702 if ((i.index_reg->reg_flags & RegRex) != 0)
5703 i.rex |= REX_X;
5704 }
5705
5706 if (i.disp_operands
5707 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5708 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5709 i.rm.mode = 0;
5710 else
5711 {
5712 if (!fake_zero_displacement
5713 && !i.disp_operands
5714 && i.disp_encoding)
5715 {
5716 fake_zero_displacement = 1;
5717 if (i.disp_encoding == disp_encoding_8bit)
5718 i.types[op].bitfield.disp8 = 1;
5719 else
5720 i.types[op].bitfield.disp32 = 1;
5721 }
5722 i.rm.mode = mode_from_disp_size (i.types[op]);
5723 }
5724 }
5725
5726 if (fake_zero_displacement)
5727 {
5728 /* Fakes a zero displacement assuming that i.types[op]
5729 holds the correct displacement size. */
5730 expressionS *exp;
5731
5732 gas_assert (i.op[op].disps == 0);
5733 exp = &disp_expressions[i.disp_operands++];
5734 i.op[op].disps = exp;
5735 exp->X_op = O_constant;
5736 exp->X_add_number = 0;
5737 exp->X_add_symbol = (symbolS *) 0;
5738 exp->X_op_symbol = (symbolS *) 0;
5739 }
5740
5741 mem = op;
5742 }
5743 else
5744 mem = ~0;
5745
5746 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5747 {
5748 if (operand_type_check (i.types[0], imm))
5749 i.vex.register_specifier = NULL;
5750 else
5751 {
5752 /* VEX.vvvv encodes one of the sources when the first
5753 operand is not an immediate. */
5754 if (i.tm.opcode_modifier.vexw == VEXW0)
5755 i.vex.register_specifier = i.op[0].regs;
5756 else
5757 i.vex.register_specifier = i.op[1].regs;
5758 }
5759
5760 	  /* Destination is an XMM register encoded in the ModRM.reg
5761 and VEX.R bit. */
5762 i.rm.reg = i.op[2].regs->reg_num;
5763 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5764 i.rex |= REX_R;
5765
5766 	  /* ModRM.rm and VEX.B encode the other source.  */
5767 if (!i.mem_operands)
5768 {
5769 i.rm.mode = 3;
5770
5771 if (i.tm.opcode_modifier.vexw == VEXW0)
5772 i.rm.regmem = i.op[1].regs->reg_num;
5773 else
5774 i.rm.regmem = i.op[0].regs->reg_num;
5775
5776 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5777 i.rex |= REX_B;
5778 }
5779 }
5780 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5781 {
5782 i.vex.register_specifier = i.op[2].regs;
5783 if (!i.mem_operands)
5784 {
5785 i.rm.mode = 3;
5786 i.rm.regmem = i.op[1].regs->reg_num;
5787 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5788 i.rex |= REX_B;
5789 }
5790 }
5791 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5792 (if any) based on i.tm.extension_opcode. Again, we must be
5793 careful to make sure that segment/control/debug/test/MMX
5794 registers are coded into the i.rm.reg field. */
5795 else if (i.reg_operands)
5796 {
5797 unsigned int op;
5798 unsigned int vex_reg = ~0;
5799
5800 for (op = 0; op < i.operands; op++)
5801 if (i.types[op].bitfield.reg8
5802 || i.types[op].bitfield.reg16
5803 || i.types[op].bitfield.reg32
5804 || i.types[op].bitfield.reg64
5805 || i.types[op].bitfield.regmmx
5806 || i.types[op].bitfield.regxmm
5807 || i.types[op].bitfield.regymm
5808 || i.types[op].bitfield.sreg2
5809 || i.types[op].bitfield.sreg3
5810 || i.types[op].bitfield.control
5811 || i.types[op].bitfield.debug
5812 || i.types[op].bitfield.test)
5813 break;
5814
5815 if (vex_3_sources)
5816 op = dest;
5817 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5818 {
5819 /* For instructions with VexNDS, the register-only
5820 source operand is encoded in VEX prefix. */
5821 gas_assert (mem != (unsigned int) ~0);
5822
5823 if (op > mem)
5824 {
5825 vex_reg = op++;
5826 gas_assert (op < i.operands);
5827 }
5828 else
5829 {
5830 /* Check register-only source operand when two source
5831 operands are swapped. */
5832 if (!i.tm.operand_types[op].bitfield.baseindex
5833 && i.tm.operand_types[op + 1].bitfield.baseindex)
5834 {
5835 vex_reg = op;
5836 op += 2;
5837 gas_assert (mem == (vex_reg + 1)
5838 && op < i.operands);
5839 }
5840 else
5841 {
5842 vex_reg = op + 1;
5843 gas_assert (vex_reg < i.operands);
5844 }
5845 }
5846 }
5847 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5848 {
5849 /* For instructions with VexNDD, the register destination
5850 is encoded in VEX prefix. */
5851 if (i.mem_operands == 0)
5852 {
5853 /* There is no memory operand. */
5854 gas_assert ((op + 2) == i.operands);
5855 vex_reg = op + 1;
5856 }
5857 else
5858 {
5859 /* There are only 2 operands. */
5860 gas_assert (op < 2 && i.operands == 2);
5861 vex_reg = 1;
5862 }
5863 }
5864 else
5865 gas_assert (op < i.operands);
5866
5867 if (vex_reg != (unsigned int) ~0)
5868 {
5869 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5870
5871 if (type->bitfield.reg32 != 1
5872 && type->bitfield.reg64 != 1
5873 && !operand_type_equal (type, &regxmm)
5874 && !operand_type_equal (type, &regymm))
5875 abort ();
5876
5877 i.vex.register_specifier = i.op[vex_reg].regs;
5878 }
5879
5880 /* Don't set OP operand twice. */
5881 if (vex_reg != op)
5882 {
5883 /* If there is an extension opcode to put here, the
5884 register number must be put into the regmem field. */
5885 if (i.tm.extension_opcode != None)
5886 {
5887 i.rm.regmem = i.op[op].regs->reg_num;
5888 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5889 i.rex |= REX_B;
5890 }
5891 else
5892 {
5893 i.rm.reg = i.op[op].regs->reg_num;
5894 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5895 i.rex |= REX_R;
5896 }
5897 }
5898
5899 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5900 must set it to 3 to indicate this is a register operand
5901 in the regmem field. */
5902 if (!i.mem_operands)
5903 i.rm.mode = 3;
5904 }
5905
5906 /* Fill in i.rm.reg field with extension opcode (if any). */
5907 if (i.tm.extension_opcode != None)
5908 i.rm.reg = i.tm.extension_opcode;
5909 }
5910 return default_seg;
5911 }
5912
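/* Output a relaxable branch: the displacement size is left for the
   relaxation pass (md_convert_frag) to decide.  */
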
5913 static void
5914 output_branch (void)
5915 {
5916 char *p;
5917 int size;
5918 int code16;
5919 int prefix;
5920 relax_substateT subtype;
5921 symbolS *sym;
5922 offsetT off;
5923
5924 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5925 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
5926
5927 prefix = 0;
5928 if (i.prefix[DATA_PREFIX] != 0)
5929 {
5930 prefix = 1;
5931 i.prefixes -= 1;
5932 code16 ^= CODE16;
5933 }
5934 /* Pentium4 branch hints. */
5935 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5936 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5937 {
5938 prefix++;
5939 i.prefixes--;
5940 }
5941 if (i.prefix[REX_PREFIX] != 0)
5942 {
5943 prefix++;
5944 i.prefixes--;
5945 }
5946
5947 if (i.prefixes != 0 && !intel_syntax)
5948 as_warn (_("skipping prefixes on this instruction"));
5949
5950 	  /* It's always a symbol; end the frag and set up for relaxation.
5951 Make sure there is enough room in this frag for the largest
5952 instruction we may generate in md_convert_frag. This is 2
5953 bytes for the opcode and room for the prefix and largest
5954 displacement. */
5955 frag_grow (prefix + 2 + 4);
5956 /* Prefix and 1 opcode byte go in fr_fix. */
5957 p = frag_more (prefix + 1);
5958 if (i.prefix[DATA_PREFIX] != 0)
5959 *p++ = DATA_PREFIX_OPCODE;
5960 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5961 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5962 *p++ = i.prefix[SEG_PREFIX];
5963 if (i.prefix[REX_PREFIX] != 0)
5964 *p++ = i.prefix[REX_PREFIX];
5965 *p = i.tm.base_opcode;
5966
5967 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5968 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5969 else if (cpu_arch_flags.bitfield.cpui386)
5970 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5971 else
5972 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5973 subtype |= code16;
5974
5975 sym = i.op[0].disps->X_add_symbol;
5976 off = i.op[0].disps->X_add_number;
5977
5978 if (i.op[0].disps->X_op != O_constant
5979 && i.op[0].disps->X_op != O_symbol)
5980 {
5981 /* Handle complex expressions. */
5982 sym = make_expr_symbol (i.op[0].disps);
5983 off = 0;
5984 }
5985
5986 /* 1 possible extra opcode + 4 byte displacement go in var part.
5987 Pass reloc in fr_var. */
5988 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5989 }
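/* For instance, for a conditional branch such as "jne .Lfar" (".Lfar"
   being an arbitrary label), output_branch above stores any prefixes plus
   the single opcode byte 0x75 in the fixed part of the frag and reserves
   the displacement via frag_var; should relaxation find the target out of
   byte range, md_convert_frag rewrites it as 0f 85 <rel32>, and a short
   "jmp" (0xeb) likewise becomes 0xe9 <rel32>.  */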
5990
5991 static void
5992 output_jump (void)
5993 {
5994 char *p;
5995 int size;
5996 fixS *fixP;
5997
5998 if (i.tm.opcode_modifier.jumpbyte)
5999 {
6000 /* This is a loop or jecxz type instruction. */
6001 size = 1;
6002 if (i.prefix[ADDR_PREFIX] != 0)
6003 {
6004 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6005 i.prefixes -= 1;
6006 }
6007 /* Pentium4 branch hints. */
6008 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6009 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6010 {
6011 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6012 i.prefixes--;
6013 }
6014 }
6015 else
6016 {
6017 int code16;
6018
6019 code16 = 0;
6020 if (flag_code == CODE_16BIT)
6021 code16 = CODE16;
6022
6023 if (i.prefix[DATA_PREFIX] != 0)
6024 {
6025 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6026 i.prefixes -= 1;
6027 code16 ^= CODE16;
6028 }
6029
6030 size = 4;
6031 if (code16)
6032 size = 2;
6033 }
6034
6035 if (i.prefix[REX_PREFIX] != 0)
6036 {
6037 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6038 i.prefixes -= 1;
6039 }
6040
6041 if (i.prefixes != 0 && !intel_syntax)
6042 as_warn (_("skipping prefixes on this instruction"));
6043
6044 p = frag_more (1 + size);
6045 *p++ = i.tm.base_opcode;
6046
6047 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6048 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6049
6050 /* All jumps handled here are signed, but don't use a signed limit
6051 check for 32 and 16 bit jumps as we want to allow wrap around at
6052 4G and 64k respectively. */
6053 if (size == 1)
6054 fixP->fx_signed = 1;
6055 }
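/* For instance, "call foo" in 32-bit code is a JumpDword insn and emits
   0xe8 followed by a 4-byte pc-relative fixup here, while a JumpByte insn
   such as "loop lab" emits 0xe2 followed by a 1-byte signed fixup ("foo"
   and "lab" being arbitrary symbols).  */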
6056
6057 static void
6058 output_interseg_jump (void)
6059 {
6060 char *p;
6061 int size;
6062 int prefix;
6063 int code16;
6064
6065 code16 = 0;
6066 if (flag_code == CODE_16BIT)
6067 code16 = CODE16;
6068
6069 prefix = 0;
6070 if (i.prefix[DATA_PREFIX] != 0)
6071 {
6072 prefix = 1;
6073 i.prefixes -= 1;
6074 code16 ^= CODE16;
6075 }
6076 if (i.prefix[REX_PREFIX] != 0)
6077 {
6078 prefix++;
6079 i.prefixes -= 1;
6080 }
6081
6082 size = 4;
6083 if (code16)
6084 size = 2;
6085
6086 if (i.prefixes != 0 && !intel_syntax)
6087 as_warn (_("skipping prefixes on this instruction"));
6088
6089 /* 1 opcode; 2 segment; offset */
6090 p = frag_more (prefix + 1 + 2 + size);
6091
6092 if (i.prefix[DATA_PREFIX] != 0)
6093 *p++ = DATA_PREFIX_OPCODE;
6094
6095 if (i.prefix[REX_PREFIX] != 0)
6096 *p++ = i.prefix[REX_PREFIX];
6097
6098 *p++ = i.tm.base_opcode;
6099 if (i.op[1].imms->X_op == O_constant)
6100 {
6101 offsetT n = i.op[1].imms->X_add_number;
6102
6103 if (size == 2
6104 && !fits_in_unsigned_word (n)
6105 && !fits_in_signed_word (n))
6106 {
6107 as_bad (_("16-bit jump out of range"));
6108 return;
6109 }
6110 md_number_to_chars (p, n, size);
6111 }
6112 else
6113 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6114 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6115 if (i.op[0].imms->X_op != O_constant)
6116 as_bad (_("can't handle non absolute segment in `%s'"),
6117 i.tm.name);
6118 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6119 }
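/* For instance, in 32-bit code "ljmp $0x10, $0x1000" (selector and offset
   chosen arbitrarily) is laid out as ea 00 10 00 00 10 00: the opcode,
   then the 4-byte offset, then the 2-byte segment selector.  */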
6120
6121 static void
6122 output_insn (void)
6123 {
6124 fragS *insn_start_frag;
6125 offsetT insn_start_off;
6126
6127 /* Tie dwarf2 debug info to the address at the start of the insn.
6128 We can't do this after the insn has been output as the current
6129 frag may have been closed off, e.g. by frag_var. */
6130 dwarf2_emit_insn (0);
6131
6132 insn_start_frag = frag_now;
6133 insn_start_off = frag_now_fix ();
6134
6135 /* Output jumps. */
6136 if (i.tm.opcode_modifier.jump)
6137 output_branch ();
6138 else if (i.tm.opcode_modifier.jumpbyte
6139 || i.tm.opcode_modifier.jumpdword)
6140 output_jump ();
6141 else if (i.tm.opcode_modifier.jumpintersegment)
6142 output_interseg_jump ();
6143 else
6144 {
6145 /* Output normal instructions here. */
6146 char *p;
6147 unsigned char *q;
6148 unsigned int j;
6149 unsigned int prefix;
6150
6151 /* Since the VEX prefix contains the implicit prefix, we don't
6152 need the explicit prefix. */
6153 if (!i.tm.opcode_modifier.vex)
6154 {
6155 switch (i.tm.opcode_length)
6156 {
6157 case 3:
6158 if (i.tm.base_opcode & 0xff000000)
6159 {
6160 prefix = (i.tm.base_opcode >> 24) & 0xff;
6161 goto check_prefix;
6162 }
6163 break;
6164 case 2:
6165 if ((i.tm.base_opcode & 0xff0000) != 0)
6166 {
6167 prefix = (i.tm.base_opcode >> 16) & 0xff;
6168 if (i.tm.cpu_flags.bitfield.cpupadlock)
6169 {
6170 check_prefix:
6171 if (prefix != REPE_PREFIX_OPCODE
6172 || (i.prefix[REP_PREFIX]
6173 != REPE_PREFIX_OPCODE))
6174 add_prefix (prefix);
6175 }
6176 else
6177 add_prefix (prefix);
6178 }
6179 break;
6180 case 1:
6181 break;
6182 default:
6183 abort ();
6184 }
6185
6186 /* The prefix bytes. */
6187 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6188 if (*q)
6189 FRAG_APPEND_1_CHAR (*q);
6190 }
6191
6192 if (i.tm.opcode_modifier.vex)
6193 {
6194 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6195 if (*q)
6196 switch (j)
6197 {
6198 case REX_PREFIX:
6199 /* REX byte is encoded in VEX prefix. */
6200 break;
6201 case SEG_PREFIX:
6202 case ADDR_PREFIX:
6203 FRAG_APPEND_1_CHAR (*q);
6204 break;
6205 default:
6206 /* There should be no other prefixes for instructions
6207 with VEX prefix. */
6208 abort ();
6209 }
6210
6211 /* Now the VEX prefix. */
6212 p = frag_more (i.vex.length);
6213 for (j = 0; j < i.vex.length; j++)
6214 p[j] = i.vex.bytes[j];
6215 }
6216
6217 /* Now the opcode; be careful about word order here! */
6218 if (i.tm.opcode_length == 1)
6219 {
6220 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6221 }
6222 else
6223 {
6224 switch (i.tm.opcode_length)
6225 {
6226 case 3:
6227 p = frag_more (3);
6228 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6229 break;
6230 case 2:
6231 p = frag_more (2);
6232 break;
6233 default:
6234 abort ();
6235 break;
6236 }
6237
6238 /* Put out high byte first: can't use md_number_to_chars! */
6239 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6240 *p = i.tm.base_opcode & 0xff;
6241 }
6242
6243 /* Now the modrm byte and sib byte (if present). */
6244 if (i.tm.opcode_modifier.modrm)
6245 {
6246 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6247 | i.rm.reg << 3
6248 | i.rm.mode << 6));
6249 /* If i.rm.regmem == ESP (4)
6250 && i.rm.mode != (Register mode)
6251 && not 16 bit
6252 ==> need a SIB byte. */
6253 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6254 && i.rm.mode != 3
6255 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6256 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6257 | i.sib.index << 3
6258 | i.sib.scale << 6));
6259 }
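/* For instance, "addl %eax, (%ebx,%esi,4)" produces the bytes 01 04 b3
   here: ModRM 0x04 is mode 00, reg 000 (%eax), regmem 100 (SIB follows),
   and SIB 0xb3 is scale 10 (x4), index 110 (%esi), base 011 (%ebx).  */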
6260
6261 if (i.disp_operands)
6262 output_disp (insn_start_frag, insn_start_off);
6263
6264 if (i.imm_operands)
6265 output_imm (insn_start_frag, insn_start_off);
6266 }
6267
6268 #ifdef DEBUG386
6269 if (flag_debug)
6270 {
6271 pi ("" /*line*/, &i);
6272 }
6273 #endif /* DEBUG386 */
6274 }
6275
6276 /* Return the size of the displacement operand N. */
6277
6278 static int
6279 disp_size (unsigned int n)
6280 {
6281 int size = 4;
6282 if (i.types[n].bitfield.disp64)
6283 size = 8;
6284 else if (i.types[n].bitfield.disp8)
6285 size = 1;
6286 else if (i.types[n].bitfield.disp16)
6287 size = 2;
6288 return size;
6289 }
6290
6291 /* Return the size of the immediate operand N. */
6292
6293 static int
6294 imm_size (unsigned int n)
6295 {
6296 int size = 4;
6297 if (i.types[n].bitfield.imm64)
6298 size = 8;
6299 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6300 size = 1;
6301 else if (i.types[n].bitfield.imm16)
6302 size = 2;
6303 return size;
6304 }
6305
6306 static void
6307 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6308 {
6309 char *p;
6310 unsigned int n;
6311
6312 for (n = 0; n < i.operands; n++)
6313 {
6314 if (operand_type_check (i.types[n], disp))
6315 {
6316 if (i.op[n].disps->X_op == O_constant)
6317 {
6318 int size = disp_size (n);
6319 offsetT val;
6320
6321 val = offset_in_range (i.op[n].disps->X_add_number,
6322 size);
6323 p = frag_more (size);
6324 md_number_to_chars (p, val, size);
6325 }
6326 else
6327 {
6328 enum bfd_reloc_code_real reloc_type;
6329 int size = disp_size (n);
6330 int sign = i.types[n].bitfield.disp32s;
6331 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6332
6333 /* We can't have 8 bit displacement here. */
6334 gas_assert (!i.types[n].bitfield.disp8);
6335
6336 /* The PC relative address is computed relative
6337 to the instruction boundary, so if immediate
6338 fields follow, we need to adjust the value. */
6339 if (pcrel && i.imm_operands)
6340 {
6341 unsigned int n1;
6342 int sz = 0;
6343
6344 for (n1 = 0; n1 < i.operands; n1++)
6345 if (operand_type_check (i.types[n1], imm))
6346 {
6347 /* Only one immediate is allowed for PC
6348 relative address. */
6349 gas_assert (sz == 0);
6350 sz = imm_size (n1);
6351 i.op[n].disps->X_add_number -= sz;
6352 }
6353 /* We should find the immediate. */
6354 gas_assert (sz != 0);
6355 }
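/* For instance, "cmpl $0x12345678, foo(%rip)" ("foo" being an arbitrary
   symbol) encodes as 81 3d <disp32> <imm32>; the 4-byte immediate follows
   the displacement, so 4 is subtracted from the addend above to keep the
   displacement relative to the end of the instruction.  */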
6356
6357 p = frag_more (size);
6358 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6359 if (GOT_symbol
6360 && GOT_symbol == i.op[n].disps->X_add_symbol
6361 && (((reloc_type == BFD_RELOC_32
6362 || reloc_type == BFD_RELOC_X86_64_32S
6363 || (reloc_type == BFD_RELOC_64
6364 && object_64bit))
6365 && (i.op[n].disps->X_op == O_symbol
6366 || (i.op[n].disps->X_op == O_add
6367 && ((symbol_get_value_expression
6368 (i.op[n].disps->X_op_symbol)->X_op)
6369 == O_subtract))))
6370 || reloc_type == BFD_RELOC_32_PCREL))
6371 {
6372 offsetT add;
6373
6374 if (insn_start_frag == frag_now)
6375 add = (p - frag_now->fr_literal) - insn_start_off;
6376 else
6377 {
6378 fragS *fr;
6379
6380 add = insn_start_frag->fr_fix - insn_start_off;
6381 for (fr = insn_start_frag->fr_next;
6382 fr && fr != frag_now; fr = fr->fr_next)
6383 add += fr->fr_fix;
6384 add += p - frag_now->fr_literal;
6385 }
6386
6387 if (!object_64bit)
6388 {
6389 reloc_type = BFD_RELOC_386_GOTPC;
6390 i.op[n].imms->X_add_number += add;
6391 }
6392 else if (reloc_type == BFD_RELOC_64)
6393 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6394 else
6395 /* Don't do the adjustment for x86-64, as there
6396 the pcrel addressing is relative to the _next_
6397 insn, and that is taken care of in other code. */
6398 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6399 }
6400 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6401 i.op[n].disps, pcrel, reloc_type);
6402 }
6403 }
6404 }
6405 }
6406
6407 static void
6408 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6409 {
6410 char *p;
6411 unsigned int n;
6412
6413 for (n = 0; n < i.operands; n++)
6414 {
6415 if (operand_type_check (i.types[n], imm))
6416 {
6417 if (i.op[n].imms->X_op == O_constant)
6418 {
6419 int size = imm_size (n);
6420 offsetT val;
6421
6422 val = offset_in_range (i.op[n].imms->X_add_number,
6423 size);
6424 p = frag_more (size);
6425 md_number_to_chars (p, val, size);
6426 }
6427 else
6428 {
6429 /* Not absolute_section.
6430 Need a 32-bit fixup (don't support 8bit
6431 non-absolute imms). Try to support other
6432 sizes ... */
6433 enum bfd_reloc_code_real reloc_type;
6434 int size = imm_size (n);
6435 int sign;
6436
6437 if (i.types[n].bitfield.imm32s
6438 && (i.suffix == QWORD_MNEM_SUFFIX
6439 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6440 sign = 1;
6441 else
6442 sign = 0;
6443
6444 p = frag_more (size);
6445 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6446
6447 /* This is tough to explain. We end up with this one if we
6448 * have operands that look like
6449 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6450 * obtain the absolute address of the GOT, and it is strongly
6451 * preferable from a performance point of view to avoid using
6452 * a runtime relocation for this. The actual sequence of
6453 * instructions often looks something like:
6454 *
6455 * call .L66
6456 * .L66:
6457 * popl %ebx
6458 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6459 *
6460 * The call and pop essentially return the absolute address
6461 * of the label .L66 and store it in %ebx. The linker itself
6462 * will ultimately change the first operand of the addl so
6463 * that %ebx points to the GOT, but to keep things simple, the
6464 * .o file must have this operand set so that it generates not
6465 * the absolute address of .L66, but the absolute address of
6466 * itself. This allows the linker to simply treat a GOTPC
6467 * relocation as asking for a pcrel offset to the GOT to be
6468 * added in, and the addend of the relocation is stored in the
6469 * operand field for the instruction itself.
6470 *
6471 * Our job here is to fix the operand so that it would add
6472 * the correct offset so that %ebx would point to itself. The
6473 * thing that is tricky is that .-.L66 will point to the
6474 * beginning of the instruction, so we need to further modify
6475 * the operand so that it will point to itself. There are
6476 * other cases where you have something like:
6477 *
6478 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6479 *
6480 * and here no correction would be required. Internally in
6481 * the assembler we treat operands of this form as not being
6482 * pcrel since the '.' is explicitly mentioned, and I wonder
6483 * whether it would simplify matters to do it this way. Who
6484 * knows. In earlier versions of the PIC patches, the
6485 * pcrel_adjust field was used to store the correction, but
6486 * since the expression is not pcrel, I felt it would be
6487 * confusing to do it this way. */
6488
6489 if ((reloc_type == BFD_RELOC_32
6490 || reloc_type == BFD_RELOC_X86_64_32S
6491 || reloc_type == BFD_RELOC_64)
6492 && GOT_symbol
6493 && GOT_symbol == i.op[n].imms->X_add_symbol
6494 && (i.op[n].imms->X_op == O_symbol
6495 || (i.op[n].imms->X_op == O_add
6496 && ((symbol_get_value_expression
6497 (i.op[n].imms->X_op_symbol)->X_op)
6498 == O_subtract))))
6499 {
6500 offsetT add;
6501
6502 if (insn_start_frag == frag_now)
6503 add = (p - frag_now->fr_literal) - insn_start_off;
6504 else
6505 {
6506 fragS *fr;
6507
6508 add = insn_start_frag->fr_fix - insn_start_off;
6509 for (fr = insn_start_frag->fr_next;
6510 fr && fr != frag_now; fr = fr->fr_next)
6511 add += fr->fr_fix;
6512 add += p - frag_now->fr_literal;
6513 }
6514
6515 if (!object_64bit)
6516 reloc_type = BFD_RELOC_386_GOTPC;
6517 else if (size == 4)
6518 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6519 else if (size == 8)
6520 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6521 i.op[n].imms->X_add_number += add;
6522 }
6523 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6524 i.op[n].imms, 0, reloc_type);
6525 }
6526 }
6527 }
6528 }
6529 \f
6530 /* x86_cons_fix_new is called via the expression parsing code when a
6531 reloc is needed. We use this hook to get the correct .got reloc. */
6532 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6533 static int cons_sign = -1;
6534
6535 void
6536 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6537 expressionS *exp)
6538 {
6539 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6540
6541 got_reloc = NO_RELOC;
6542
6543 #ifdef TE_PE
6544 if (exp->X_op == O_secrel)
6545 {
6546 exp->X_op = O_symbol;
6547 r = BFD_RELOC_32_SECREL;
6548 }
6549 #endif
6550
6551 fix_new_exp (frag, off, len, exp, 0, r);
6552 }
6553
6554 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6555 || defined (LEX_AT)
6556 # define lex_got(reloc, adjust, types) NULL
6557 #else
6558 /* Parse operands of the form
6559 <symbol>@GOTOFF+<nnn>
6560 and similar .plt or .got references.
6561
6562 If we find one, set up the correct relocation in RELOC and copy the
6563 input string, minus the `@GOTOFF' into a malloc'd buffer for
6564 parsing by the calling routine. Return this buffer, and if ADJUST
6565 is non-null set it to the length of the string we removed from the
6566 input line. Otherwise return NULL. */
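/* For instance, given 32-bit ELF input such as "foo@GOTOFF+4" ("foo"
   being an arbitrary symbol), *rel is set to BFD_RELOC_386_GOTOFF and the
   returned buffer reads roughly "foo +4", the reloc token having been
   replaced by a space so that trailing junk like "foo@GOTOFF1" is still
   diagnosed.  */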
6567 static char *
6568 lex_got (enum bfd_reloc_code_real *rel,
6569 int *adjust,
6570 i386_operand_type *types)
6571 {
6572 /* Some of the relocations depend on the size of the field to
6573 be relocated. But in our callers i386_immediate and i386_displacement
6574 we don't yet know the operand size (this will be set by insn
6575 matching). Hence we record the word32 relocation here,
6576 and adjust the reloc according to the real size in reloc(). */
6577 static const struct {
6578 const char *str;
6579 int len;
6580 const enum bfd_reloc_code_real rel[2];
6581 const i386_operand_type types64;
6582 } gotrel[] = {
6583 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6584 BFD_RELOC_X86_64_PLTOFF64 },
6585 OPERAND_TYPE_IMM64 },
6586 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6587 BFD_RELOC_X86_64_PLT32 },
6588 OPERAND_TYPE_IMM32_32S_DISP32 },
6589 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6590 BFD_RELOC_X86_64_GOTPLT64 },
6591 OPERAND_TYPE_IMM64_DISP64 },
6592 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6593 BFD_RELOC_X86_64_GOTOFF64 },
6594 OPERAND_TYPE_IMM64_DISP64 },
6595 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6596 BFD_RELOC_X86_64_GOTPCREL },
6597 OPERAND_TYPE_IMM32_32S_DISP32 },
6598 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6599 BFD_RELOC_X86_64_TLSGD },
6600 OPERAND_TYPE_IMM32_32S_DISP32 },
6601 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6602 _dummy_first_bfd_reloc_code_real },
6603 OPERAND_TYPE_NONE },
6604 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6605 BFD_RELOC_X86_64_TLSLD },
6606 OPERAND_TYPE_IMM32_32S_DISP32 },
6607 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6608 BFD_RELOC_X86_64_GOTTPOFF },
6609 OPERAND_TYPE_IMM32_32S_DISP32 },
6610 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6611 BFD_RELOC_X86_64_TPOFF32 },
6612 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6613 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6614 _dummy_first_bfd_reloc_code_real },
6615 OPERAND_TYPE_NONE },
6616 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6617 BFD_RELOC_X86_64_DTPOFF32 },
6618 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6619 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6620 _dummy_first_bfd_reloc_code_real },
6621 OPERAND_TYPE_NONE },
6622 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6623 _dummy_first_bfd_reloc_code_real },
6624 OPERAND_TYPE_NONE },
6625 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6626 BFD_RELOC_X86_64_GOT32 },
6627 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6628 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6629 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6630 OPERAND_TYPE_IMM32_32S_DISP32 },
6631 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6632 BFD_RELOC_X86_64_TLSDESC_CALL },
6633 OPERAND_TYPE_IMM32_32S_DISP32 },
6634 };
6635 char *cp;
6636 unsigned int j;
6637
6638 #if defined (OBJ_MAYBE_ELF)
6639 if (!IS_ELF)
6640 return NULL;
6641 #endif
6642
6643 for (cp = input_line_pointer; *cp != '@'; cp++)
6644 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6645 return NULL;
6646
6647 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6648 {
6649 int len = gotrel[j].len;
6650 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6651 {
6652 if (gotrel[j].rel[object_64bit] != 0)
6653 {
6654 int first, second;
6655 char *tmpbuf, *past_reloc;
6656
6657 *rel = gotrel[j].rel[object_64bit];
6658 if (adjust)
6659 *adjust = len;
6660
6661 if (types)
6662 {
6663 if (flag_code != CODE_64BIT)
6664 {
6665 types->bitfield.imm32 = 1;
6666 types->bitfield.disp32 = 1;
6667 }
6668 else
6669 *types = gotrel[j].types64;
6670 }
6671
6672 if (GOT_symbol == NULL)
6673 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6674
6675 /* The length of the first part of our input line. */
6676 first = cp - input_line_pointer;
6677
6678 /* The second part goes from after the reloc token until
6679 (and including) an end_of_line char or comma. */
6680 past_reloc = cp + 1 + len;
6681 cp = past_reloc;
6682 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6683 ++cp;
6684 second = cp + 1 - past_reloc;
6685
6686 /* Allocate and copy string. The trailing NUL shouldn't
6687 be necessary, but be safe. */
6688 tmpbuf = (char *) xmalloc (first + second + 2);
6689 memcpy (tmpbuf, input_line_pointer, first);
6690 if (second != 0 && *past_reloc != ' ')
6691 /* Replace the relocation token with ' ', so that
6692 errors like foo@GOTOFF1 will be detected. */
6693 tmpbuf[first++] = ' ';
6694 memcpy (tmpbuf + first, past_reloc, second);
6695 tmpbuf[first + second] = '\0';
6696 return tmpbuf;
6697 }
6698
6699 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6700 gotrel[j].str, 1 << (5 + object_64bit));
6701 return NULL;
6702 }
6703 }
6704
6705 /* Might be a symbol version string. Don't as_bad here. */
6706 return NULL;
6707 }
6708 #endif
6709
6710 void
6711 x86_cons (expressionS *exp, int size)
6712 {
6713 intel_syntax = -intel_syntax;
6714
6715 exp->X_md = 0;
6716 if (size == 4 || (object_64bit && size == 8))
6717 {
6718 /* Handle @GOTOFF and the like in an expression. */
6719 char *save;
6720 char *gotfree_input_line;
6721 int adjust = 0;
6722
6723 save = input_line_pointer;
6724 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6725 if (gotfree_input_line)
6726 input_line_pointer = gotfree_input_line;
6727
6728 expression (exp);
6729
6730 if (gotfree_input_line)
6731 {
6732 /* expression () has merrily parsed up to the end of line,
6733 or a comma - in the wrong buffer. Transfer how far
6734 input_line_pointer has moved to the right buffer. */
6735 input_line_pointer = (save
6736 + (input_line_pointer - gotfree_input_line)
6737 + adjust);
6738 free (gotfree_input_line);
6739 if (exp->X_op == O_constant
6740 || exp->X_op == O_absent
6741 || exp->X_op == O_illegal
6742 || exp->X_op == O_register
6743 || exp->X_op == O_big)
6744 {
6745 char c = *input_line_pointer;
6746 *input_line_pointer = 0;
6747 as_bad (_("missing or invalid expression `%s'"), save);
6748 *input_line_pointer = c;
6749 }
6750 }
6751 }
6752 else
6753 expression (exp);
6754
6755 intel_syntax = -intel_syntax;
6756
6757 if (intel_syntax)
6758 i386_intel_simplify (exp);
6759 }
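/* For instance, a data directive such as ".long foo@GOTOFF" in 32-bit ELF
   ("foo" being an arbitrary symbol) has its "@GOTOFF" stripped by lex_got
   above, the reloc is remembered in got_reloc, and x86_cons_fix_new then
   emits a BFD_RELOC_386_GOTOFF fixup for it.  */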
6760
6761 static void
6762 signed_cons (int size)
6763 {
6764 if (flag_code == CODE_64BIT)
6765 cons_sign = 1;
6766 cons (size);
6767 cons_sign = -1;
6768 }
6769
6770 #ifdef TE_PE
6771 static void
6772 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6773 {
6774 expressionS exp;
6775
6776 do
6777 {
6778 expression (&exp);
6779 if (exp.X_op == O_symbol)
6780 exp.X_op = O_secrel;
6781
6782 emit_expr (&exp, 4);
6783 }
6784 while (*input_line_pointer++ == ',');
6785
6786 input_line_pointer--;
6787 demand_empty_rest_of_line ();
6788 }
6789 #endif
6790
6791 static int
6792 i386_immediate (char *imm_start)
6793 {
6794 char *save_input_line_pointer;
6795 char *gotfree_input_line;
6796 segT exp_seg = 0;
6797 expressionS *exp;
6798 i386_operand_type types;
6799
6800 operand_type_set (&types, ~0);
6801
6802 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6803 {
6804 as_bad (_("at most %d immediate operands are allowed"),
6805 MAX_IMMEDIATE_OPERANDS);
6806 return 0;
6807 }
6808
6809 exp = &im_expressions[i.imm_operands++];
6810 i.op[this_operand].imms = exp;
6811
6812 if (is_space_char (*imm_start))
6813 ++imm_start;
6814
6815 save_input_line_pointer = input_line_pointer;
6816 input_line_pointer = imm_start;
6817
6818 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6819 if (gotfree_input_line)
6820 input_line_pointer = gotfree_input_line;
6821
6822 exp_seg = expression (exp);
6823
6824 SKIP_WHITESPACE ();
6825 if (*input_line_pointer)
6826 as_bad (_("junk `%s' after expression"), input_line_pointer);
6827
6828 input_line_pointer = save_input_line_pointer;
6829 if (gotfree_input_line)
6830 {
6831 free (gotfree_input_line);
6832
6833 if (exp->X_op == O_constant || exp->X_op == O_register)
6834 exp->X_op = O_illegal;
6835 }
6836
6837 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6838 }
6839
6840 static int
6841 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6842 i386_operand_type types, const char *imm_start)
6843 {
6844 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6845 {
6846 if (imm_start)
6847 as_bad (_("missing or invalid immediate expression `%s'"),
6848 imm_start);
6849 return 0;
6850 }
6851 else if (exp->X_op == O_constant)
6852 {
6853 /* Size it properly later. */
6854 i.types[this_operand].bitfield.imm64 = 1;
6855 /* If not 64bit, sign extend val. */
6856 if (flag_code != CODE_64BIT
6857 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6858 exp->X_add_number
6859 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6860 }
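/* For instance, outside 64-bit mode an immediate of 0xfffffff0 passes the
   check above and is folded to -16 here, i.e. the 32-bit value is sign
   extended into the internal offsetT.  */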
6861 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6862 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6863 && exp_seg != absolute_section
6864 && exp_seg != text_section
6865 && exp_seg != data_section
6866 && exp_seg != bss_section
6867 && exp_seg != undefined_section
6868 && !bfd_is_com_section (exp_seg))
6869 {
6870 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6871 return 0;
6872 }
6873 #endif
6874 else if (!intel_syntax && exp->X_op == O_register)
6875 {
6876 if (imm_start)
6877 as_bad (_("illegal immediate register operand %s"), imm_start);
6878 return 0;
6879 }
6880 else
6881 {
6882 /* This is an address. The size of the address will be
6883 determined later, depending on destination register,
6884 suffix, or the default for the section. */
6885 i.types[this_operand].bitfield.imm8 = 1;
6886 i.types[this_operand].bitfield.imm16 = 1;
6887 i.types[this_operand].bitfield.imm32 = 1;
6888 i.types[this_operand].bitfield.imm32s = 1;
6889 i.types[this_operand].bitfield.imm64 = 1;
6890 i.types[this_operand] = operand_type_and (i.types[this_operand],
6891 types);
6892 }
6893
6894 return 1;
6895 }
6896
6897 static char *
6898 i386_scale (char *scale)
6899 {
6900 offsetT val;
6901 char *save = input_line_pointer;
6902
6903 input_line_pointer = scale;
6904 val = get_absolute_expression ();
6905
6906 switch (val)
6907 {
6908 case 1:
6909 i.log2_scale_factor = 0;
6910 break;
6911 case 2:
6912 i.log2_scale_factor = 1;
6913 break;
6914 case 4:
6915 i.log2_scale_factor = 2;
6916 break;
6917 case 8:
6918 i.log2_scale_factor = 3;
6919 break;
6920 default:
6921 {
6922 char sep = *input_line_pointer;
6923
6924 *input_line_pointer = '\0';
6925 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6926 scale);
6927 *input_line_pointer = sep;
6928 input_line_pointer = save;
6929 return NULL;
6930 }
6931 }
6932 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6933 {
6934 as_warn (_("scale factor of %d without an index register"),
6935 1 << i.log2_scale_factor);
6936 i.log2_scale_factor = 0;
6937 }
6938 scale = input_line_pointer;
6939 input_line_pointer = save;
6940 return scale;
6941 }
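/* For instance, for a memory operand such as "(%ebx,%esi,4)" the scale
   string "4" is handed to i386_scale, which records
   i.log2_scale_factor = 2.  */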
6942
6943 static int
6944 i386_displacement (char *disp_start, char *disp_end)
6945 {
6946 expressionS *exp;
6947 segT exp_seg = 0;
6948 char *save_input_line_pointer;
6949 char *gotfree_input_line;
6950 int override;
6951 i386_operand_type bigdisp, types = anydisp;
6952 int ret;
6953
6954 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6955 {
6956 as_bad (_("at most %d displacement operands are allowed"),
6957 MAX_MEMORY_OPERANDS);
6958 return 0;
6959 }
6960
6961 operand_type_set (&bigdisp, 0);
6962 if ((i.types[this_operand].bitfield.jumpabsolute)
6963 || (!current_templates->start->opcode_modifier.jump
6964 && !current_templates->start->opcode_modifier.jumpdword))
6965 {
6966 bigdisp.bitfield.disp32 = 1;
6967 override = (i.prefix[ADDR_PREFIX] != 0);
6968 if (flag_code == CODE_64BIT)
6969 {
6970 if (!override)
6971 {
6972 bigdisp.bitfield.disp32s = 1;
6973 bigdisp.bitfield.disp64 = 1;
6974 }
6975 }
6976 else if ((flag_code == CODE_16BIT) ^ override)
6977 {
6978 bigdisp.bitfield.disp32 = 0;
6979 bigdisp.bitfield.disp16 = 1;
6980 }
6981 }
6982 else
6983 {
6984 /* For PC-relative branches, the width of the displacement
6985 is dependent upon data size, not address size. */
6986 override = (i.prefix[DATA_PREFIX] != 0);
6987 if (flag_code == CODE_64BIT)
6988 {
6989 if (override || i.suffix == WORD_MNEM_SUFFIX)
6990 bigdisp.bitfield.disp16 = 1;
6991 else
6992 {
6993 bigdisp.bitfield.disp32 = 1;
6994 bigdisp.bitfield.disp32s = 1;
6995 }
6996 }
6997 else
6998 {
6999 if (!override)
7000 override = (i.suffix == (flag_code != CODE_16BIT
7001 ? WORD_MNEM_SUFFIX
7002 : LONG_MNEM_SUFFIX));
7003 bigdisp.bitfield.disp32 = 1;
7004 if ((flag_code == CODE_16BIT) ^ override)
7005 {
7006 bigdisp.bitfield.disp32 = 0;
7007 bigdisp.bitfield.disp16 = 1;
7008 }
7009 }
7010 }
7011 i.types[this_operand] = operand_type_or (i.types[this_operand],
7012 bigdisp);
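/* For instance, for a pc-relative branch the width chosen above follows
   the data size: "jmp foo" ("foo" being an arbitrary label) takes a
   Disp32 in 32-bit code and a Disp16 in 16-bit code, and an operand size
   override flips that choice.  */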
7013
7014 exp = &disp_expressions[i.disp_operands];
7015 i.op[this_operand].disps = exp;
7016 i.disp_operands++;
7017 save_input_line_pointer = input_line_pointer;
7018 input_line_pointer = disp_start;
7019 END_STRING_AND_SAVE (disp_end);
7020
7021 #ifndef GCC_ASM_O_HACK
7022 #define GCC_ASM_O_HACK 0
7023 #endif
7024 #if GCC_ASM_O_HACK
7025 END_STRING_AND_SAVE (disp_end + 1);
7026 if (i.types[this_operand].bitfield.baseindex
7027 && displacement_string_end[-1] == '+')
7028 {
7029 /* This hack is to avoid a warning when using the "o"
7030 constraint within gcc asm statements.
7031 For instance:
7032
7033 #define _set_tssldt_desc(n,addr,limit,type) \
7034 __asm__ __volatile__ ( \
7035 "movw %w2,%0\n\t" \
7036 "movw %w1,2+%0\n\t" \
7037 "rorl $16,%1\n\t" \
7038 "movb %b1,4+%0\n\t" \
7039 "movb %4,5+%0\n\t" \
7040 "movb $0,6+%0\n\t" \
7041 "movb %h1,7+%0\n\t" \
7042 "rorl $16,%1" \
7043 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7044
7045 This works great except that the output assembler ends
7046 up looking a bit weird if it turns out that there is
7047 no offset. You end up producing code that looks like:
7048
7049 #APP
7050 movw $235,(%eax)
7051 movw %dx,2+(%eax)
7052 rorl $16,%edx
7053 movb %dl,4+(%eax)
7054 movb $137,5+(%eax)
7055 movb $0,6+(%eax)
7056 movb %dh,7+(%eax)
7057 rorl $16,%edx
7058 #NO_APP
7059
7060 So here we provide the missing zero. */
7061
7062 *displacement_string_end = '0';
7063 }
7064 #endif
7065 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7066 if (gotfree_input_line)
7067 input_line_pointer = gotfree_input_line;
7068
7069 exp_seg = expression (exp);
7070
7071 SKIP_WHITESPACE ();
7072 if (*input_line_pointer)
7073 as_bad (_("junk `%s' after expression"), input_line_pointer);
7074 #if GCC_ASM_O_HACK
7075 RESTORE_END_STRING (disp_end + 1);
7076 #endif
7077 input_line_pointer = save_input_line_pointer;
7078 if (gotfree_input_line)
7079 {
7080 free (gotfree_input_line);
7081
7082 if (exp->X_op == O_constant || exp->X_op == O_register)
7083 exp->X_op = O_illegal;
7084 }
7085
7086 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7087
7088 RESTORE_END_STRING (disp_end);
7089
7090 return ret;
7091 }
7092
7093 static int
7094 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7095 i386_operand_type types, const char *disp_start)
7096 {
7097 i386_operand_type bigdisp;
7098 int ret = 1;
7099
7100 /* We do this to make sure that the section symbol is in
7101 the symbol table. We will ultimately change the relocation
7102 to be relative to the beginning of the section. */
7103 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7104 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7105 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7106 {
7107 if (exp->X_op != O_symbol)
7108 goto inv_disp;
7109
7110 if (S_IS_LOCAL (exp->X_add_symbol)
7111 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7112 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7113 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7114 exp->X_op = O_subtract;
7115 exp->X_op_symbol = GOT_symbol;
7116 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7117 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7118 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7119 i.reloc[this_operand] = BFD_RELOC_64;
7120 else
7121 i.reloc[this_operand] = BFD_RELOC_32;
7122 }
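/* For instance, for "movl foo@GOTOFF(%ebx), %eax" ("foo" being an
   arbitrary symbol) the displacement expression is rewritten above as
   foo - _GLOBAL_OFFSET_TABLE_ and relocated with BFD_RELOC_32.  */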
7123
7124 else if (exp->X_op == O_absent
7125 || exp->X_op == O_illegal
7126 || exp->X_op == O_big)
7127 {
7128 inv_disp:
7129 as_bad (_("missing or invalid displacement expression `%s'"),
7130 disp_start);
7131 ret = 0;
7132 }
7133
7134 else if (flag_code == CODE_64BIT
7135 && !i.prefix[ADDR_PREFIX]
7136 && exp->X_op == O_constant)
7137 {
7138 /* Since the displacement is sign extended to 64bit, don't allow
7139 disp32, and turn off disp32s if it is out of range. */
7140 i.types[this_operand].bitfield.disp32 = 0;
7141 if (!fits_in_signed_long (exp->X_add_number))
7142 {
7143 i.types[this_operand].bitfield.disp32s = 0;
7144 if (i.types[this_operand].bitfield.baseindex)
7145 {
7146 as_bad (_("0x%lx out of range of signed 32bit displacement"),
7147 (long) exp->X_add_number);
7148 ret = 0;
7149 }
7150 }
7151 }
7152
7153 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7154 else if (exp->X_op != O_constant
7155 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7156 && exp_seg != absolute_section
7157 && exp_seg != text_section
7158 && exp_seg != data_section
7159 && exp_seg != bss_section
7160 && exp_seg != undefined_section
7161 && !bfd_is_com_section (exp_seg))
7162 {
7163 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7164 ret = 0;
7165 }
7166 #endif
7167
7168 /* Check if this is a displacement only operand. */
7169 bigdisp = i.types[this_operand];
7170 bigdisp.bitfield.disp8 = 0;
7171 bigdisp.bitfield.disp16 = 0;
7172 bigdisp.bitfield.disp32 = 0;
7173 bigdisp.bitfield.disp32s = 0;
7174 bigdisp.bitfield.disp64 = 0;
7175 if (operand_type_all_zero (&bigdisp))
7176 i.types[this_operand] = operand_type_and (i.types[this_operand],
7177 types);
7178
7179 return ret;
7180 }
7181
7182 /* Make sure the memory operand we've been dealt is valid.
7183 Return 1 on success, 0 on a failure. */
7184
7185 static int
7186 i386_index_check (const char *operand_string)
7187 {
7188 int ok;
7189 const char *kind = "base/index";
7190 #if INFER_ADDR_PREFIX
7191 int fudged = 0;
7192
7193 tryprefix:
7194 #endif
7195 ok = 1;
7196 if (current_templates->start->opcode_modifier.isstring
7197 && !current_templates->start->opcode_modifier.immext
7198 && (current_templates->end[-1].opcode_modifier.isstring
7199 || i.mem_operands))
7200 {
7201 /* Memory operands of string insns are special in that they only allow
7202 a single register (rDI, rSI, or rBX) as their memory address. */
7203 unsigned int expected;
7204
7205 kind = "string address";
7206
7207 if (current_templates->start->opcode_modifier.w)
7208 {
7209 i386_operand_type type = current_templates->end[-1].operand_types[0];
7210
7211 if (!type.bitfield.baseindex
7212 || ((!i.mem_operands != !intel_syntax)
7213 && current_templates->end[-1].operand_types[1]
7214 .bitfield.baseindex))
7215 type = current_templates->end[-1].operand_types[1];
7216 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7217 }
7218 else
7219 expected = 3 /* rBX */;
7220
7221 if (!i.base_reg || i.index_reg
7222 || operand_type_check (i.types[this_operand], disp))
7223 ok = -1;
7224 else if (!(flag_code == CODE_64BIT
7225 ? i.prefix[ADDR_PREFIX]
7226 ? i.base_reg->reg_type.bitfield.reg32
7227 : i.base_reg->reg_type.bitfield.reg64
7228 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7229 ? i.base_reg->reg_type.bitfield.reg32
7230 : i.base_reg->reg_type.bitfield.reg16))
7231 ok = 0;
7232 else if (i.base_reg->reg_num != expected)
7233 ok = -1;
7234
7235 if (ok < 0)
7236 {
7237 unsigned int j;
7238
7239 for (j = 0; j < i386_regtab_size; ++j)
7240 if ((flag_code == CODE_64BIT
7241 ? i.prefix[ADDR_PREFIX]
7242 ? i386_regtab[j].reg_type.bitfield.reg32
7243 : i386_regtab[j].reg_type.bitfield.reg64
7244 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7245 ? i386_regtab[j].reg_type.bitfield.reg32
7246 : i386_regtab[j].reg_type.bitfield.reg16)
7247 && i386_regtab[j].reg_num == expected)
7248 break;
7249 gas_assert (j < i386_regtab_size);
7250 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7251 operand_string,
7252 intel_syntax ? '[' : '(',
7253 register_prefix,
7254 i386_regtab[j].reg_name,
7255 intel_syntax ? ']' : ')');
7256 ok = 1;
7257 }
7258 }
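/* For instance, in 32-bit code "stos %al, (%esi)" trips the check above
   and draws the warning "`(%esi)' is not valid here (expected `(%edi)')";
   assembly then continues, the address register being implicit in the
   encoding anyway.  */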
7259 else if (flag_code == CODE_64BIT)
7260 {
7261 if ((i.base_reg
7262 && ((i.prefix[ADDR_PREFIX] == 0
7263 && !i.base_reg->reg_type.bitfield.reg64)
7264 || (i.prefix[ADDR_PREFIX]
7265 && !i.base_reg->reg_type.bitfield.reg32))
7266 && (i.index_reg
7267 || i.base_reg->reg_num !=
7268 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7269 || (i.index_reg
7270 && !(i.index_reg->reg_type.bitfield.regxmm
7271 || i.index_reg->reg_type.bitfield.regymm)
7272 && (!i.index_reg->reg_type.bitfield.baseindex
7273 || (i.prefix[ADDR_PREFIX] == 0
7274 && i.index_reg->reg_num != RegRiz
7275 && !i.index_reg->reg_type.bitfield.reg64
7276 )
7277 || (i.prefix[ADDR_PREFIX]
7278 && i.index_reg->reg_num != RegEiz
7279 && !i.index_reg->reg_type.bitfield.reg32))))
7280 ok = 0;
7281 }
7282 else
7283 {
7284 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7285 {
7286 /* 16bit checks. */
7287 if ((i.base_reg
7288 && (!i.base_reg->reg_type.bitfield.reg16
7289 || !i.base_reg->reg_type.bitfield.baseindex))
7290 || (i.index_reg
7291 && (!i.index_reg->reg_type.bitfield.reg16
7292 || !i.index_reg->reg_type.bitfield.baseindex
7293 || !(i.base_reg
7294 && i.base_reg->reg_num < 6
7295 && i.index_reg->reg_num >= 6
7296 && i.log2_scale_factor == 0))))
7297 ok = 0;
7298 }
7299 else
7300 {
7301 /* 32bit checks. */
7302 if ((i.base_reg
7303 && !i.base_reg->reg_type.bitfield.reg32)
7304 || (i.index_reg
7305 && !i.index_reg->reg_type.bitfield.regxmm
7306 && !i.index_reg->reg_type.bitfield.regymm
7307 && ((!i.index_reg->reg_type.bitfield.reg32
7308 && i.index_reg->reg_num != RegEiz)
7309 || !i.index_reg->reg_type.bitfield.baseindex)))
7310 ok = 0;
7311 }
7312 }
7313 if (!ok)
7314 {
7315 #if INFER_ADDR_PREFIX
7316 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7317 {
7318 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7319 i.prefixes += 1;
7320 /* Change the size of any displacement too. At most one of
7321 Disp16 or Disp32 is set.
7322 FIXME. There doesn't seem to be any real need for separate
7323 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7324 Removing them would probably clean up the code quite a lot. */
7325 if (flag_code != CODE_64BIT
7326 && (i.types[this_operand].bitfield.disp16
7327 || i.types[this_operand].bitfield.disp32))
7328 i.types[this_operand]
7329 = operand_type_xor (i.types[this_operand], disp16_32);
7330 fudged = 1;
7331 goto tryprefix;
7332 }
7333 if (fudged)
7334 as_bad (_("`%s' is not a valid %s expression"),
7335 operand_string,
7336 kind);
7337 else
7338 #endif
7339 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7340 operand_string,
7341 flag_code_names[i.prefix[ADDR_PREFIX]
7342 ? flag_code == CODE_32BIT
7343 ? CODE_16BIT
7344 : CODE_32BIT
7345 : flag_code],
7346 kind);
7347 }
7348 return ok;
7349 }
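/* For instance, with INFER_ADDR_PREFIX enabled, an operand like
   "foo(%ebx)" in ".code16" ("foo" being an arbitrary symbol) fails the
   16-bit checks above, so i386_index_check silently adds the 0x67
   address-size prefix, swaps any Disp16/Disp32 bits, and retries with
   the 32-bit checks.  */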
7350
7351 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7352 on error. */
7353
7354 static int
7355 i386_att_operand (char *operand_string)
7356 {
7357 const reg_entry *r;
7358 char *end_op;
7359 char *op_string = operand_string;
7360
7361 if (is_space_char (*op_string))
7362 ++op_string;
7363
7364 /* We check for an absolute prefix (differentiating,
7365 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7366 if (*op_string == ABSOLUTE_PREFIX)
7367 {
7368 ++op_string;
7369 if (is_space_char (*op_string))
7370 ++op_string;
7371 i.types[this_operand].bitfield.jumpabsolute = 1;
7372 }
7373
7374 /* Check if operand is a register. */
7375 if ((r = parse_register (op_string, &end_op)) != NULL)
7376 {
7377 i386_operand_type temp;
7378
7379 /* Check for a segment override by searching for ':' after a
7380 segment register. */
7381 op_string = end_op;
7382 if (is_space_char (*op_string))
7383 ++op_string;
7384 if (*op_string == ':'
7385 && (r->reg_type.bitfield.sreg2
7386 || r->reg_type.bitfield.sreg3))
7387 {
7388 switch (r->reg_num)
7389 {
7390 case 0:
7391 i.seg[i.mem_operands] = &es;
7392 break;
7393 case 1:
7394 i.seg[i.mem_operands] = &cs;
7395 break;
7396 case 2:
7397 i.seg[i.mem_operands] = &ss;
7398 break;
7399 case 3:
7400 i.seg[i.mem_operands] = &ds;
7401 break;
7402 case 4:
7403 i.seg[i.mem_operands] = &fs;
7404 break;
7405 case 5:
7406 i.seg[i.mem_operands] = &gs;
7407 break;
7408 }
7409
7410 /* Skip the ':' and whitespace. */
7411 ++op_string;
7412 if (is_space_char (*op_string))
7413 ++op_string;
7414
7415 if (!is_digit_char (*op_string)
7416 && !is_identifier_char (*op_string)
7417 && *op_string != '('
7418 && *op_string != ABSOLUTE_PREFIX)
7419 {
7420 as_bad (_("bad memory operand `%s'"), op_string);
7421 return 0;
7422 }
7423 /* Handle case of %es:*foo. */
7424 if (*op_string == ABSOLUTE_PREFIX)
7425 {
7426 ++op_string;
7427 if (is_space_char (*op_string))
7428 ++op_string;
7429 i.types[this_operand].bitfield.jumpabsolute = 1;
7430 }
7431 goto do_memory_reference;
7432 }
7433 if (*op_string)
7434 {
7435 as_bad (_("junk `%s' after register"), op_string);
7436 return 0;
7437 }
7438 temp = r->reg_type;
7439 temp.bitfield.baseindex = 0;
7440 i.types[this_operand] = operand_type_or (i.types[this_operand],
7441 temp);
7442 i.types[this_operand].bitfield.unspecified = 0;
7443 i.op[this_operand].regs = r;
7444 i.reg_operands++;
7445 }
7446 else if (*op_string == REGISTER_PREFIX)
7447 {
7448 as_bad (_("bad register name `%s'"), op_string);
7449 return 0;
7450 }
7451 else if (*op_string == IMMEDIATE_PREFIX)
7452 {
7453 ++op_string;
7454 if (i.types[this_operand].bitfield.jumpabsolute)
7455 {
7456 as_bad (_("immediate operand illegal with absolute jump"));
7457 return 0;
7458 }
7459 if (!i386_immediate (op_string))
7460 return 0;
7461 }
7462 else if (is_digit_char (*op_string)
7463 || is_identifier_char (*op_string)
7464 || *op_string == '(')
7465 {
7466 /* This is a memory reference of some sort. */
7467 char *base_string;
7468
7469 /* Start and end of displacement string expression (if found). */
7470 char *displacement_string_start;
7471 char *displacement_string_end;
7472
7473 do_memory_reference:
7474 if ((i.mem_operands == 1
7475 && !current_templates->start->opcode_modifier.isstring)
7476 || i.mem_operands == 2)
7477 {
7478 as_bad (_("too many memory references for `%s'"),
7479 current_templates->start->name);
7480 return 0;
7481 }
7482
7483 /* Check for base index form. We detect the base index form by
7484 looking for an ')' at the end of the operand, searching
7485 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7486 after the '('. */
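/* For instance, for "-4(%ebp,%ecx,2)" the scan below finds the trailing
   ')' and its matching '(', leaving "-4" as the displacement string,
   %ebp as the base register, %ecx as the index and "2" as the scale
   factor.  */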
7487 base_string = op_string + strlen (op_string);
7488
7489 --base_string;
7490 if (is_space_char (*base_string))
7491 --base_string;
7492
7493 /* If we only have a displacement, set up for it to be parsed later. */
7494 displacement_string_start = op_string;
7495 displacement_string_end = base_string + 1;
7496
7497 if (*base_string == ')')
7498 {
7499 char *temp_string;
7500 unsigned int parens_balanced = 1;
7501 /* We've already checked that the numbers of left & right ()'s are
7502 equal, so this loop will not be infinite. */
7503 do
7504 {
7505 base_string--;
7506 if (*base_string == ')')
7507 parens_balanced++;
7508 if (*base_string == '(')
7509 parens_balanced--;
7510 }
7511 while (parens_balanced);
7512
7513 temp_string = base_string;
7514
7515 /* Skip past '(' and whitespace. */
7516 ++base_string;
7517 if (is_space_char (*base_string))
7518 ++base_string;
7519
7520 if (*base_string == ','
7521 || ((i.base_reg = parse_register (base_string, &end_op))
7522 != NULL))
7523 {
7524 displacement_string_end = temp_string;
7525
7526 i.types[this_operand].bitfield.baseindex = 1;
7527
7528 if (i.base_reg)
7529 {
7530 base_string = end_op;
7531 if (is_space_char (*base_string))
7532 ++base_string;
7533 }
7534
7535 /* There may be an index reg or scale factor here. */
7536 if (*base_string == ',')
7537 {
7538 ++base_string;
7539 if (is_space_char (*base_string))
7540 ++base_string;
7541
7542 if ((i.index_reg = parse_register (base_string, &end_op))
7543 != NULL)
7544 {
7545 base_string = end_op;
7546 if (is_space_char (*base_string))
7547 ++base_string;
7548 if (*base_string == ',')
7549 {
7550 ++base_string;
7551 if (is_space_char (*base_string))
7552 ++base_string;
7553 }
7554 else if (*base_string != ')')
7555 {
7556 as_bad (_("expecting `,' or `)' "
7557 "after index register in `%s'"),
7558 operand_string);
7559 return 0;
7560 }
7561 }
7562 else if (*base_string == REGISTER_PREFIX)
7563 {
7564 as_bad (_("bad register name `%s'"), base_string);
7565 return 0;
7566 }
7567
7568 /* Check for scale factor. */
7569 if (*base_string != ')')
7570 {
7571 char *end_scale = i386_scale (base_string);
7572
7573 if (!end_scale)
7574 return 0;
7575
7576 base_string = end_scale;
7577 if (is_space_char (*base_string))
7578 ++base_string;
7579 if (*base_string != ')')
7580 {
7581 as_bad (_("expecting `)' "
7582 "after scale factor in `%s'"),
7583 operand_string);
7584 return 0;
7585 }
7586 }
7587 else if (!i.index_reg)
7588 {
7589 as_bad (_("expecting index register or scale factor "
7590 "after `,'; got '%c'"),
7591 *base_string);
7592 return 0;
7593 }
7594 }
7595 else if (*base_string != ')')
7596 {
7597 as_bad (_("expecting `,' or `)' "
7598 "after base register in `%s'"),
7599 operand_string);
7600 return 0;
7601 }
7602 }
7603 else if (*base_string == REGISTER_PREFIX)
7604 {
7605 as_bad (_("bad register name `%s'"), base_string);
7606 return 0;
7607 }
7608 }
7609
7610 /* If there's an expression beginning the operand, parse it,
7611 assuming displacement_string_start and
7612 displacement_string_end are meaningful. */
7613 if (displacement_string_start != displacement_string_end)
7614 {
7615 if (!i386_displacement (displacement_string_start,
7616 displacement_string_end))
7617 return 0;
7618 }
7619
7620 /* Special case for (%dx) while doing input/output op. */
7621 if (i.base_reg
7622 && operand_type_equal (&i.base_reg->reg_type,
7623 &reg16_inoutportreg)
7624 && i.index_reg == 0
7625 && i.log2_scale_factor == 0
7626 && i.seg[i.mem_operands] == 0
7627 && !operand_type_check (i.types[this_operand], disp))
7628 {
7629 i.types[this_operand] = inoutportreg;
7630 return 1;
7631 }
7632
7633 if (i386_index_check (operand_string) == 0)
7634 return 0;
7635 i.types[this_operand].bitfield.mem = 1;
7636 i.mem_operands++;
7637 }
7638 else
7639 {
7640 /* It's not a memory operand; argh! */
7641 as_bad (_("invalid char %s beginning operand %d `%s'"),
7642 output_invalid (*op_string),
7643 this_operand + 1,
7644 op_string);
7645 return 0;
7646 }
7647 return 1; /* Normal return. */
7648 }
7649 \f
7650 /* md_estimate_size_before_relax()
7651
7652 Called just before relax() for rs_machine_dependent frags. The x86
7653 assembler uses these frags to handle variable size jump
7654 instructions.
7655
7656 Any symbol that is now undefined will not become defined.
7657 Return the correct fr_subtype in the frag.
7658 Return the initial "guess for variable size of frag" to caller.
7659 The guess is actually the growth beyond the fixed part. Whatever
7660 we do to grow the fixed or variable part contributes to our
7661 returned value. */
7662
7663 int
7664 md_estimate_size_before_relax (fragS *fragP, segT segment)
7665 {
7666 /* We've already got fragP->fr_subtype right; all we have to do is
7667 check for un-relaxable symbols. On an ELF system, we can't relax
7668 an externally visible symbol, because it may be overridden by a
7669 shared library. */
7670 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7671 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7672 || (IS_ELF
7673 && (S_IS_EXTERNAL (fragP->fr_symbol)
7674 || S_IS_WEAK (fragP->fr_symbol)
7675 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7676 & BSF_GNU_INDIRECT_FUNCTION))))
7677 #endif
7678 #if defined (OBJ_COFF) && defined (TE_PE)
7679 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7680 && S_IS_WEAK (fragP->fr_symbol))
7681 #endif
7682 )
7683 {
7684 /* Symbol is undefined in this segment, or we need to keep a
7685 reloc so that weak symbols can be overridden. */
7686 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7687 enum bfd_reloc_code_real reloc_type;
7688 unsigned char *opcode;
7689 int old_fr_fix;
7690
7691 if (fragP->fr_var != NO_RELOC)
7692 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7693 else if (size == 2)
7694 reloc_type = BFD_RELOC_16_PCREL;
7695 else
7696 reloc_type = BFD_RELOC_32_PCREL;
7697
7698 old_fr_fix = fragP->fr_fix;
7699 opcode = (unsigned char *) fragP->fr_opcode;
7700
7701 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7702 {
7703 case UNCOND_JUMP:
7704 /* Make jmp (0xeb) a (d)word displacement jump. */
7705 opcode[0] = 0xe9;
7706 fragP->fr_fix += size;
7707 fix_new (fragP, old_fr_fix, size,
7708 fragP->fr_symbol,
7709 fragP->fr_offset, 1,
7710 reloc_type);
7711 break;
7712
7713 case COND_JUMP86:
7714 if (size == 2
7715 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7716 {
7717 /* Negate the condition, and branch past an
7718 unconditional jump. */
7719 opcode[0] ^= 1;
7720 opcode[1] = 3;
7721 /* Insert an unconditional jump. */
7722 opcode[2] = 0xe9;
7723 /* We added two extra opcode bytes, and have a two byte
7724 offset. */
7725 fragP->fr_fix += 2 + 2;
7726 fix_new (fragP, old_fr_fix + 2, 2,
7727 fragP->fr_symbol,
7728 fragP->fr_offset, 1,
7729 reloc_type);
7730 break;
7731 }
7732 /* Fall through. */
7733
7734 case COND_JUMP:
7735 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7736 {
7737 fixS *fixP;
7738
7739 fragP->fr_fix += 1;
7740 fixP = fix_new (fragP, old_fr_fix, 1,
7741 fragP->fr_symbol,
7742 fragP->fr_offset, 1,
7743 BFD_RELOC_8_PCREL);
7744 fixP->fx_signed = 1;
7745 break;
7746 }
7747
7748 /* This changes the byte-displacement jump 0x7N
7749 to the (d)word-displacement jump 0x0f,0x8N. */
7750 opcode[1] = opcode[0] + 0x10;
7751 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7752 /* We've added an opcode byte. */
7753 fragP->fr_fix += 1 + size;
7754 fix_new (fragP, old_fr_fix + 1, size,
7755 fragP->fr_symbol,
7756 fragP->fr_offset, 1,
7757 reloc_type);
7758 break;
7759
7760 default:
7761 BAD_CASE (fragP->fr_subtype);
7762 break;
7763 }
7764 frag_wane (fragP);
7765 return fragP->fr_fix - old_fr_fix;
7766 }
7767
7768 /* Guess size depending on current relax state. Initially the relax
7769 state will correspond to a short jump and we return 1, because
7770 the variable part of the frag (the branch offset) is one byte
7771 long. However, we can relax a section more than once and in that
7772 case we must either set fr_subtype back to the unrelaxed state,
7773 or return the value for the appropriate branch. */
7774 return md_relax_table[fragP->fr_subtype].rlx_length;
7775 }
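/* For instance, with a pre-386 arch selected in ".code16", a conditional
   jump to an external symbol, say "jc ext" ("ext" being an arbitrary
   symbol), is rewritten by the COND_JUMP86 case above as "jnc .+5" over
   a "jmp", i.e. the bytes 73 03 e9 <rel16>, since those CPUs have no
   conditional jump with a 16-bit displacement.  */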
7776
7777 /* Called after relax() is finished.
7778
7779 In: Address of frag.
7780 fr_type == rs_machine_dependent.
7781 fr_subtype is what the address relaxed to.
7782
7783 Out: Any fixSs and constants are set up.
7784 Caller will turn frag into a ".space 0". */
7785
7786 void
7787 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7788 fragS *fragP)
7789 {
7790 unsigned char *opcode;
7791 unsigned char *where_to_put_displacement = NULL;
7792 offsetT target_address;
7793 offsetT opcode_address;
7794 unsigned int extension = 0;
7795 offsetT displacement_from_opcode_start;
7796
7797 opcode = (unsigned char *) fragP->fr_opcode;
7798
7799 /* Address we want to reach in file space. */
7800 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7801
7802 /* Address opcode resides at in file space. */
7803 opcode_address = fragP->fr_address + fragP->fr_fix;
7804
7805 /* Displacement from opcode start to fill into instruction. */
7806 displacement_from_opcode_start = target_address - opcode_address;
7807
7808 if ((fragP->fr_subtype & BIG) == 0)
7809 {
7810 /* Don't have to change opcode. */
7811 extension = 1; /* 1 opcode + 1 displacement */
7812 where_to_put_displacement = &opcode[1];
7813 }
7814 else
7815 {
7816 if (no_cond_jump_promotion
7817 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7818 as_warn_where (fragP->fr_file, fragP->fr_line,
7819 _("long jump required"));
7820
7821 switch (fragP->fr_subtype)
7822 {
7823 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7824 extension = 4; /* 1 opcode + 4 displacement */
7825 opcode[0] = 0xe9;
7826 where_to_put_displacement = &opcode[1];
7827 break;
7828
7829 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7830 extension = 2; /* 1 opcode + 2 displacement */
7831 opcode[0] = 0xe9;
7832 where_to_put_displacement = &opcode[1];
7833 break;
7834
7835 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7836 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7837 extension = 5; /* 2 opcode + 4 displacement */
7838 opcode[1] = opcode[0] + 0x10;
7839 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7840 where_to_put_displacement = &opcode[2];
7841 break;
7842
7843 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7844 extension = 3; /* 2 opcode + 2 displacement */
7845 opcode[1] = opcode[0] + 0x10;
7846 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7847 where_to_put_displacement = &opcode[2];
7848 break;
7849
7850 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7851 extension = 4;
7852 opcode[0] ^= 1;
7853 opcode[1] = 3;
7854 opcode[2] = 0xe9;
7855 where_to_put_displacement = &opcode[3];
7856 break;
7857
7858 default:
7859 BAD_CASE (fragP->fr_subtype);
7860 break;
7861 }
7862 }
7863
7864 /* If the size is less than four, we are sure that the operand fits,
7865 but if it's 4, then the displacement could be larger
7866 than -/+ 2GB. */
7867 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7868 && object_64bit
7869 && ((addressT) (displacement_from_opcode_start - extension
7870 + ((addressT) 1 << 31))
7871 > (((addressT) 2 << 31) - 1)))
7872 {
7873 as_bad_where (fragP->fr_file, fragP->fr_line,
7874 _("jump target out of range"));
7875 /* Make us emit 0. */
7876 displacement_from_opcode_start = extension;
7877 }
7878 /* Now put displacement after opcode. */
7879 md_number_to_chars ((char *) where_to_put_displacement,
7880 (valueT) (displacement_from_opcode_start - extension),
7881 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7882 fragP->fr_fix += extension;
7883 }
7884 \f
7885 /* Apply a fixup (fixP) to segment data, once it has been determined
7886 by our caller that we have all the info we need to fix it up.
7887
7888 Parameter valP is the pointer to the value of the bits.
7889
7890 On the 386, immediates, displacements, and data pointers are all in
7891 the same (little-endian) format, so we don't need to care about which
7892 we are handling. */
7893
7894 void
7895 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7896 {
7897 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7898 valueT value = *valP;
7899
7900 #if !defined (TE_Mach)
7901 if (fixP->fx_pcrel)
7902 {
7903 switch (fixP->fx_r_type)
7904 {
7905 default:
7906 break;
7907
7908 case BFD_RELOC_64:
7909 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7910 break;
7911 case BFD_RELOC_32:
7912 case BFD_RELOC_X86_64_32S:
7913 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7914 break;
7915 case BFD_RELOC_16:
7916 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7917 break;
7918 case BFD_RELOC_8:
7919 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7920 break;
7921 }
7922 }
7923
7924 if (fixP->fx_addsy != NULL
7925 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7926 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7927 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7928 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7929 && !use_rela_relocations)
7930 {
7931 /* This is a hack. There should be a better way to handle this.
7932 This covers for the fact that bfd_install_relocation will
7933 subtract the current location (for partial_inplace, PC relative
7934 relocations); see more below. */
7935 #ifndef OBJ_AOUT
7936 if (IS_ELF
7937 #ifdef TE_PE
7938 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7939 #endif
7940 )
7941 value += fixP->fx_where + fixP->fx_frag->fr_address;
7942 #endif
7943 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7944 if (IS_ELF)
7945 {
7946 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7947
7948 if ((sym_seg == seg
7949 || (symbol_section_p (fixP->fx_addsy)
7950 && sym_seg != absolute_section))
7951 && !generic_force_reloc (fixP))
7952 {
7953 /* Yes, we add the values in twice. This is because
7954 bfd_install_relocation subtracts them out again. I think
7955 bfd_install_relocation is broken, but I don't dare change
7956 it. FIXME. */
7957 value += fixP->fx_where + fixP->fx_frag->fr_address;
7958 }
7959 }
7960 #endif
7961 #if defined (OBJ_COFF) && defined (TE_PE)
7962 /* For some reason, the PE format does not store a
7963 section address offset for a PC relative symbol. */
7964 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7965 || S_IS_WEAK (fixP->fx_addsy))
7966 value += md_pcrel_from (fixP);
7967 #endif
7968 }
7969 #if defined (OBJ_COFF) && defined (TE_PE)
7970 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7971 {
7972 value -= S_GET_VALUE (fixP->fx_addsy);
7973 }
7974 #endif
7975
7976 /* Fix a few things - the dynamic linker expects certain values here,
7977 and we must not disappoint it. */
7978 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7979 if (IS_ELF && fixP->fx_addsy)
7980 switch (fixP->fx_r_type)
7981 {
7982 case BFD_RELOC_386_PLT32:
7983 case BFD_RELOC_X86_64_PLT32:
7984 /* Make the jump instruction point to the address of the operand. At
7985 runtime we merely add the offset to the actual PLT entry. */
7986 value = -4;
7987 break;
7988
7989 case BFD_RELOC_386_TLS_GD:
7990 case BFD_RELOC_386_TLS_LDM:
7991 case BFD_RELOC_386_TLS_IE_32:
7992 case BFD_RELOC_386_TLS_IE:
7993 case BFD_RELOC_386_TLS_GOTIE:
7994 case BFD_RELOC_386_TLS_GOTDESC:
7995 case BFD_RELOC_X86_64_TLSGD:
7996 case BFD_RELOC_X86_64_TLSLD:
7997 case BFD_RELOC_X86_64_GOTTPOFF:
7998 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7999 value = 0; /* Fully resolved at runtime. No addend. */
8000 /* Fallthrough */
8001 case BFD_RELOC_386_TLS_LE:
8002 case BFD_RELOC_386_TLS_LDO_32:
8003 case BFD_RELOC_386_TLS_LE_32:
8004 case BFD_RELOC_X86_64_DTPOFF32:
8005 case BFD_RELOC_X86_64_DTPOFF64:
8006 case BFD_RELOC_X86_64_TPOFF32:
8007 case BFD_RELOC_X86_64_TPOFF64:
8008 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8009 break;
8010
8011 case BFD_RELOC_386_TLS_DESC_CALL:
8012 case BFD_RELOC_X86_64_TLSDESC_CALL:
8013 value = 0; /* Fully resolved at runtime. No addend. */
8014 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8015 fixP->fx_done = 0;
8016 return;
8017
8018 case BFD_RELOC_386_GOT32:
8019 case BFD_RELOC_X86_64_GOT32:
8020 value = 0; /* Fully resolved at runtime. No addend. */
8021 break;
8022
8023 case BFD_RELOC_VTABLE_INHERIT:
8024 case BFD_RELOC_VTABLE_ENTRY:
8025 fixP->fx_done = 0;
8026 return;
8027
8028 default:
8029 break;
8030 }
8031 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8032 *valP = value;
8033 #endif /* !defined (TE_Mach) */
8034
8035 /* Are we finished with this relocation now? */
8036 if (fixP->fx_addsy == NULL)
8037 fixP->fx_done = 1;
8038 #if defined (OBJ_COFF) && defined (TE_PE)
8039 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8040 {
8041 fixP->fx_done = 0;
8042 /* Remember value for tc_gen_reloc. */
8043 fixP->fx_addnumber = value;
8044 /* Clear out the frag for now. */
8045 value = 0;
8046 }
8047 #endif
8048 else if (use_rela_relocations)
8049 {
8050 fixP->fx_no_overflow = 1;
8051 /* Remember value for tc_gen_reloc. */
8052 fixP->fx_addnumber = value;
8053 value = 0;
8054 }
8055
8056 md_number_to_chars (p, value, fixP->fx_size);
8057 }
8058 \f
8059 char *
8060 md_atof (int type, char *litP, int *sizeP)
8061 {
8062   /* This outputs the LITTLENUMs in REVERSE order;
8063      in accord with the little-endian 386.  */
8064 return ieee_md_atof (type, litP, sizeP, FALSE);
8065 }
8066 \f
8067 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8068
8069 static char *
8070 output_invalid (int c)
8071 {
8072 if (ISPRINT (c))
8073 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8074 "'%c'", c);
8075 else
8076 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8077 "(0x%x)", (unsigned char) c);
8078 return output_invalid_buf;
8079 }
8080
8081 /* REG_STRING starts *before* REGISTER_PREFIX. */
8082
8083 static const reg_entry *
8084 parse_real_register (char *reg_string, char **end_op)
8085 {
8086 char *s = reg_string;
8087 char *p;
8088 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8089 const reg_entry *r;
8090
8091 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8092 if (*s == REGISTER_PREFIX)
8093 ++s;
8094
8095 if (is_space_char (*s))
8096 ++s;
8097
8098 p = reg_name_given;
8099 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8100 {
8101 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8102 return (const reg_entry *) NULL;
8103 s++;
8104 }
8105
8106 /* For naked regs, make sure that we are not dealing with an identifier.
8107 This prevents confusing an identifier like `eax_var' with register
8108 `eax'. */
8109 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8110 return (const reg_entry *) NULL;
8111
8112 *end_op = s;
8113
8114 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8115
8116 /* Handle floating point regs, allowing spaces in the (i) part. */
8117 if (r == i386_regtab /* %st is first entry of table */)
8118 {
8119 if (is_space_char (*s))
8120 ++s;
8121 if (*s == '(')
8122 {
8123 ++s;
8124 if (is_space_char (*s))
8125 ++s;
8126 if (*s >= '0' && *s <= '7')
8127 {
8128 int fpr = *s - '0';
8129 ++s;
8130 if (is_space_char (*s))
8131 ++s;
8132 if (*s == ')')
8133 {
8134 *end_op = s + 1;
8135 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8136 know (r);
8137 return r + fpr;
8138 }
8139 }
8140 /* We have "%st(" then garbage. */
8141 return (const reg_entry *) NULL;
8142 }
8143 }
8144
8145 if (r == NULL || allow_pseudo_reg)
8146 return r;
8147
8148 if (operand_type_all_zero (&r->reg_type))
8149 return (const reg_entry *) NULL;
8150
8151 if ((r->reg_type.bitfield.reg32
8152 || r->reg_type.bitfield.sreg3
8153 || r->reg_type.bitfield.control
8154 || r->reg_type.bitfield.debug
8155 || r->reg_type.bitfield.test)
8156 && !cpu_arch_flags.bitfield.cpui386)
8157 return (const reg_entry *) NULL;
8158
8159 if (r->reg_type.bitfield.floatreg
8160 && !cpu_arch_flags.bitfield.cpu8087
8161 && !cpu_arch_flags.bitfield.cpu287
8162 && !cpu_arch_flags.bitfield.cpu387)
8163 return (const reg_entry *) NULL;
8164
8165 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8166 return (const reg_entry *) NULL;
8167
8168 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8169 return (const reg_entry *) NULL;
8170
8171 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8172 return (const reg_entry *) NULL;
8173
8174   /* Only allow the fake index registers when allow_index_reg is set.  */
8175 if (!allow_index_reg
8176 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8177 return (const reg_entry *) NULL;
8178
8179 if (((r->reg_flags & (RegRex64 | RegRex))
8180 || r->reg_type.bitfield.reg64)
8181 && (!cpu_arch_flags.bitfield.cpulm
8182 || !operand_type_equal (&r->reg_type, &control))
8183 && flag_code != CODE_64BIT)
8184 return (const reg_entry *) NULL;
8185
8186 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8187 return (const reg_entry *) NULL;
8188
8189 return r;
8190 }
8191
8192 /* REG_STRING starts *before* REGISTER_PREFIX. */
8193
8194 static const reg_entry *
8195 parse_register (char *reg_string, char **end_op)
8196 {
8197 const reg_entry *r;
8198
8199 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8200 r = parse_real_register (reg_string, end_op);
8201 else
8202 r = NULL;
8203 if (!r)
8204 {
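      /* Not a real register: check whether the name is a symbol that has
	 been equated to a register (such symbols live in reg_section).  */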
8205 char *save = input_line_pointer;
8206 char c;
8207 symbolS *symbolP;
8208
8209 input_line_pointer = reg_string;
8210 c = get_symbol_end ();
8211 symbolP = symbol_find (reg_string);
8212 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8213 {
8214 const expressionS *e = symbol_get_value_expression (symbolP);
8215
8216 know (e->X_op == O_register);
8217 know (e->X_add_number >= 0
8218 && (valueT) e->X_add_number < i386_regtab_size);
8219 r = i386_regtab + e->X_add_number;
8220 *end_op = input_line_pointer;
8221 }
8222 *input_line_pointer = c;
8223 input_line_pointer = save;
8224 }
8225 return r;
8226 }
8227
8228 int
8229 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8230 {
8231 const reg_entry *r;
8232 char *end = input_line_pointer;
8233
8234 *end = *nextcharP;
8235 r = parse_register (name, &input_line_pointer);
8236 if (r && end <= input_line_pointer)
8237 {
8238 *nextcharP = *input_line_pointer;
8239 *input_line_pointer = 0;
8240 e->X_op = O_register;
8241 e->X_add_number = r - i386_regtab;
8242 return 1;
8243 }
8244 input_line_pointer = end;
8245 *end = 0;
8246 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8247 }
8248
8249 void
8250 md_operand (expressionS *e)
8251 {
8252 char *end;
8253 const reg_entry *r;
8254
8255 switch (*input_line_pointer)
8256 {
8257 case REGISTER_PREFIX:
8258 r = parse_real_register (input_line_pointer, &end);
8259 if (r)
8260 {
8261 e->X_op = O_register;
8262 e->X_add_number = r - i386_regtab;
8263 input_line_pointer = end;
8264 }
8265 break;
8266
8267 case '[':
8268 gas_assert (intel_syntax);
8269 end = input_line_pointer++;
8270 expression (e);
8271 if (*input_line_pointer == ']')
8272 {
8273 ++input_line_pointer;
8274 e->X_op_symbol = make_expr_symbol (e);
8275 e->X_add_symbol = NULL;
8276 e->X_add_number = 0;
8277 e->X_op = O_index;
8278 }
8279 else
8280 {
8281 e->X_op = O_absent;
8282 input_line_pointer = end;
8283 }
8284 break;
8285 }
8286 }
8287
8288 \f
8289 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8290 const char *md_shortopts = "kVQ:sqn";
8291 #else
8292 const char *md_shortopts = "qn";
8293 #endif
8294
8295 #define OPTION_32 (OPTION_MD_BASE + 0)
8296 #define OPTION_64 (OPTION_MD_BASE + 1)
8297 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8298 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8299 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8300 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8301 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8302 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8303 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8304 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8305 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8306 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8307 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8308 #define OPTION_X32 (OPTION_MD_BASE + 13)
8309
8310 struct option md_longopts[] =
8311 {
8312 {"32", no_argument, NULL, OPTION_32},
8313 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8314 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8315 {"64", no_argument, NULL, OPTION_64},
8316 #endif
8317 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8318 {"x32", no_argument, NULL, OPTION_X32},
8319 #endif
8320 {"divide", no_argument, NULL, OPTION_DIVIDE},
8321 {"march", required_argument, NULL, OPTION_MARCH},
8322 {"mtune", required_argument, NULL, OPTION_MTUNE},
8323 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8324 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8325 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8326 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8327 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8328 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8329 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8330 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8331 {NULL, no_argument, NULL, 0}
8332 };
8333 size_t md_longopts_size = sizeof (md_longopts);
8334
8335 int
8336 md_parse_option (int c, char *arg)
8337 {
8338 unsigned int j;
8339 char *arch, *next;
8340
8341 switch (c)
8342 {
8343 case 'n':
8344 optimize_align_code = 0;
8345 break;
8346
8347 case 'q':
8348 quiet_warnings = 1;
8349 break;
8350
8351 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8352 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8353 should be emitted or not. FIXME: Not implemented. */
8354 case 'Q':
8355 break;
8356
8357 /* -V: SVR4 argument to print version ID. */
8358 case 'V':
8359 print_version_id ();
8360 break;
8361
8362 /* -k: Ignore for FreeBSD compatibility. */
8363 case 'k':
8364 break;
8365
8366 case 's':
8367 /* -s: On i386 Solaris, this tells the native assembler to use
8368 .stab instead of .stab.excl. We always use .stab anyhow. */
8369 break;
8370 #endif
8371 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8372 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8373 case OPTION_64:
8374 {
8375 const char **list, **l;
8376
8377 list = bfd_target_list ();
8378 for (l = list; *l != NULL; l++)
8379 if (CONST_STRNEQ (*l, "elf64-x86-64")
8380 || strcmp (*l, "coff-x86-64") == 0
8381 || strcmp (*l, "pe-x86-64") == 0
8382 || strcmp (*l, "pei-x86-64") == 0
8383 || strcmp (*l, "mach-o-x86-64") == 0)
8384 {
8385 default_arch = "x86_64";
8386 break;
8387 }
8388 if (*l == NULL)
8389 as_fatal (_("no compiled in support for x86_64"));
8390 free (list);
8391 }
8392 break;
8393 #endif
8394
8395 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8396 case OPTION_X32:
8397 if (IS_ELF)
8398 {
8399 const char **list, **l;
8400
8401 list = bfd_target_list ();
8402 for (l = list; *l != NULL; l++)
8403 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8404 {
8405 default_arch = "x86_64:32";
8406 break;
8407 }
8408 if (*l == NULL)
8409 as_fatal (_("no compiled in support for 32bit x86_64"));
8410 free (list);
8411 }
8412 else
8413 as_fatal (_("32bit x86_64 is only supported for ELF"));
8414 break;
8415 #endif
8416
8417 case OPTION_32:
8418 default_arch = "i386";
8419 break;
8420
8421 case OPTION_DIVIDE:
8422 #ifdef SVR4_COMMENT_CHARS
8423 {
8424 char *n, *t;
8425 const char *s;
8426
8427 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8428 t = n;
8429 for (s = i386_comment_chars; *s != '\0'; s++)
8430 if (*s != '/')
8431 *t++ = *s;
8432 *t = '\0';
8433 i386_comment_chars = n;
8434 }
8435 #endif
8436 break;
8437
8438 case OPTION_MARCH:
8439 arch = xstrdup (arg);
8440 do
8441 {
8442 if (*arch == '.')
8443 as_fatal (_("invalid -march= option: `%s'"), arg);
8444 next = strchr (arch, '+');
8445 if (next)
8446 *next++ = '\0';
8447 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8448 {
8449 if (strcmp (arch, cpu_arch [j].name) == 0)
8450 {
8451 /* Processor. */
8452 if (! cpu_arch[j].flags.bitfield.cpui386)
8453 continue;
8454
8455 cpu_arch_name = cpu_arch[j].name;
8456 cpu_sub_arch_name = NULL;
8457 cpu_arch_flags = cpu_arch[j].flags;
8458 cpu_arch_isa = cpu_arch[j].type;
8459 cpu_arch_isa_flags = cpu_arch[j].flags;
8460 if (!cpu_arch_tune_set)
8461 {
8462 cpu_arch_tune = cpu_arch_isa;
8463 cpu_arch_tune_flags = cpu_arch_isa_flags;
8464 }
8465 break;
8466 }
8467 else if (*cpu_arch [j].name == '.'
8468 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8469 {
8470 	      /* ISA extension.  */
8471 i386_cpu_flags flags;
8472
8473 if (!cpu_arch[j].negated)
8474 flags = cpu_flags_or (cpu_arch_flags,
8475 cpu_arch[j].flags);
8476 else
8477 flags = cpu_flags_and_not (cpu_arch_flags,
8478 cpu_arch[j].flags);
8479 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8480 {
8481 if (cpu_sub_arch_name)
8482 {
8483 char *name = cpu_sub_arch_name;
8484 cpu_sub_arch_name = concat (name,
8485 cpu_arch[j].name,
8486 (const char *) NULL);
8487 free (name);
8488 }
8489 else
8490 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8491 cpu_arch_flags = flags;
8492 cpu_arch_isa_flags = flags;
8493 }
8494 break;
8495 }
8496 }
8497
8498 if (j >= ARRAY_SIZE (cpu_arch))
8499 as_fatal (_("invalid -march= option: `%s'"), arg);
8500
8501 arch = next;
8502 }
8503       while (next != NULL);
8504 break;
8505
8506 case OPTION_MTUNE:
8507 if (*arg == '.')
8508 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8509 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8510 {
8511 if (strcmp (arg, cpu_arch [j].name) == 0)
8512 {
8513 cpu_arch_tune_set = 1;
8514 cpu_arch_tune = cpu_arch [j].type;
8515 cpu_arch_tune_flags = cpu_arch[j].flags;
8516 break;
8517 }
8518 }
8519 if (j >= ARRAY_SIZE (cpu_arch))
8520 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8521 break;
8522
8523 case OPTION_MMNEMONIC:
8524 if (strcasecmp (arg, "att") == 0)
8525 intel_mnemonic = 0;
8526 else if (strcasecmp (arg, "intel") == 0)
8527 intel_mnemonic = 1;
8528 else
8529 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8530 break;
8531
8532 case OPTION_MSYNTAX:
8533 if (strcasecmp (arg, "att") == 0)
8534 intel_syntax = 0;
8535 else if (strcasecmp (arg, "intel") == 0)
8536 intel_syntax = 1;
8537 else
8538 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8539 break;
8540
8541 case OPTION_MINDEX_REG:
8542 allow_index_reg = 1;
8543 break;
8544
8545 case OPTION_MNAKED_REG:
8546 allow_naked_reg = 1;
8547 break;
8548
8549 case OPTION_MOLD_GCC:
8550 old_gcc = 1;
8551 break;
8552
8553 case OPTION_MSSE2AVX:
8554 sse2avx = 1;
8555 break;
8556
8557 case OPTION_MSSE_CHECK:
8558 if (strcasecmp (arg, "error") == 0)
8559 sse_check = sse_check_error;
8560 else if (strcasecmp (arg, "warning") == 0)
8561 sse_check = sse_check_warning;
8562 else if (strcasecmp (arg, "none") == 0)
8563 sse_check = sse_check_none;
8564 else
8565 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8566 break;
8567
8568 case OPTION_MAVXSCALAR:
8569 if (strcasecmp (arg, "128") == 0)
8570 avxscalar = vex128;
8571 else if (strcasecmp (arg, "256") == 0)
8572 avxscalar = vex256;
8573 else
8574 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8575 break;
8576
8577 default:
8578 return 0;
8579 }
8580 return 1;
8581 }
8582
8583 #define MESSAGE_TEMPLATE \
8584 " "
8585
8586 static void
8587 show_arch (FILE *stream, int ext, int check)
8588 {
8589 static char message[] = MESSAGE_TEMPLATE;
8590 char *start = message + 27;
8591 char *p;
8592 int size = sizeof (MESSAGE_TEMPLATE);
8593 int left;
8594 const char *name;
8595 int len;
8596 unsigned int j;
8597
8598 p = start;
8599 left = size - (start - message);
8600 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8601 {
8602 /* Should it be skipped? */
8603 if (cpu_arch [j].skip)
8604 continue;
8605
8606 name = cpu_arch [j].name;
8607 len = cpu_arch [j].len;
8608 if (*name == '.')
8609 {
8610 /* It is an extension. Skip if we aren't asked to show it. */
8611 if (ext)
8612 {
8613 name++;
8614 len--;
8615 }
8616 else
8617 continue;
8618 }
8619 else if (ext)
8620 {
8621 	  /* It is a processor.  Skip if we only show extensions.  */
8622 continue;
8623 }
8624 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8625 {
8626 /* It is an impossible processor - skip. */
8627 continue;
8628 }
8629
8630 /* Reserve 2 spaces for ", " or ",\0" */
8631 left -= len + 2;
8632
8633 /* Check if there is any room. */
8634 if (left >= 0)
8635 {
8636 if (p != start)
8637 {
8638 *p++ = ',';
8639 *p++ = ' ';
8640 }
8641 p = mempcpy (p, name, len);
8642 }
8643 else
8644 {
8645 /* Output the current message now and start a new one. */
8646 *p++ = ',';
8647 *p = '\0';
8648 fprintf (stream, "%s\n", message);
8649 p = start;
8650 left = size - (start - message) - len - 2;
8651
8652 gas_assert (left >= 0);
8653
8654 p = mempcpy (p, name, len);
8655 }
8656 }
8657
8658 *p = '\0';
8659 fprintf (stream, "%s\n", message);
8660 }
8661
8662 void
8663 md_show_usage (FILE *stream)
8664 {
8665 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8666 fprintf (stream, _("\
8667 -Q ignored\n\
8668 -V print assembler version number\n\
8669 -k ignored\n"));
8670 #endif
8671 fprintf (stream, _("\
8672 -n Do not optimize code alignment\n\
8673 -q quieten some warnings\n"));
8674 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8675 fprintf (stream, _("\
8676 -s ignored\n"));
8677 #endif
8678 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8679 || defined (TE_PE) || defined (TE_PEP))
8680 fprintf (stream, _("\
8681 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8682 #endif
8683 #ifdef SVR4_COMMENT_CHARS
8684 fprintf (stream, _("\
8685 --divide do not treat `/' as a comment character\n"));
8686 #else
8687 fprintf (stream, _("\
8688 --divide ignored\n"));
8689 #endif
8690 fprintf (stream, _("\
8691 -march=CPU[,+EXTENSION...]\n\
8692 generate code for CPU and EXTENSION, CPU is one of:\n"));
8693 show_arch (stream, 0, 1);
8694 fprintf (stream, _("\
8695                           EXTENSION is a combination of:\n"));
8696 show_arch (stream, 1, 0);
8697 fprintf (stream, _("\
8698 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8699 show_arch (stream, 0, 0);
8700 fprintf (stream, _("\
8701 -msse2avx encode SSE instructions with VEX prefix\n"));
8702 fprintf (stream, _("\
8703 -msse-check=[none|error|warning]\n\
8704 check SSE instructions\n"));
8705 fprintf (stream, _("\
8706 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8707 length\n"));
8708 fprintf (stream, _("\
8709 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8710 fprintf (stream, _("\
8711 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8712 fprintf (stream, _("\
8713 -mindex-reg support pseudo index registers\n"));
8714 fprintf (stream, _("\
8715 -mnaked-reg don't require `%%' prefix for registers\n"));
8716 fprintf (stream, _("\
8717 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8718 }
8719
8720 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8721 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8722 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8723
8724 /* Pick the target format to use. */
8725
8726 const char *
8727 i386_target_format (void)
8728 {
8729 if (!strncmp (default_arch, "x86_64", 6))
8730 {
8731 update_code_flag (CODE_64BIT, 1);
8732 if (default_arch[6] == '\0')
8733 x86_elf_abi = X86_64_ABI;
8734 else
8735 x86_elf_abi = X86_64_X32_ABI;
8736 }
8737 else if (!strcmp (default_arch, "i386"))
8738 update_code_flag (CODE_32BIT, 1);
8739 else
8740 as_fatal (_("unknown architecture"));
8741
8742 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8743 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8744 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8745 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8746
8747 switch (OUTPUT_FLAVOR)
8748 {
8749 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8750 case bfd_target_aout_flavour:
8751 return AOUT_TARGET_FORMAT;
8752 #endif
8753 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8754 # if defined (TE_PE) || defined (TE_PEP)
8755 case bfd_target_coff_flavour:
8756 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8757 # elif defined (TE_GO32)
8758 case bfd_target_coff_flavour:
8759 return "coff-go32";
8760 # else
8761 case bfd_target_coff_flavour:
8762 return "coff-i386";
8763 # endif
8764 #endif
8765 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8766 case bfd_target_elf_flavour:
8767 {
8768 const char *format;
8769
8770 switch (x86_elf_abi)
8771 {
8772 default:
8773 format = ELF_TARGET_FORMAT;
8774 break;
8775 case X86_64_ABI:
8776 use_rela_relocations = 1;
8777 object_64bit = 1;
8778 format = ELF_TARGET_FORMAT64;
8779 break;
8780 case X86_64_X32_ABI:
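	    /* x32: 64-bit code in a 32-bit ELF container; relocation
	       types that need 64 bits are diagnosed later via
	       disallow_64bit_reloc.  */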
8781 use_rela_relocations = 1;
8782 object_64bit = 1;
8783 disallow_64bit_reloc = 1;
8784 format = ELF_TARGET_FORMAT32;
8785 break;
8786 }
8787 if (cpu_arch_isa == PROCESSOR_L1OM)
8788 {
8789 if (x86_elf_abi != X86_64_ABI)
8790 as_fatal (_("Intel L1OM is 64bit only"));
8791 return ELF_TARGET_L1OM_FORMAT;
8792 }
8793 if (cpu_arch_isa == PROCESSOR_K1OM)
8794 {
8795 if (x86_elf_abi != X86_64_ABI)
8796 as_fatal (_("Intel K1OM is 64bit only"));
8797 return ELF_TARGET_K1OM_FORMAT;
8798 }
8799 else
8800 return format;
8801 }
8802 #endif
8803 #if defined (OBJ_MACH_O)
8804 case bfd_target_mach_o_flavour:
8805 if (flag_code == CODE_64BIT)
8806 {
8807 use_rela_relocations = 1;
8808 object_64bit = 1;
8809 return "mach-o-x86-64";
8810 }
8811 else
8812 return "mach-o-i386";
8813 #endif
8814 default:
8815 abort ();
8816 return NULL;
8817 }
8818 }
8819
8820 #endif /* OBJ_MAYBE_ more than one */
8821
8822 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8823 void
8824 i386_elf_emit_arch_note (void)
8825 {
8826 if (IS_ELF && cpu_arch_name != NULL)
8827 {
8828 char *p;
8829 asection *seg = now_seg;
8830 subsegT subseg = now_subseg;
8831 Elf_Internal_Note i_note;
8832 Elf_External_Note e_note;
8833 asection *note_secp;
8834 int len;
8835
8836 /* Create the .note section. */
8837 note_secp = subseg_new (".note", 0);
8838 bfd_set_section_flags (stdoutput,
8839 note_secp,
8840 SEC_HAS_CONTENTS | SEC_READONLY);
8841
8842 /* Process the arch string. */
8843 len = strlen (cpu_arch_name);
8844
8845 i_note.namesz = len + 1;
8846 i_note.descsz = 0;
8847 i_note.type = NT_ARCH;
8848 p = frag_more (sizeof (e_note.namesz));
8849 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8850 p = frag_more (sizeof (e_note.descsz));
8851 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8852 p = frag_more (sizeof (e_note.type));
8853 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8854 p = frag_more (len + 1);
8855 strcpy (p, cpu_arch_name);
8856
8857 frag_align (2, 0, 0);
8858
8859 subseg_set (seg, subseg);
8860 }
8861 }
8862 #endif
8863 \f
8864 symbolS *
8865 md_undefined_symbol (char *name)
8866 {
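  /* Cheap check on the first characters before doing the full strcmp
     against GLOBAL_OFFSET_TABLE_NAME.  */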
8867 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8868 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8869 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8870 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8871 {
8872 if (!GOT_symbol)
8873 {
8874 if (symbol_find (name))
8875 as_bad (_("GOT already in symbol table"));
8876 GOT_symbol = symbol_new (name, undefined_section,
8877 (valueT) 0, &zero_address_frag);
8878 	}
8879 return GOT_symbol;
8880 }
8881 return 0;
8882 }
8883
8884 /* Round up a section size to the appropriate boundary. */
8885
8886 valueT
8887 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8888 {
8889 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8890 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8891 {
8892 /* For a.out, force the section size to be aligned. If we don't do
8893 this, BFD will align it for us, but it will not write out the
8894 final bytes of the section. This may be a bug in BFD, but it is
8895 easier to fix it here since that is how the other a.out targets
8896 work. */
8897 int align;
8898
8899 align = bfd_get_section_alignment (stdoutput, segment);
8900 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8901 }
8902 #endif
8903
8904 return size;
8905 }
8906
8907 /* On the i386, PC-relative offsets are relative to the start of the
8908 next instruction. That is, the address of the offset, plus its
8909 size, since the offset is always the last part of the insn. */
8910
8911 long
8912 md_pcrel_from (fixS *fixP)
8913 {
8914 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8915 }
8916
8917 #ifndef I386COFF
8918
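/* Handle the `.bss' directive: switch to the requested subsection of
   the bss section.  */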
8919 static void
8920 s_bss (int ignore ATTRIBUTE_UNUSED)
8921 {
8922 int temp;
8923
8924 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8925 if (IS_ELF)
8926 obj_elf_section_change_hook ();
8927 #endif
8928 temp = get_absolute_expression ();
8929 subseg_set (bss_section, (subsegT) temp);
8930 demand_empty_rest_of_line ();
8931 }
8932
8933 #endif
8934
8935 void
8936 i386_validate_fix (fixS *fixp)
8937 {
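  /* An expression that subtracts the GOT symbol is really a GOT-relative
     reference; rewrite it as the matching GOT relocation and drop the
     subtracted symbol.  */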
8938 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8939 {
8940 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8941 {
8942 if (!object_64bit)
8943 abort ();
8944 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8945 }
8946 else
8947 {
8948 if (!object_64bit)
8949 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8950 else
8951 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8952 }
8953 fixp->fx_subsy = 0;
8954 }
8955 }
8956
8957 arelent *
8958 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8959 {
8960 arelent *rel;
8961 bfd_reloc_code_real_type code;
8962
8963 switch (fixp->fx_r_type)
8964 {
8965 case BFD_RELOC_X86_64_PLT32:
8966 case BFD_RELOC_X86_64_GOT32:
8967 case BFD_RELOC_X86_64_GOTPCREL:
8968 case BFD_RELOC_386_PLT32:
8969 case BFD_RELOC_386_GOT32:
8970 case BFD_RELOC_386_GOTOFF:
8971 case BFD_RELOC_386_GOTPC:
8972 case BFD_RELOC_386_TLS_GD:
8973 case BFD_RELOC_386_TLS_LDM:
8974 case BFD_RELOC_386_TLS_LDO_32:
8975 case BFD_RELOC_386_TLS_IE_32:
8976 case BFD_RELOC_386_TLS_IE:
8977 case BFD_RELOC_386_TLS_GOTIE:
8978 case BFD_RELOC_386_TLS_LE_32:
8979 case BFD_RELOC_386_TLS_LE:
8980 case BFD_RELOC_386_TLS_GOTDESC:
8981 case BFD_RELOC_386_TLS_DESC_CALL:
8982 case BFD_RELOC_X86_64_TLSGD:
8983 case BFD_RELOC_X86_64_TLSLD:
8984 case BFD_RELOC_X86_64_DTPOFF32:
8985 case BFD_RELOC_X86_64_DTPOFF64:
8986 case BFD_RELOC_X86_64_GOTTPOFF:
8987 case BFD_RELOC_X86_64_TPOFF32:
8988 case BFD_RELOC_X86_64_TPOFF64:
8989 case BFD_RELOC_X86_64_GOTOFF64:
8990 case BFD_RELOC_X86_64_GOTPC32:
8991 case BFD_RELOC_X86_64_GOT64:
8992 case BFD_RELOC_X86_64_GOTPCREL64:
8993 case BFD_RELOC_X86_64_GOTPC64:
8994 case BFD_RELOC_X86_64_GOTPLT64:
8995 case BFD_RELOC_X86_64_PLTOFF64:
8996 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8997 case BFD_RELOC_X86_64_TLSDESC_CALL:
8998 case BFD_RELOC_RVA:
8999 case BFD_RELOC_VTABLE_ENTRY:
9000 case BFD_RELOC_VTABLE_INHERIT:
9001 #ifdef TE_PE
9002 case BFD_RELOC_32_SECREL:
9003 #endif
9004 code = fixp->fx_r_type;
9005 break;
9006 case BFD_RELOC_X86_64_32S:
9007 if (!fixp->fx_pcrel)
9008 {
9009 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9010 code = fixp->fx_r_type;
9011 break;
9012 }
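      /* Fall through for the pc-relative case.  */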
9013 default:
9014 if (fixp->fx_pcrel)
9015 {
9016 switch (fixp->fx_size)
9017 {
9018 default:
9019 as_bad_where (fixp->fx_file, fixp->fx_line,
9020 _("can not do %d byte pc-relative relocation"),
9021 fixp->fx_size);
9022 code = BFD_RELOC_32_PCREL;
9023 break;
9024 case 1: code = BFD_RELOC_8_PCREL; break;
9025 case 2: code = BFD_RELOC_16_PCREL; break;
9026 case 4: code = BFD_RELOC_32_PCREL; break;
9027 #ifdef BFD64
9028 case 8: code = BFD_RELOC_64_PCREL; break;
9029 #endif
9030 }
9031 }
9032 else
9033 {
9034 switch (fixp->fx_size)
9035 {
9036 default:
9037 as_bad_where (fixp->fx_file, fixp->fx_line,
9038 _("can not do %d byte relocation"),
9039 fixp->fx_size);
9040 code = BFD_RELOC_32;
9041 break;
9042 case 1: code = BFD_RELOC_8; break;
9043 case 2: code = BFD_RELOC_16; break;
9044 case 4: code = BFD_RELOC_32; break;
9045 #ifdef BFD64
9046 case 8: code = BFD_RELOC_64; break;
9047 #endif
9048 }
9049 }
9050 break;
9051 }
9052
9053 if ((code == BFD_RELOC_32
9054 || code == BFD_RELOC_32_PCREL
9055 || code == BFD_RELOC_X86_64_32S)
9056 && GOT_symbol
9057 && fixp->fx_addsy == GOT_symbol)
9058 {
9059 if (!object_64bit)
9060 code = BFD_RELOC_386_GOTPC;
9061 else
9062 code = BFD_RELOC_X86_64_GOTPC32;
9063 }
9064 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9065 && GOT_symbol
9066 && fixp->fx_addsy == GOT_symbol)
9067 {
9068 code = BFD_RELOC_X86_64_GOTPC64;
9069 }
9070
9071 rel = (arelent *) xmalloc (sizeof (arelent));
9072 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9073 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9074
9075 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9076
9077 if (!use_rela_relocations)
9078 {
9079 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9080 vtable entry to be used in the relocation's section offset. */
9081 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9082 rel->address = fixp->fx_offset;
9083 #if defined (OBJ_COFF) && defined (TE_PE)
9084 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9085 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9086 else
9087 #endif
9088 rel->addend = 0;
9089 }
9090 /* Use the rela in 64bit mode. */
9091 else
9092 {
9093 if (disallow_64bit_reloc)
9094 switch (code)
9095 {
9096 case BFD_RELOC_X86_64_DTPOFF64:
9097 case BFD_RELOC_X86_64_TPOFF64:
9098 case BFD_RELOC_64_PCREL:
9099 case BFD_RELOC_X86_64_GOTOFF64:
9100 case BFD_RELOC_X86_64_GOT64:
9101 case BFD_RELOC_X86_64_GOTPCREL64:
9102 case BFD_RELOC_X86_64_GOTPC64:
9103 case BFD_RELOC_X86_64_GOTPLT64:
9104 case BFD_RELOC_X86_64_PLTOFF64:
9105 as_bad_where (fixp->fx_file, fixp->fx_line,
9106 _("cannot represent relocation type %s in x32 mode"),
9107 bfd_get_reloc_code_name (code));
9108 break;
9109 default:
9110 break;
9111 }
9112
9113 if (!fixp->fx_pcrel)
9114 rel->addend = fixp->fx_offset;
9115 else
9116 switch (code)
9117 {
9118 case BFD_RELOC_X86_64_PLT32:
9119 case BFD_RELOC_X86_64_GOT32:
9120 case BFD_RELOC_X86_64_GOTPCREL:
9121 case BFD_RELOC_X86_64_TLSGD:
9122 case BFD_RELOC_X86_64_TLSLD:
9123 case BFD_RELOC_X86_64_GOTTPOFF:
9124 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9125 case BFD_RELOC_X86_64_TLSDESC_CALL:
9126 rel->addend = fixp->fx_offset - fixp->fx_size;
9127 break;
9128 default:
9129 rel->addend = (section->vma
9130 - fixp->fx_size
9131 + fixp->fx_addnumber
9132 + md_pcrel_from (fixp));
9133 break;
9134 }
9135 }
9136
9137 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9138 if (rel->howto == NULL)
9139 {
9140 as_bad_where (fixp->fx_file, fixp->fx_line,
9141 _("cannot represent relocation type %s"),
9142 bfd_get_reloc_code_name (code));
9143 /* Set howto to a garbage value so that we can keep going. */
9144 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9145 gas_assert (rel->howto != NULL);
9146 }
9147
9148 return rel;
9149 }
9150
9151 #include "tc-i386-intel.c"
9152
9153 void
9154 tc_x86_parse_to_dw2regnum (expressionS *exp)
9155 {
9156 int saved_naked_reg;
9157 char saved_register_dot;
9158
9159 saved_naked_reg = allow_naked_reg;
9160 allow_naked_reg = 1;
9161 saved_register_dot = register_chars['.'];
9162 register_chars['.'] = '.';
9163 allow_pseudo_reg = 1;
9164 expression_and_evaluate (exp);
9165 allow_pseudo_reg = 0;
9166 register_chars['.'] = saved_register_dot;
9167 allow_naked_reg = saved_naked_reg;
9168
9169 if (exp->X_op == O_register && exp->X_add_number >= 0)
9170 {
9171 if ((addressT) exp->X_add_number < i386_regtab_size)
9172 {
9173 exp->X_op = O_constant;
9174 exp->X_add_number = i386_regtab[exp->X_add_number]
9175 .dw2_regnum[flag_code >> 1];
9176 }
9177 else
9178 exp->X_op = O_illegal;
9179 }
9180 }
9181
9182 void
9183 tc_x86_frame_initial_instructions (void)
9184 {
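  /* Cache the DWARF register number of the stack pointer (esp or rsp)
     for the current code size.  */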
9185 static unsigned int sp_regno[2];
9186
9187 if (!sp_regno[flag_code >> 1])
9188 {
9189 char *saved_input = input_line_pointer;
9190 char sp[][4] = {"esp", "rsp"};
9191 expressionS exp;
9192
9193 input_line_pointer = sp[flag_code >> 1];
9194 tc_x86_parse_to_dw2regnum (&exp);
9195 gas_assert (exp.X_op == O_constant);
9196 sp_regno[flag_code >> 1] = exp.X_add_number;
9197 input_line_pointer = saved_input;
9198 }
9199
9200 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9201 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9202 }
9203
9204 int
9205 x86_dwarf2_addr_size (void)
9206 {
9207 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9208 if (x86_elf_abi == X86_64_X32_ABI)
9209 return 4;
9210 #endif
9211 return bfd_arch_bits_per_address (stdoutput) / 8;
9212 }
9213
9214 int
9215 i386_elf_section_type (const char *str, size_t len)
9216 {
9217 if (flag_code == CODE_64BIT
9218 && len == sizeof ("unwind") - 1
9219 && strncmp (str, "unwind", 6) == 0)
9220 return SHT_X86_64_UNWIND;
9221
9222 return -1;
9223 }
9224
9225 #ifdef TE_SOLARIS
9226 void
9227 i386_solaris_fix_up_eh_frame (segT sec)
9228 {
9229 if (flag_code == CODE_64BIT)
9230 elf_section_type (sec) = SHT_X86_64_UNWIND;
9231 }
9232 #endif
9233
9234 #ifdef TE_PE
9235 void
9236 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9237 {
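  /* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL.  */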
9238 expressionS exp;
9239
9240 exp.X_op = O_secrel;
9241 exp.X_add_symbol = symbol;
9242 exp.X_add_number = 0;
9243 emit_expr (&exp, size);
9244 }
9245 #endif
9246
9247 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9248 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9249
9250 bfd_vma
9251 x86_64_section_letter (int letter, char **ptr_msg)
9252 {
9253 if (flag_code == CODE_64BIT)
9254 {
9255 if (letter == 'l')
9256 return SHF_X86_64_LARGE;
9257
9258 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9259 }
9260 else
9261 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9262 return -1;
9263 }
9264
9265 bfd_vma
9266 x86_64_section_word (char *str, size_t len)
9267 {
9268 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9269 return SHF_X86_64_LARGE;
9270
9271 return -1;
9272 }
9273
9274 static void
9275 handle_large_common (int small ATTRIBUTE_UNUSED)
9276 {
9277 if (flag_code != CODE_64BIT)
9278 {
9279 s_comm_internal (0, elf_common_parse);
9280 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9281 }
9282 else
9283 {
9284 static segT lbss_section;
9285 asection *saved_com_section_ptr = elf_com_section_ptr;
9286 asection *saved_bss_section = bss_section;
9287
9288 if (lbss_section == NULL)
9289 {
9290 flagword applicable;
9291 segT seg = now_seg;
9292 subsegT subseg = now_subseg;
9293
9294 /* The .lbss section is for local .largecomm symbols. */
9295 lbss_section = subseg_new (".lbss", 0);
9296 applicable = bfd_applicable_section_flags (stdoutput);
9297 bfd_set_section_flags (stdoutput, lbss_section,
9298 applicable & SEC_ALLOC);
9299 seg_info (lbss_section)->bss = 1;
9300
9301 subseg_set (seg, subseg);
9302 }
9303
9304 elf_com_section_ptr = &_bfd_elf_large_com_section;
9305 bss_section = lbss_section;
9306
9307 s_comm_internal (0, elf_common_parse);
9308
9309 elf_com_section_ptr = saved_com_section_ptr;
9310 bss_section = saved_bss_section;
9311 }
9312 }
9313 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */