1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT is really an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define LOCK_PREFIX 5
70 #define REX_PREFIX 6 /* must come last. */
71 #define MAX_PREFIXES 7 /* max prefixes per opcode */
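/* As an illustration of the slot assignment above (a sketch, not a
   statement about any particular encoder path): for a hypothetical
   `lock incl %gs:(%ebp)' the 0x65 segment override is stored in
   i.prefix[SEG_PREFIX] and the 0xf0 lock byte in i.prefix[LOCK_PREFIX],
   so the segment override is emitted before LOCK; in 64-bit code any
   REX byte sits in i.prefix[REX_PREFIX] and comes last, immediately
   before the opcode.  */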
72
73 /* we define the syntax here (modulo base,index,scale syntax) */
74 #define REGISTER_PREFIX '%'
75 #define IMMEDIATE_PREFIX '$'
76 #define ABSOLUTE_PREFIX '*'
77
78 /* these are the instruction mnemonic suffixes in AT&T syntax or
79 memory operand size in Intel syntax. */
80 #define WORD_MNEM_SUFFIX 'w'
81 #define BYTE_MNEM_SUFFIX 'b'
82 #define SHORT_MNEM_SUFFIX 's'
83 #define LONG_MNEM_SUFFIX 'l'
84 #define QWORD_MNEM_SUFFIX 'q'
85 #define XMMWORD_MNEM_SUFFIX 'x'
86 #define YMMWORD_MNEM_SUFFIX 'y'
87 /* Intel Syntax. Use a non-ASCII letter since it never appears
88 in instructions. */
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
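/* For example (illustrative only): in AT&T syntax `addw $1, %ax'
   carries WORD_MNEM_SUFFIX 'w' as part of the mnemonic, while in
   Intel syntax the same 'w' would be derived from a `word ptr'
   memory operand rather than spelled in the mnemonic.  */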
90
91 #define END_OF_INSN '\0'
92
93 /*
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
98 END.
99 */
100 typedef struct
101 {
102 const insn_template *start;
103 const insn_template *end;
104 }
105 templates;
106
107 /* 386 operand encoding bytes: see 386 book for details of this. */
108 typedef struct
109 {
110 unsigned int regmem; /* codes register or memory operand */
111 unsigned int reg; /* codes register operand (or extended opcode) */
112 unsigned int mode; /* how to interpret regmem & reg */
113 }
114 modrm_byte;
115
116 /* x86-64 extension prefix. */
117 typedef int rex_byte;
118
119 /* 386 opcode byte to code indirect addressing. */
120 typedef struct
121 {
122 unsigned base;
123 unsigned index;
124 unsigned scale;
125 }
126 sib_byte;
127
128 /* x86 arch names, types and features */
129 typedef struct
130 {
131 const char *name; /* arch name */
132 unsigned int len; /* arch string length */
133 enum processor_type type; /* arch type */
134 i386_cpu_flags flags; /* cpu feature flags */
135 unsigned int skip; /* show_arch should skip this. */
136 unsigned int negated; /* turn off indicated flags. */
137 }
138 arch_entry;
139
140 static void update_code_flag (int, int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_sse_check (int);
147 static void set_cpu_arch (int);
148 #ifdef TE_PE
149 static void pe_directive_secrel (int);
150 #endif
151 static void signed_cons (int);
152 static char *output_invalid (int c);
153 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
154 const char *);
155 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
156 const char *);
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS *);
160 static int i386_intel_parse_name (const char *, expressionS *);
161 static const reg_entry *parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (int, int);
166 static void optimize_imm (void);
167 static void optimize_disp (void);
168 static const insn_template *match_template (void);
169 static int check_string (void);
170 static int process_suffix (void);
171 static int check_byte_reg (void);
172 static int check_long_reg (void);
173 static int check_qword_reg (void);
174 static int check_word_reg (void);
175 static int finalize_imm (void);
176 static int process_operands (void);
177 static const seg_entry *build_modrm_byte (void);
178 static void output_insn (void);
179 static void output_imm (fragS *, offsetT);
180 static void output_disp (fragS *, offsetT);
181 #ifndef I386COFF
182 static void s_bss (int);
183 #endif
184 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185 static void handle_large_common (int small ATTRIBUTE_UNUSED);
186 #endif
187
188 static const char *default_arch = DEFAULT_ARCH;
189
190 /* VEX prefix. */
191 typedef struct
192 {
193 /* VEX prefix is either 2 byte or 3 byte. */
194 unsigned char bytes[3];
195 unsigned int length;
196 /* Destination or source register specifier. */
197 const reg_entry *register_specifier;
198 } vex_prefix;
199
200 /* 'md_assemble ()' gathers together information and puts it into an
201 i386_insn. */
202
203 union i386_op
204 {
205 expressionS *disps;
206 expressionS *imms;
207 const reg_entry *regs;
208 };
209
210 enum i386_error
211 {
212 operand_size_mismatch,
213 operand_type_mismatch,
214 register_type_mismatch,
215 number_of_operands_mismatch,
216 invalid_instruction_suffix,
217 bad_imm4,
218 old_gcc_only,
219 unsupported_with_intel_mnemonic,
220 unsupported_syntax,
221 unsupported,
222 invalid_vsib_address,
223 unsupported_vector_index_register
224 };
225
226 struct _i386_insn
227 {
228 /* TM holds the template for the insn we're currently assembling. */
229 insn_template tm;
230
231 /* SUFFIX holds the instruction size suffix for byte, word, dword
232 or qword, if given. */
233 char suffix;
234
235 /* OPERANDS gives the number of given operands. */
236 unsigned int operands;
237
238 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
239 of given register, displacement, memory operands and immediate
240 operands. */
241 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
242
243 /* TYPES [i] is the type (see above #defines) which tells us how to
244 use OP[i] for the corresponding operand. */
245 i386_operand_type types[MAX_OPERANDS];
246
247 /* Displacement expression, immediate expression, or register for each
248 operand. */
249 union i386_op op[MAX_OPERANDS];
250
251 /* Flags for operands. */
252 unsigned int flags[MAX_OPERANDS];
253 #define Operand_PCrel 1
254
255 /* Relocation type for operand */
256 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
257
258 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
259 the base index byte below. */
260 const reg_entry *base_reg;
261 const reg_entry *index_reg;
262 unsigned int log2_scale_factor;
263
264 /* SEG gives the seg_entries of this insn. They are zero unless
265 explicit segment overrides are given. */
266 const seg_entry *seg[2];
267
268 /* PREFIX holds all the given prefix opcodes (usually null).
269 PREFIXES is the number of prefix opcodes. */
270 unsigned int prefixes;
271 unsigned char prefix[MAX_PREFIXES];
272
273 /* RM and SIB are the modrm byte and the sib byte where the
274 addressing modes of this insn are encoded. */
275 modrm_byte rm;
276 rex_byte rex;
277 sib_byte sib;
278 vex_prefix vex;
279
280 /* Swap operand in encoding. */
281 unsigned int swap_operand;
282
283 /* Force 32bit displacement in encoding. */
284 unsigned int disp32_encoding;
285
286 /* Error message. */
287 enum i386_error error;
288 };
289
290 typedef struct _i386_insn i386_insn;
291
292 /* List of chars besides those in app.c:symbol_chars that can start an
293 operand. Used to prevent the scrubber eating vital white-space. */
294 const char extra_symbol_chars[] = "*%-(["
295 #ifdef LEX_AT
296 "@"
297 #endif
298 #ifdef LEX_QM
299 "?"
300 #endif
301 ;
302
303 #if (defined (TE_I386AIX) \
304 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
305 && !defined (TE_GNU) \
306 && !defined (TE_LINUX) \
307 && !defined (TE_NETWARE) \
308 && !defined (TE_FreeBSD) \
309 && !defined (TE_DragonFly) \
310 && !defined (TE_NetBSD)))
311 /* This array holds the chars that always start a comment. If the
312 pre-processor is disabled, these aren't very useful. The option
313 --divide will remove '/' from this list. */
314 const char *i386_comment_chars = "#/";
315 #define SVR4_COMMENT_CHARS 1
316 #define PREFIX_SEPARATOR '\\'
317
318 #else
319 const char *i386_comment_chars = "#";
320 #define PREFIX_SEPARATOR '/'
321 #endif
322
323 /* This array holds the chars that only start a comment at the beginning of
324 a line. If the line seems to have the form '# 123 filename'
325 .line and .file directives will appear in the pre-processed output.
326 Note that input_file.c hand checks for '#' at the beginning of the
327 first line of the input file. This is because the compiler outputs
328 #NO_APP at the beginning of its output.
329 Also note that comments started like this one will always work if
330 '/' isn't otherwise defined. */
331 const char line_comment_chars[] = "#/";
332
333 const char line_separator_chars[] = ";";
334
335 /* Chars that can be used to separate mant from exp in floating point
336 nums. */
337 const char EXP_CHARS[] = "eE";
338
339 /* Chars that mean this number is a floating point constant
340 As in 0f12.456
341 or 0d1.2345e12. */
342 const char FLT_CHARS[] = "fFdDxX";
343
344 /* Tables for lexical analysis. */
345 static char mnemonic_chars[256];
346 static char register_chars[256];
347 static char operand_chars[256];
348 static char identifier_chars[256];
349 static char digit_chars[256];
350
351 /* Lexical macros. */
352 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
353 #define is_operand_char(x) (operand_chars[(unsigned char) x])
354 #define is_register_char(x) (register_chars[(unsigned char) x])
355 #define is_space_char(x) ((x) == ' ')
356 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
357 #define is_digit_char(x) (digit_chars[(unsigned char) x])
358
359 /* All non-digit non-letter characters that may occur in an operand. */
360 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
361
362 /* md_assemble() always leaves the strings it's passed unaltered. To
363 effect this we maintain a stack of saved characters that we've smashed
364 with '\0's (indicating end of strings for various sub-fields of the
365 assembler instruction). */
366 static char save_stack[32];
367 static char *save_stack_p;
368 #define END_STRING_AND_SAVE(s) \
369 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
370 #define RESTORE_END_STRING(s) \
371 do { *(s) = *--save_stack_p; } while (0)
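/* Usage sketch (l is a hypothetical pointer to the character just past
   a sub-field being parsed):

     END_STRING_AND_SAVE (l);    saves *l on save_stack, plants a '\0'
     ... parse the now NUL-terminated piece ...
     RESTORE_END_STRING (l);     puts the saved character back

   Saves and restores must pair up in LIFO order, and no more than 32
   may be outstanding (the size of save_stack).  */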
372
373 /* The instruction we're assembling. */
374 static i386_insn i;
375
376 /* Possible templates for current insn. */
377 static const templates *current_templates;
378
379 /* Per instruction expressionS buffers: max displacements & immediates. */
380 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
381 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
382
383 /* Current operand we are working on. */
384 static int this_operand = -1;
385
386 /* We support four modes: 16-, 32- and 64-bit code, plus the x32 ABI.
387 FLAG_CODE distinguishes the three code sizes. */
388
389 enum flag_code {
390 CODE_32BIT,
391 CODE_16BIT,
392 CODE_64BIT };
393
394 static enum flag_code flag_code;
395 static unsigned int object_64bit;
396 static unsigned int disallow_64bit_reloc;
397 static int use_rela_relocations = 0;
398
399 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
400 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
401 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
402
403 /* The ELF ABI to use. */
404 enum x86_elf_abi
405 {
406 I386_ABI,
407 X86_64_ABI,
408 X86_64_X32_ABI
409 };
410
411 static enum x86_elf_abi x86_elf_abi = I386_ABI;
412 #endif
413
414 /* The names used to print error messages. */
415 static const char *flag_code_names[] =
416 {
417 "32",
418 "16",
419 "64"
420 };
421
422 /* 1 for intel syntax,
423 0 if att syntax. */
424 static int intel_syntax = 0;
425
426 /* 1 for intel mnemonic,
427 0 if att mnemonic. */
428 static int intel_mnemonic = !SYSV386_COMPAT;
429
430 /* 1 if support old (<= 2.8.1) versions of gcc. */
431 static int old_gcc = OLDGCC_COMPAT;
432
433 /* 1 if pseudo registers are permitted. */
434 static int allow_pseudo_reg = 0;
435
436 /* 1 if register prefix % not required. */
437 static int allow_naked_reg = 0;
438
439 /* 1 if pseudo index register, eiz/riz, is allowed. */
440 static int allow_index_reg = 0;
441
442 static enum
443 {
444 sse_check_none = 0,
445 sse_check_warning,
446 sse_check_error
447 }
448 sse_check;
449
450 /* Register prefix used for error message. */
451 static const char *register_prefix = "%";
452
453 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
454 leave, push, and pop instructions so that gcc has the same stack
455 frame as in 32 bit mode. */
456 static char stackop_size = '\0';
457
458 /* Non-zero to optimize code alignment. */
459 int optimize_align_code = 1;
460
461 /* Non-zero to quieten some warnings. */
462 static int quiet_warnings = 0;
463
464 /* CPU name. */
465 static const char *cpu_arch_name = NULL;
466 static char *cpu_sub_arch_name = NULL;
467
468 /* CPU feature flags. */
469 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
470
471 /* Nonzero if we have selected a cpu we are generating instructions for. */
472 static int cpu_arch_tune_set = 0;
473
474 /* Cpu we are generating instructions for. */
475 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
476
477 /* CPU feature flags of cpu we are generating instructions for. */
478 static i386_cpu_flags cpu_arch_tune_flags;
479
480 /* CPU instruction set architecture used. */
481 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
482
483 /* CPU feature flags of instruction set architecture used. */
484 i386_cpu_flags cpu_arch_isa_flags;
485
486 /* If set, conditional jumps are not automatically promoted to handle
487 offsets larger than a byte. */
488 static unsigned int no_cond_jump_promotion = 0;
489
490 /* Encode SSE instructions with VEX prefix. */
491 static unsigned int sse2avx;
492
493 /* Encode scalar AVX instructions with specific vector length. */
494 static enum
495 {
496 vex128 = 0,
497 vex256
498 } avxscalar;
499
500 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
501 static symbolS *GOT_symbol;
502
503 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
504 unsigned int x86_dwarf2_return_column;
505
506 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
507 int x86_cie_data_alignment;
508
509 /* Interface to relax_segment.
510 There are 3 major relax states for 386 jump insns because the
511 different types of jumps add different sizes to frags when we're
512 figuring out what sort of jump to choose to reach a given label. */
513
514 /* Types. */
515 #define UNCOND_JUMP 0
516 #define COND_JUMP 1
517 #define COND_JUMP86 2
518
519 /* Sizes. */
520 #define CODE16 1
521 #define SMALL 0
522 #define SMALL16 (SMALL | CODE16)
523 #define BIG 2
524 #define BIG16 (BIG | CODE16)
525
526 #ifndef INLINE
527 #ifdef __GNUC__
528 #define INLINE __inline__
529 #else
530 #define INLINE
531 #endif
532 #endif
533
534 #define ENCODE_RELAX_STATE(type, size) \
535 ((relax_substateT) (((type) << 2) | (size)))
536 #define TYPE_FROM_RELAX_STATE(s) \
537 ((s) >> 2)
538 #define DISP_SIZE_FROM_RELAX_STATE(s) \
539 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
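/* Worked example: ENCODE_RELAX_STATE (COND_JUMP, SMALL16) is
   (1 << 2) | 1 == 5; TYPE_FROM_RELAX_STATE (5) gives back COND_JUMP
   and DISP_SIZE_FROM_RELAX_STATE (5) is 1, i.e. a one byte
   displacement, while ENCODE_RELAX_STATE (UNCOND_JUMP, BIG) is 2,
   whose displacement size is 4 bytes.  */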
540
541 /* This table is used by relax_frag to promote short jumps to long
542 ones where necessary. SMALL (short) jumps may be promoted to BIG
543 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
544 don't allow a short jump in a 32 bit code segment to be promoted to
545 a 16 bit offset jump because it's slower (requires data size
546 prefix), and doesn't work, unless the destination is in the bottom
547 64k of the code segment (The top 16 bits of eip are zeroed). */
548
549 const relax_typeS md_relax_table[] =
550 {
551 /* The fields are:
552 1) most positive reach of this state,
553 2) most negative reach of this state,
554 3) how many bytes this mode will have in the variable part of the frag
555 4) which index into the table to try if we can't fit into this one. */
556
557 /* UNCOND_JUMP states. */
558 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
559 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
560 /* dword jmp adds 4 bytes to frag:
561 0 extra opcode bytes, 4 displacement bytes. */
562 {0, 0, 4, 0},
563 /* word jmp adds 2 bytes to frag:
564 0 extra opcode bytes, 2 displacement bytes. */
565 {0, 0, 2, 0},
566
567 /* COND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
570 /* dword conditionals add 5 bytes to frag:
571 1 extra opcode byte, 4 displacement bytes. */
572 {0, 0, 5, 0},
573 /* word conditionals add 3 bytes to frag:
574 1 extra opcode byte, 2 displacement bytes. */
575 {0, 0, 3, 0},
576
577 /* COND_JUMP86 states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
580 /* dword conditionals add 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
582 {0, 0, 5, 0},
583 /* word conditionals add 4 bytes to frag:
584 1 displacement byte and a 3 byte long branch insn. */
585 {0, 0, 4, 0}
586 };
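/* Reading the table, as a sketch: a 32-bit `jmp' to a not-yet-resolved
   label starts in state ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL), i.e.
   index 0, with a 1 byte displacement and a reach of roughly 128 bytes
   either way.  If the target turns out to be, say, 300 bytes away,
   relax_frag follows the fourth field to index 2 (UNCOND_JUMP, BIG),
   growing the variable part to a 4 byte displacement.  */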
587
588 static const arch_entry cpu_arch[] =
589 {
590 /* Do not replace the first two entries - i386_target_format()
591 relies on them being there in this order. */
592 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
593 CPU_GENERIC32_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
595 CPU_GENERIC64_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
597 CPU_NONE_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
599 CPU_I186_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
601 CPU_I286_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
603 CPU_I386_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
605 CPU_I486_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
607 CPU_I586_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
609 CPU_I686_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
611 CPU_I586_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
613 CPU_PENTIUMPRO_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
615 CPU_P2_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
617 CPU_P3_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
619 CPU_P4_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
621 CPU_CORE_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
623 CPU_NOCONA_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
625 CPU_CORE_FLAGS, 1, 0 },
626 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
627 CPU_CORE_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
629 CPU_CORE2_FLAGS, 1, 0 },
630 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
631 CPU_CORE2_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
633 CPU_COREI7_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
635 CPU_L1OM_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
637 CPU_K1OM_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
639 CPU_K6_FLAGS, 0, 0 },
640 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
641 CPU_K6_2_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
643 CPU_ATHLON_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
645 CPU_K8_FLAGS, 1, 0 },
646 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
647 CPU_K8_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
649 CPU_K8_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
651 CPU_AMDFAM10_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
653 CPU_BDVER1_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
655 CPU_BDVER2_FLAGS, 0, 0 },
656 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
657 CPU_8087_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
659 CPU_287_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
661 CPU_387_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
663 CPU_ANY87_FLAGS, 0, 1 },
664 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
665 CPU_MMX_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
667 CPU_3DNOWA_FLAGS, 0, 1 },
668 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
669 CPU_SSE_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
671 CPU_SSE2_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
673 CPU_SSE3_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
675 CPU_SSSE3_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
677 CPU_SSE4_1_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
679 CPU_SSE4_2_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
681 CPU_SSE4_2_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
683 CPU_ANY_SSE_FLAGS, 0, 1 },
684 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
685 CPU_AVX_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
687 CPU_AVX2_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
689 CPU_ANY_AVX_FLAGS, 0, 1 },
690 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
691 CPU_VMX_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
693 CPU_VMFUNC_FLAGS, 0, 0 },
694 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
695 CPU_SMX_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
697 CPU_XSAVE_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
699 CPU_XSAVEOPT_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
701 CPU_AES_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
703 CPU_PCLMUL_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
705 CPU_PCLMUL_FLAGS, 1, 0 },
706 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
707 CPU_FSGSBASE_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
709 CPU_RDRND_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
711 CPU_F16C_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
713 CPU_BMI2_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
715 CPU_FMA_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
717 CPU_FMA4_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
719 CPU_XOP_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
721 CPU_LWP_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
723 CPU_MOVBE_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
725 CPU_EPT_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
727 CPU_LZCNT_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
729 CPU_INVPCID_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
731 CPU_CLFLUSH_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
733 CPU_NOP_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
735 CPU_SYSCALL_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
737 CPU_RDTSCP_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
739 CPU_3DNOW_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
741 CPU_3DNOWA_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
743 CPU_PADLOCK_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
745 CPU_SVME_FLAGS, 1, 0 },
746 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
747 CPU_SVME_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
749 CPU_SSE4A_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
751 CPU_ABM_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
753 CPU_BMI_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
755 CPU_TBM_FLAGS, 0, 0 },
756 };
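/* A sketch of how this table is consumed by the .arch directive (and
   the -march= option): `.arch i686' selects PROCESSOR_PENTIUMPRO with
   CPU_I686_FLAGS, `.arch .sse4.1' ORs in the SSE4.1 feature bits, and
   entries whose `negated' field is set, such as `.nosse' or `.noavx',
   clear the corresponding feature bits instead.  Entries with `skip'
   set (aliases like `yonah' or `.clmul') are accepted but omitted from
   the architecture listing printed by show_arch.  */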
757
758 #ifdef I386COFF
759 /* Like s_lcomm_internal in gas/read.c but the alignment string
760 is allowed to be optional. */
761
762 static symbolS *
763 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
764 {
765 addressT align = 0;
766
767 SKIP_WHITESPACE ();
768
769 if (needs_align
770 && *input_line_pointer == ',')
771 {
772 align = parse_align (needs_align - 1);
773
774 if (align == (addressT) -1)
775 return NULL;
776 }
777 else
778 {
779 if (size >= 8)
780 align = 3;
781 else if (size >= 4)
782 align = 2;
783 else if (size >= 2)
784 align = 1;
785 else
786 align = 0;
787 }
788
789 bss_alloc (symbolP, size, align);
790 return symbolP;
791 }
792
793 static void
794 pe_lcomm (int needs_align)
795 {
796 s_comm_internal (needs_align * 2, pe_lcomm_internal);
797 }
798 #endif
799
800 const pseudo_typeS md_pseudo_table[] =
801 {
802 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
803 {"align", s_align_bytes, 0},
804 #else
805 {"align", s_align_ptwo, 0},
806 #endif
807 {"arch", set_cpu_arch, 0},
808 #ifndef I386COFF
809 {"bss", s_bss, 0},
810 #else
811 {"lcomm", pe_lcomm, 1},
812 #endif
813 {"ffloat", float_cons, 'f'},
814 {"dfloat", float_cons, 'd'},
815 {"tfloat", float_cons, 'x'},
816 {"value", cons, 2},
817 {"slong", signed_cons, 4},
818 {"noopt", s_ignore, 0},
819 {"optim", s_ignore, 0},
820 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
821 {"code16", set_code_flag, CODE_16BIT},
822 {"code32", set_code_flag, CODE_32BIT},
823 {"code64", set_code_flag, CODE_64BIT},
824 {"intel_syntax", set_intel_syntax, 1},
825 {"att_syntax", set_intel_syntax, 0},
826 {"intel_mnemonic", set_intel_mnemonic, 1},
827 {"att_mnemonic", set_intel_mnemonic, 0},
828 {"allow_index_reg", set_allow_index_reg, 1},
829 {"disallow_index_reg", set_allow_index_reg, 0},
830 {"sse_check", set_sse_check, 0},
831 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
832 {"largecomm", handle_large_common, 0},
833 #else
834 {"file", (void (*) (int)) dwarf2_directive_file, 0},
835 {"loc", dwarf2_directive_loc, 0},
836 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
837 #endif
838 #ifdef TE_PE
839 {"secrel32", pe_directive_secrel, 0},
840 #endif
841 {0, 0, 0}
842 };
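/* A usage sketch of directives routed through this table:

     .intel_syntax noprefix
     .code64
     .arch corei7
     .sse_check warning
     .value 0x1234
     .slong -1

   .value emits a two byte constant and .slong a signed four byte one,
   per the size arguments given above.  */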
843
844 /* For interface with expression (). */
845 extern char *input_line_pointer;
846
847 /* Hash table for instruction mnemonic lookup. */
848 static struct hash_control *op_hash;
849
850 /* Hash table for register lookup. */
851 static struct hash_control *reg_hash;
852 \f
853 void
854 i386_align_code (fragS *fragP, int count)
855 {
856 /* Various efficient no-op patterns for aligning code labels.
857 Note: Don't try to assemble the instructions in the comments.
858 0L and 0w are not legal. */
859 static const char f32_1[] =
860 {0x90}; /* nop */
861 static const char f32_2[] =
862 {0x66,0x90}; /* xchg %ax,%ax */
863 static const char f32_3[] =
864 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
865 static const char f32_4[] =
866 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
867 static const char f32_5[] =
868 {0x90, /* nop */
869 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
870 static const char f32_6[] =
871 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
872 static const char f32_7[] =
873 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
874 static const char f32_8[] =
875 {0x90, /* nop */
876 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
877 static const char f32_9[] =
878 {0x89,0xf6, /* movl %esi,%esi */
879 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
880 static const char f32_10[] =
881 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
882 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
883 static const char f32_11[] =
884 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
885 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
886 static const char f32_12[] =
887 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
888 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
889 static const char f32_13[] =
890 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
891 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
892 static const char f32_14[] =
893 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
894 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
895 static const char f16_3[] =
896 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
897 static const char f16_4[] =
898 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
899 static const char f16_5[] =
900 {0x90, /* nop */
901 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
902 static const char f16_6[] =
903 {0x89,0xf6, /* mov %si,%si */
904 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
905 static const char f16_7[] =
906 {0x8d,0x74,0x00, /* lea 0(%si),%si */
907 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
908 static const char f16_8[] =
909 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
910 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
911 static const char jump_31[] =
912 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
913 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
914 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
915 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
916 static const char *const f32_patt[] = {
917 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
918 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
919 };
920 static const char *const f16_patt[] = {
921 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
922 };
923 /* nopl (%[re]ax) */
924 static const char alt_3[] =
925 {0x0f,0x1f,0x00};
926 /* nopl 0(%[re]ax) */
927 static const char alt_4[] =
928 {0x0f,0x1f,0x40,0x00};
929 /* nopl 0(%[re]ax,%[re]ax,1) */
930 static const char alt_5[] =
931 {0x0f,0x1f,0x44,0x00,0x00};
932 /* nopw 0(%[re]ax,%[re]ax,1) */
933 static const char alt_6[] =
934 {0x66,0x0f,0x1f,0x44,0x00,0x00};
935 /* nopl 0L(%[re]ax) */
936 static const char alt_7[] =
937 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
938 /* nopl 0L(%[re]ax,%[re]ax,1) */
939 static const char alt_8[] =
940 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
941 /* nopw 0L(%[re]ax,%[re]ax,1) */
942 static const char alt_9[] =
943 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
944 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
945 static const char alt_10[] =
946 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
947 /* data16
948 nopw %cs:0L(%[re]ax,%[re]ax,1) */
949 static const char alt_long_11[] =
950 {0x66,
951 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
952 /* data16
953 data16
954 nopw %cs:0L(%[re]ax,%[re]ax,1) */
955 static const char alt_long_12[] =
956 {0x66,
957 0x66,
958 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
959 /* data16
960 data16
961 data16
962 nopw %cs:0L(%[re]ax,%[re]ax,1) */
963 static const char alt_long_13[] =
964 {0x66,
965 0x66,
966 0x66,
967 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
968 /* data16
969 data16
970 data16
971 data16
972 nopw %cs:0L(%[re]ax,%[re]ax,1) */
973 static const char alt_long_14[] =
974 {0x66,
975 0x66,
976 0x66,
977 0x66,
978 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
979 /* data16
980 data16
981 data16
982 data16
983 data16
984 nopw %cs:0L(%[re]ax,%[re]ax,1) */
985 static const char alt_long_15[] =
986 {0x66,
987 0x66,
988 0x66,
989 0x66,
990 0x66,
991 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
992 /* nopl 0(%[re]ax,%[re]ax,1)
993 nopw 0(%[re]ax,%[re]ax,1) */
994 static const char alt_short_11[] =
995 {0x0f,0x1f,0x44,0x00,0x00,
996 0x66,0x0f,0x1f,0x44,0x00,0x00};
997 /* nopw 0(%[re]ax,%[re]ax,1)
998 nopw 0(%[re]ax,%[re]ax,1) */
999 static const char alt_short_12[] =
1000 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1001 0x66,0x0f,0x1f,0x44,0x00,0x00};
1002 /* nopw 0(%[re]ax,%[re]ax,1)
1003 nopl 0L(%[re]ax) */
1004 static const char alt_short_13[] =
1005 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1006 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1007 /* nopl 0L(%[re]ax)
1008 nopl 0L(%[re]ax) */
1009 static const char alt_short_14[] =
1010 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1011 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1012 /* nopl 0L(%[re]ax)
1013 nopl 0L(%[re]ax,%[re]ax,1) */
1014 static const char alt_short_15[] =
1015 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1016 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1017 static const char *const alt_short_patt[] = {
1018 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1019 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1020 alt_short_14, alt_short_15
1021 };
1022 static const char *const alt_long_patt[] = {
1023 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1024 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1025 alt_long_14, alt_long_15
1026 };
1027
1028 /* Only align for at least a positive non-zero boundary. */
1029 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1030 return;
1031
1032 /* We need to decide which NOP sequence to use for 32bit and
1033 64bit. When -mtune= is used:
1034
1035 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1036 PROCESSOR_GENERIC32, f32_patt will be used.
1037 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1038 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, PROCESSOR_L1OM,
1039 PROCESSOR_K1OM and PROCESSOR_GENERIC64, alt_long_patt will be used.
1040 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8,
1041 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1042 will be used.
1043
1044 When -mtune= isn't used, alt_long_patt will be used if
1045 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1046 be used.
1047
1048 When -march= or .arch is used, we can't use anything beyond
1049 cpu_arch_isa_flags. */
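  /* For instance (a sketch of the rules above): when no -mtune= is
     given and the frag's isa_flags include CpuNop, an 8 byte alignment
     request is filled with alt_long_patt[7], i.e. alt_8
     (nopl 0L(%[re]ax,%[re]ax,1)); tuning for PROCESSOR_K8 would pick
     alt_short_patt instead, and tuning for PROCESSOR_I386 falls back
     to the f32_patt nop/leal forms.  */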
1050
1051 if (flag_code == CODE_16BIT)
1052 {
1053 if (count > 8)
1054 {
1055 memcpy (fragP->fr_literal + fragP->fr_fix,
1056 jump_31, count);
1057 /* Adjust jump offset. */
1058 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1059 }
1060 else
1061 memcpy (fragP->fr_literal + fragP->fr_fix,
1062 f16_patt[count - 1], count);
1063 }
1064 else
1065 {
1066 const char *const *patt = NULL;
1067
1068 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1069 {
1070 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1071 switch (cpu_arch_tune)
1072 {
1073 case PROCESSOR_UNKNOWN:
1074 /* We use cpu_arch_isa_flags to check if we SHOULD
1075 optimize with nops. */
1076 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1077 patt = alt_long_patt;
1078 else
1079 patt = f32_patt;
1080 break;
1081 case PROCESSOR_PENTIUM4:
1082 case PROCESSOR_NOCONA:
1083 case PROCESSOR_CORE:
1084 case PROCESSOR_CORE2:
1085 case PROCESSOR_COREI7:
1086 case PROCESSOR_L1OM:
1087 case PROCESSOR_K1OM:
1088 case PROCESSOR_GENERIC64:
1089 patt = alt_long_patt;
1090 break;
1091 case PROCESSOR_K6:
1092 case PROCESSOR_ATHLON:
1093 case PROCESSOR_K8:
1094 case PROCESSOR_AMDFAM10:
1095 case PROCESSOR_BD:
1096 patt = alt_short_patt;
1097 break;
1098 case PROCESSOR_I386:
1099 case PROCESSOR_I486:
1100 case PROCESSOR_PENTIUM:
1101 case PROCESSOR_PENTIUMPRO:
1102 case PROCESSOR_GENERIC32:
1103 patt = f32_patt;
1104 break;
1105 }
1106 }
1107 else
1108 {
1109 switch (fragP->tc_frag_data.tune)
1110 {
1111 case PROCESSOR_UNKNOWN:
1112 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1113 PROCESSOR_UNKNOWN. */
1114 abort ();
1115 break;
1116
1117 case PROCESSOR_I386:
1118 case PROCESSOR_I486:
1119 case PROCESSOR_PENTIUM:
1120 case PROCESSOR_K6:
1121 case PROCESSOR_ATHLON:
1122 case PROCESSOR_K8:
1123 case PROCESSOR_AMDFAM10:
1124 case PROCESSOR_BD:
1125 case PROCESSOR_GENERIC32:
1126 /* We use cpu_arch_isa_flags to check if we CAN optimize
1127 with nops. */
1128 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1129 patt = alt_short_patt;
1130 else
1131 patt = f32_patt;
1132 break;
1133 case PROCESSOR_PENTIUMPRO:
1134 case PROCESSOR_PENTIUM4:
1135 case PROCESSOR_NOCONA:
1136 case PROCESSOR_CORE:
1137 case PROCESSOR_CORE2:
1138 case PROCESSOR_COREI7:
1139 case PROCESSOR_L1OM:
1140 case PROCESSOR_K1OM:
1141 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1142 patt = alt_long_patt;
1143 else
1144 patt = f32_patt;
1145 break;
1146 case PROCESSOR_GENERIC64:
1147 patt = alt_long_patt;
1148 break;
1149 }
1150 }
1151
1152 if (patt == f32_patt)
1153 {
1154 /* If the padding is less than the limit computed below, we use
1155 the normal patterns. Otherwise, we use a jump instruction and
1156 adjust its offset. */
1157 int limit;
1158
1159 /* For 64bit, the limit is 3 bytes. */
1160 if (flag_code == CODE_64BIT
1161 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1162 limit = 3;
1163 else
1164 limit = 15;
1165 if (count < limit)
1166 memcpy (fragP->fr_literal + fragP->fr_fix,
1167 patt[count - 1], count);
1168 else
1169 {
1170 memcpy (fragP->fr_literal + fragP->fr_fix,
1171 jump_31, count);
1172 /* Adjust jump offset. */
1173 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1174 }
1175 }
1176 else
1177 {
1178 /* Maximum length of an instruction is 15 bytes. If the
1179 padding is greater than 15 bytes and we don't use a jump,
1180 we have to break it into smaller pieces. */
1181 int padding = count;
1182 while (padding > 15)
1183 {
1184 padding -= 15;
1185 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1186 patt [14], 15);
1187 }
1188
1189 if (padding)
1190 memcpy (fragP->fr_literal + fragP->fr_fix,
1191 patt [padding - 1], padding);
1192 }
1193 }
1194 fragP->fr_var = count;
1195 }
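/* Worked examples for the code above (illustrative only): with one of
   the alt patterns and count == 20, the loop first copies the 15 byte
   patt[14] at offset 5 and then fills the remaining 5 bytes with
   patt[4]; with f32_patt and count == 20 (at or above the limit), the
   31 byte jump_31 template is truncated to 20 bytes and its offset
   byte patched to count - 2 == 18, i.e. a jmp over 18 nops.  */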
1196
1197 static INLINE int
1198 operand_type_all_zero (const union i386_operand_type *x)
1199 {
1200 switch (ARRAY_SIZE(x->array))
1201 {
1202 case 3:
1203 if (x->array[2])
1204 return 0;
1205 case 2:
1206 if (x->array[1])
1207 return 0;
1208 case 1:
1209 return !x->array[0];
1210 default:
1211 abort ();
1212 }
1213 }
1214
1215 static INLINE void
1216 operand_type_set (union i386_operand_type *x, unsigned int v)
1217 {
1218 switch (ARRAY_SIZE(x->array))
1219 {
1220 case 3:
1221 x->array[2] = v;
1222 case 2:
1223 x->array[1] = v;
1224 case 1:
1225 x->array[0] = v;
1226 break;
1227 default:
1228 abort ();
1229 }
1230 }
1231
1232 static INLINE int
1233 operand_type_equal (const union i386_operand_type *x,
1234 const union i386_operand_type *y)
1235 {
1236 switch (ARRAY_SIZE(x->array))
1237 {
1238 case 3:
1239 if (x->array[2] != y->array[2])
1240 return 0;
1241 case 2:
1242 if (x->array[1] != y->array[1])
1243 return 0;
1244 case 1:
1245 return x->array[0] == y->array[0];
1246 break;
1247 default:
1248 abort ();
1249 }
1250 }
1251
1252 static INLINE int
1253 cpu_flags_all_zero (const union i386_cpu_flags *x)
1254 {
1255 switch (ARRAY_SIZE(x->array))
1256 {
1257 case 3:
1258 if (x->array[2])
1259 return 0;
1260 case 2:
1261 if (x->array[1])
1262 return 0;
1263 case 1:
1264 return !x->array[0];
1265 default:
1266 abort ();
1267 }
1268 }
1269
1270 static INLINE void
1271 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1272 {
1273 switch (ARRAY_SIZE(x->array))
1274 {
1275 case 3:
1276 x->array[2] = v;
1277 case 2:
1278 x->array[1] = v;
1279 case 1:
1280 x->array[0] = v;
1281 break;
1282 default:
1283 abort ();
1284 }
1285 }
1286
1287 static INLINE int
1288 cpu_flags_equal (const union i386_cpu_flags *x,
1289 const union i386_cpu_flags *y)
1290 {
1291 switch (ARRAY_SIZE(x->array))
1292 {
1293 case 3:
1294 if (x->array[2] != y->array[2])
1295 return 0;
1296 case 2:
1297 if (x->array[1] != y->array[1])
1298 return 0;
1299 case 1:
1300 return x->array[0] == y->array[0];
1301 break;
1302 default:
1303 abort ();
1304 }
1305 }
1306
1307 static INLINE int
1308 cpu_flags_check_cpu64 (i386_cpu_flags f)
1309 {
1310 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1311 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1312 }
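/* E.g. a template flagged Cpu64 (64-bit only) fails this check when
   assembling 16- or 32-bit code, and one flagged CpuNo64 fails it in
   64-bit mode; everything else passes regardless of flag_code.  */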
1313
1314 static INLINE i386_cpu_flags
1315 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1316 {
1317 switch (ARRAY_SIZE (x.array))
1318 {
1319 case 3:
1320 x.array [2] &= y.array [2];
1321 case 2:
1322 x.array [1] &= y.array [1];
1323 case 1:
1324 x.array [0] &= y.array [0];
1325 break;
1326 default:
1327 abort ();
1328 }
1329 return x;
1330 }
1331
1332 static INLINE i386_cpu_flags
1333 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1334 {
1335 switch (ARRAY_SIZE (x.array))
1336 {
1337 case 3:
1338 x.array [2] |= y.array [2];
1339 case 2:
1340 x.array [1] |= y.array [1];
1341 case 1:
1342 x.array [0] |= y.array [0];
1343 break;
1344 default:
1345 abort ();
1346 }
1347 return x;
1348 }
1349
1350 static INLINE i386_cpu_flags
1351 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1352 {
1353 switch (ARRAY_SIZE (x.array))
1354 {
1355 case 3:
1356 x.array [2] &= ~y.array [2];
1357 case 2:
1358 x.array [1] &= ~y.array [1];
1359 case 1:
1360 x.array [0] &= ~y.array [0];
1361 break;
1362 default:
1363 abort ();
1364 }
1365 return x;
1366 }
1367
1368 #define CPU_FLAGS_ARCH_MATCH 0x1
1369 #define CPU_FLAGS_64BIT_MATCH 0x2
1370 #define CPU_FLAGS_AES_MATCH 0x4
1371 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1372 #define CPU_FLAGS_AVX_MATCH 0x10
1373
1374 #define CPU_FLAGS_32BIT_MATCH \
1375 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1376 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1377 #define CPU_FLAGS_PERFECT_MATCH \
1378 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1379
1380 /* Return CPU flags match bits. */
1381
1382 static int
1383 cpu_flags_match (const insn_template *t)
1384 {
1385 i386_cpu_flags x = t->cpu_flags;
1386 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1387
1388 x.bitfield.cpu64 = 0;
1389 x.bitfield.cpuno64 = 0;
1390
1391 if (cpu_flags_all_zero (&x))
1392 {
1393 /* This instruction is available on all archs. */
1394 match |= CPU_FLAGS_32BIT_MATCH;
1395 }
1396 else
1397 {
1398 /* This instruction is available only on some archs. */
1399 i386_cpu_flags cpu = cpu_arch_flags;
1400
1401 cpu.bitfield.cpu64 = 0;
1402 cpu.bitfield.cpuno64 = 0;
1403 cpu = cpu_flags_and (x, cpu);
1404 if (!cpu_flags_all_zero (&cpu))
1405 {
1406 if (x.bitfield.cpuavx)
1407 {
1408 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1409 if (cpu.bitfield.cpuavx)
1410 {
1411 /* Check SSE2AVX. */
1412 if (!t->opcode_modifier.sse2avx || sse2avx)
1413 {
1414 match |= (CPU_FLAGS_ARCH_MATCH
1415 | CPU_FLAGS_AVX_MATCH);
1416 /* Check AES. */
1417 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1418 match |= CPU_FLAGS_AES_MATCH;
1419 /* Check PCLMUL. */
1420 if (!x.bitfield.cpupclmul
1421 || cpu.bitfield.cpupclmul)
1422 match |= CPU_FLAGS_PCLMUL_MATCH;
1423 }
1424 }
1425 else
1426 match |= CPU_FLAGS_ARCH_MATCH;
1427 }
1428 else
1429 match |= CPU_FLAGS_32BIT_MATCH;
1430 }
1431 }
1432 return match;
1433 }
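/* Example (a sketch): for a template carrying CpuAVX and CpuAES,
   assembled while the selected arch provides AVX but not AES, the
   result includes CPU_FLAGS_ARCH_MATCH and CPU_FLAGS_AVX_MATCH (plus
   CPU_FLAGS_64BIT_MATCH when the mode allows it) but not
   CPU_FLAGS_AES_MATCH, so a caller looking for CPU_FLAGS_PERFECT_MATCH
   would reject the template.  */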
1434
1435 static INLINE i386_operand_type
1436 operand_type_and (i386_operand_type x, i386_operand_type y)
1437 {
1438 switch (ARRAY_SIZE (x.array))
1439 {
1440 case 3:
1441 x.array [2] &= y.array [2];
1442 case 2:
1443 x.array [1] &= y.array [1];
1444 case 1:
1445 x.array [0] &= y.array [0];
1446 break;
1447 default:
1448 abort ();
1449 }
1450 return x;
1451 }
1452
1453 static INLINE i386_operand_type
1454 operand_type_or (i386_operand_type x, i386_operand_type y)
1455 {
1456 switch (ARRAY_SIZE (x.array))
1457 {
1458 case 3:
1459 x.array [2] |= y.array [2];
1460 case 2:
1461 x.array [1] |= y.array [1];
1462 case 1:
1463 x.array [0] |= y.array [0];
1464 break;
1465 default:
1466 abort ();
1467 }
1468 return x;
1469 }
1470
1471 static INLINE i386_operand_type
1472 operand_type_xor (i386_operand_type x, i386_operand_type y)
1473 {
1474 switch (ARRAY_SIZE (x.array))
1475 {
1476 case 3:
1477 x.array [2] ^= y.array [2];
1478 case 2:
1479 x.array [1] ^= y.array [1];
1480 case 1:
1481 x.array [0] ^= y.array [0];
1482 break;
1483 default:
1484 abort ();
1485 }
1486 return x;
1487 }
1488
1489 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1490 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1491 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1492 static const i386_operand_type inoutportreg
1493 = OPERAND_TYPE_INOUTPORTREG;
1494 static const i386_operand_type reg16_inoutportreg
1495 = OPERAND_TYPE_REG16_INOUTPORTREG;
1496 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1497 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1498 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1499 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1500 static const i386_operand_type anydisp
1501 = OPERAND_TYPE_ANYDISP;
1502 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1503 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1504 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1505 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1506 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1507 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1508 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1509 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1510 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1511 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1512 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1513 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1514
1515 enum operand_type
1516 {
1517 reg,
1518 imm,
1519 disp,
1520 anymem
1521 };
1522
1523 static INLINE int
1524 operand_type_check (i386_operand_type t, enum operand_type c)
1525 {
1526 switch (c)
1527 {
1528 case reg:
1529 return (t.bitfield.reg8
1530 || t.bitfield.reg16
1531 || t.bitfield.reg32
1532 || t.bitfield.reg64);
1533
1534 case imm:
1535 return (t.bitfield.imm8
1536 || t.bitfield.imm8s
1537 || t.bitfield.imm16
1538 || t.bitfield.imm32
1539 || t.bitfield.imm32s
1540 || t.bitfield.imm64);
1541
1542 case disp:
1543 return (t.bitfield.disp8
1544 || t.bitfield.disp16
1545 || t.bitfield.disp32
1546 || t.bitfield.disp32s
1547 || t.bitfield.disp64);
1548
1549 case anymem:
1550 return (t.bitfield.disp8
1551 || t.bitfield.disp16
1552 || t.bitfield.disp32
1553 || t.bitfield.disp32s
1554 || t.bitfield.disp64
1555 || t.bitfield.baseindex);
1556
1557 default:
1558 abort ();
1559 }
1560
1561 return 0;
1562 }
1563
1564 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1565 operand J for instruction template T. */
1566
1567 static INLINE int
1568 match_reg_size (const insn_template *t, unsigned int j)
1569 {
1570 return !((i.types[j].bitfield.byte
1571 && !t->operand_types[j].bitfield.byte)
1572 || (i.types[j].bitfield.word
1573 && !t->operand_types[j].bitfield.word)
1574 || (i.types[j].bitfield.dword
1575 && !t->operand_types[j].bitfield.dword)
1576 || (i.types[j].bitfield.qword
1577 && !t->operand_types[j].bitfield.qword));
1578 }
1579
1580 /* Return 1 if there is no conflict in any size on operand J for
1581 instruction template T. */
1582
1583 static INLINE int
1584 match_mem_size (const insn_template *t, unsigned int j)
1585 {
1586 return (match_reg_size (t, j)
1587 && !((i.types[j].bitfield.unspecified
1588 && !t->operand_types[j].bitfield.unspecified)
1589 || (i.types[j].bitfield.fword
1590 && !t->operand_types[j].bitfield.fword)
1591 || (i.types[j].bitfield.tbyte
1592 && !t->operand_types[j].bitfield.tbyte)
1593 || (i.types[j].bitfield.xmmword
1594 && !t->operand_types[j].bitfield.xmmword)
1595 || (i.types[j].bitfield.ymmword
1596 && !t->operand_types[j].bitfield.ymmword)));
1597 }
1598
1599 /* Return 1 if there is no size conflict on any operands for
1600 instruction template T. */
1601
1602 static INLINE int
1603 operand_size_match (const insn_template *t)
1604 {
1605 unsigned int j;
1606 int match = 1;
1607
1608 /* Don't check jump instructions. */
1609 if (t->opcode_modifier.jump
1610 || t->opcode_modifier.jumpbyte
1611 || t->opcode_modifier.jumpdword
1612 || t->opcode_modifier.jumpintersegment)
1613 return match;
1614
1615 /* Check memory and accumulator operand size. */
1616 for (j = 0; j < i.operands; j++)
1617 {
1618 if (t->operand_types[j].bitfield.anysize)
1619 continue;
1620
1621 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1622 {
1623 match = 0;
1624 break;
1625 }
1626
1627 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1628 {
1629 match = 0;
1630 break;
1631 }
1632 }
1633
1634 if (match)
1635 return match;
1636 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1637 {
1638 mismatch:
1639 i.error = operand_size_mismatch;
1640 return 0;
1641 }
1642
1643 /* Check reverse. */
1644 gas_assert (i.operands == 2);
1645
1646 match = 1;
1647 for (j = 0; j < 2; j++)
1648 {
1649 if (t->operand_types[j].bitfield.acc
1650 && !match_reg_size (t, j ? 0 : 1))
1651 goto mismatch;
1652
1653 if (i.types[j].bitfield.mem
1654 && !match_mem_size (t, j ? 0 : 1))
1655 goto mismatch;
1656 }
1657
1658 return match;
1659 }
1660
1661 static INLINE int
1662 operand_type_match (i386_operand_type overlap,
1663 i386_operand_type given)
1664 {
1665 i386_operand_type temp = overlap;
1666
1667 temp.bitfield.jumpabsolute = 0;
1668 temp.bitfield.unspecified = 0;
1669 temp.bitfield.byte = 0;
1670 temp.bitfield.word = 0;
1671 temp.bitfield.dword = 0;
1672 temp.bitfield.fword = 0;
1673 temp.bitfield.qword = 0;
1674 temp.bitfield.tbyte = 0;
1675 temp.bitfield.xmmword = 0;
1676 temp.bitfield.ymmword = 0;
1677 if (operand_type_all_zero (&temp))
1678 goto mismatch;
1679
1680 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1681 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1682 return 1;
1683
1684 mismatch:
1685 i.error = operand_type_mismatch;
1686 return 0;
1687 }
1688
1689 /* If given types g0 and g1 are registers they must be of the same type
1690 unless the expected operand type register overlap is null.
1691 Note that Acc in a template matches every size of reg. */
1692
1693 static INLINE int
1694 operand_type_register_match (i386_operand_type m0,
1695 i386_operand_type g0,
1696 i386_operand_type t0,
1697 i386_operand_type m1,
1698 i386_operand_type g1,
1699 i386_operand_type t1)
1700 {
1701 if (!operand_type_check (g0, reg))
1702 return 1;
1703
1704 if (!operand_type_check (g1, reg))
1705 return 1;
1706
1707 if (g0.bitfield.reg8 == g1.bitfield.reg8
1708 && g0.bitfield.reg16 == g1.bitfield.reg16
1709 && g0.bitfield.reg32 == g1.bitfield.reg32
1710 && g0.bitfield.reg64 == g1.bitfield.reg64)
1711 return 1;
1712
1713 if (m0.bitfield.acc)
1714 {
1715 t0.bitfield.reg8 = 1;
1716 t0.bitfield.reg16 = 1;
1717 t0.bitfield.reg32 = 1;
1718 t0.bitfield.reg64 = 1;
1719 }
1720
1721 if (m1.bitfield.acc)
1722 {
1723 t1.bitfield.reg8 = 1;
1724 t1.bitfield.reg16 = 1;
1725 t1.bitfield.reg32 = 1;
1726 t1.bitfield.reg64 = 1;
1727 }
1728
1729 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1730 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1731 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1732 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1733 return 1;
1734
1735 i.error = register_type_mismatch;
1736
1737 return 0;
1738 }
1739
1740 static INLINE unsigned int
1741 mode_from_disp_size (i386_operand_type t)
1742 {
1743 if (t.bitfield.disp8)
1744 return 1;
1745 else if (t.bitfield.disp16
1746 || t.bitfield.disp32
1747 || t.bitfield.disp32s)
1748 return 2;
1749 else
1750 return 0;
1751 }
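/* The return value is meant to line up roughly with the ModRM "mode"
   field as used later when building the modrm byte: 1 selects the
   disp8 form, 2 the disp16/disp32 form, and 0 the no-displacement
   form.  */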
1752
1753 static INLINE int
1754 fits_in_signed_byte (offsetT num)
1755 {
1756 return (num >= -128) && (num <= 127);
1757 }
1758
1759 static INLINE int
1760 fits_in_unsigned_byte (offsetT num)
1761 {
1762 return (num & 0xff) == num;
1763 }
1764
1765 static INLINE int
1766 fits_in_unsigned_word (offsetT num)
1767 {
1768 return (num & 0xffff) == num;
1769 }
1770
1771 static INLINE int
1772 fits_in_signed_word (offsetT num)
1773 {
1774 return (-32768 <= num) && (num <= 32767);
1775 }
1776
1777 static INLINE int
1778 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1779 {
1780 #ifndef BFD64
1781 return 1;
1782 #else
1783 return (!(((offsetT) -1 << 31) & num)
1784 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1785 #endif
1786 } /* fits_in_signed_long() */
1787
1788 static INLINE int
1789 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1790 {
1791 #ifndef BFD64
1792 return 1;
1793 #else
1794 return (num & (((offsetT) 2 << 31) - 1)) == num;
1795 #endif
1796 } /* fits_in_unsigned_long() */
1797
1798 static INLINE int
1799 fits_in_imm4 (offsetT num)
1800 {
1801 return (num & 0xf) == num;
1802 }
1803
1804 static i386_operand_type
1805 smallest_imm_type (offsetT num)
1806 {
1807 i386_operand_type t;
1808
1809 operand_type_set (&t, 0);
1810 t.bitfield.imm64 = 1;
1811
1812 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1813 {
1814 /* This code is disabled on the 486 because all the Imm1 forms
1815 in the opcode table are slower on the i486. They're the
1816 versions with the implicitly specified single-position
1817 displacement, which has another syntax if you really want to
1818 use that form. */
1819 t.bitfield.imm1 = 1;
1820 t.bitfield.imm8 = 1;
1821 t.bitfield.imm8s = 1;
1822 t.bitfield.imm16 = 1;
1823 t.bitfield.imm32 = 1;
1824 t.bitfield.imm32s = 1;
1825 }
1826 else if (fits_in_signed_byte (num))
1827 {
1828 t.bitfield.imm8 = 1;
1829 t.bitfield.imm8s = 1;
1830 t.bitfield.imm16 = 1;
1831 t.bitfield.imm32 = 1;
1832 t.bitfield.imm32s = 1;
1833 }
1834 else if (fits_in_unsigned_byte (num))
1835 {
1836 t.bitfield.imm8 = 1;
1837 t.bitfield.imm16 = 1;
1838 t.bitfield.imm32 = 1;
1839 t.bitfield.imm32s = 1;
1840 }
1841 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1842 {
1843 t.bitfield.imm16 = 1;
1844 t.bitfield.imm32 = 1;
1845 t.bitfield.imm32s = 1;
1846 }
1847 else if (fits_in_signed_long (num))
1848 {
1849 t.bitfield.imm32 = 1;
1850 t.bitfield.imm32s = 1;
1851 }
1852 else if (fits_in_unsigned_long (num))
1853 t.bitfield.imm32 = 1;
1854
1855 return t;
1856 }
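/* Worked examples: smallest_imm_type (-5) allows Imm8/Imm8S/Imm16/
   Imm32/Imm32S (plus the always-set Imm64); smallest_imm_type (200)
   drops Imm8S since 200 only fits as an unsigned byte; and
   smallest_imm_type (1) additionally allows Imm1 unless tuning for
   the i486, as explained in the comment above.  */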
1857
1858 static offsetT
1859 offset_in_range (offsetT val, int size)
1860 {
1861 addressT mask;
1862
1863 switch (size)
1864 {
1865 case 1: mask = ((addressT) 1 << 8) - 1; break;
1866 case 2: mask = ((addressT) 1 << 16) - 1; break;
1867 case 4: mask = ((addressT) 2 << 31) - 1; break;
1868 #ifdef BFD64
1869 case 8: mask = ((addressT) 2 << 63) - 1; break;
1870 #endif
1871 default: abort ();
1872 }
1873
1874 #ifdef BFD64
1875 /* If BFD64, sign extend val for 32bit address mode. */
1876 if (flag_code != CODE_64BIT
1877 || i.prefix[ADDR_PREFIX])
1878 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1879 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1880 #endif
1881
1882 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1883 {
1884 char buf1[40], buf2[40];
1885
1886 sprint_value (buf1, val);
1887 sprint_value (buf2, val & mask);
1888 as_warn (_("%s shortened to %s"), buf1, buf2);
1889 }
1890 return val & mask;
1891 }
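/* Worked example: offset_in_range (0x12345, 2) masks with 0xffff,
   emits a "shortened to" warning, and returns 0x2345.  Values that
   already fit, or whose discarded high bits are all ones (sign
   extended negatives), are masked without any warning.  */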
1892
1893 enum PREFIX_GROUP
1894 {
1895 PREFIX_EXIST = 0,
1896 PREFIX_LOCK,
1897 PREFIX_REP,
1898 PREFIX_OTHER
1899 };
1900
1901 /* Returns
1902 a. PREFIX_EXIST if attempting to add a prefix where one from the
1903 same class already exists.
1904 b. PREFIX_LOCK if lock prefix is added.
1905 c. PREFIX_REP if rep/repne prefix is added.
1906 d. PREFIX_OTHER if other prefix is added.
1907 */
1908
1909 static enum PREFIX_GROUP
1910 add_prefix (unsigned int prefix)
1911 {
1912 enum PREFIX_GROUP ret = PREFIX_OTHER;
1913 unsigned int q;
1914
1915 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1916 && flag_code == CODE_64BIT)
1917 {
1918 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1919 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1920 && (prefix & (REX_R | REX_X | REX_B))))
1921 ret = PREFIX_EXIST;
1922 q = REX_PREFIX;
1923 }
1924 else
1925 {
1926 switch (prefix)
1927 {
1928 default:
1929 abort ();
1930
1931 case CS_PREFIX_OPCODE:
1932 case DS_PREFIX_OPCODE:
1933 case ES_PREFIX_OPCODE:
1934 case FS_PREFIX_OPCODE:
1935 case GS_PREFIX_OPCODE:
1936 case SS_PREFIX_OPCODE:
1937 q = SEG_PREFIX;
1938 break;
1939
1940 case REPNE_PREFIX_OPCODE:
1941 case REPE_PREFIX_OPCODE:
1942 q = REP_PREFIX;
1943 ret = PREFIX_REP;
1944 break;
1945
1946 case LOCK_PREFIX_OPCODE:
1947 q = LOCK_PREFIX;
1948 ret = PREFIX_LOCK;
1949 break;
1950
1951 case FWAIT_OPCODE:
1952 q = WAIT_PREFIX;
1953 break;
1954
1955 case ADDR_PREFIX_OPCODE:
1956 q = ADDR_PREFIX;
1957 break;
1958
1959 case DATA_PREFIX_OPCODE:
1960 q = DATA_PREFIX;
1961 break;
1962 }
1963 if (i.prefix[q] != 0)
1964 ret = PREFIX_EXIST;
1965 }
1966
1967 if (ret)
1968 {
1969 if (!i.prefix[q])
1970 ++i.prefixes;
1971 i.prefix[q] |= prefix;
1972 }
1973 else
1974 as_bad (_("same type of prefix used twice"));
1975
1976 return ret;
1977 }
1978
1979 static void
1980 update_code_flag (int value, int check)
1981 {
1982 PRINTF_LIKE ((*as_error));
1983
1984 flag_code = (enum flag_code) value;
1985 if (flag_code == CODE_64BIT)
1986 {
1987 cpu_arch_flags.bitfield.cpu64 = 1;
1988 cpu_arch_flags.bitfield.cpuno64 = 0;
1989 }
1990 else
1991 {
1992 cpu_arch_flags.bitfield.cpu64 = 0;
1993 cpu_arch_flags.bitfield.cpuno64 = 1;
1994 }
1995 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1996 {
1997 if (check)
1998 as_error = as_fatal;
1999 else
2000 as_error = as_bad;
2001 (*as_error) (_("64bit mode not supported on `%s'."),
2002 cpu_arch_name ? cpu_arch_name : default_arch);
2003 }
2004 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2005 {
2006 if (check)
2007 as_error = as_fatal;
2008 else
2009 as_error = as_bad;
2010 (*as_error) (_("32bit mode not supported on `%s'."),
2011 cpu_arch_name ? cpu_arch_name : default_arch);
2012 }
2013 stackop_size = '\0';
2014 }
2015
2016 static void
2017 set_code_flag (int value)
2018 {
2019 update_code_flag (value, 0);
2020 }
2021
2022 static void
2023 set_16bit_gcc_code_flag (int new_code_flag)
2024 {
2025 flag_code = (enum flag_code) new_code_flag;
2026 if (flag_code != CODE_16BIT)
2027 abort ();
2028 cpu_arch_flags.bitfield.cpu64 = 0;
2029 cpu_arch_flags.bitfield.cpuno64 = 1;
2030 stackop_size = LONG_MNEM_SUFFIX;
2031 }
2032
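/* Handles the .intel_syntax (and .att_syntax) directives.  An optional
   argument selects register prefixing, e.g.

       .intel_syntax noprefix   # Intel operand order, bare register names
       .att_syntax prefix       # AT&T order, registers written as %eax etc.

   With no argument, naked register names are allowed for Intel syntax
   only when the target uses a symbol leading char, so register names
   cannot collide with user symbols.  */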
2033 static void
2034 set_intel_syntax (int syntax_flag)
2035 {
2036 /* Find out if register prefixing is specified. */
2037 int ask_naked_reg = 0;
2038
2039 SKIP_WHITESPACE ();
2040 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2041 {
2042 char *string = input_line_pointer;
2043 int e = get_symbol_end ();
2044
2045 if (strcmp (string, "prefix") == 0)
2046 ask_naked_reg = 1;
2047 else if (strcmp (string, "noprefix") == 0)
2048 ask_naked_reg = -1;
2049 else
2050 as_bad (_("bad argument to syntax directive."));
2051 *input_line_pointer = e;
2052 }
2053 demand_empty_rest_of_line ();
2054
2055 intel_syntax = syntax_flag;
2056
2057 if (ask_naked_reg == 0)
2058 allow_naked_reg = (intel_syntax
2059 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2060 else
2061 allow_naked_reg = (ask_naked_reg < 0);
2062
2063 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2064
2065 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2066 identifier_chars['$'] = intel_syntax ? '$' : 0;
2067 register_prefix = allow_naked_reg ? "" : "%";
2068 }
2069
2070 static void
2071 set_intel_mnemonic (int mnemonic_flag)
2072 {
2073 intel_mnemonic = mnemonic_flag;
2074 }
2075
2076 static void
2077 set_allow_index_reg (int flag)
2078 {
2079 allow_index_reg = flag;
2080 }
2081
2082 static void
2083 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2084 {
2085 SKIP_WHITESPACE ();
2086
2087 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2088 {
2089 char *string = input_line_pointer;
2090 int e = get_symbol_end ();
2091
2092 if (strcmp (string, "none") == 0)
2093 sse_check = sse_check_none;
2094 else if (strcmp (string, "warning") == 0)
2095 sse_check = sse_check_warning;
2096 else if (strcmp (string, "error") == 0)
2097 sse_check = sse_check_error;
2098 else
2099 as_bad (_("bad argument to sse_check directive."));
2100 *input_line_pointer = e;
2101 }
2102 else
2103 as_bad (_("missing argument for sse_check directive"));
2104
2105 demand_empty_rest_of_line ();
2106 }
2107
2108 static void
2109 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2110 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2111 {
2112 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2113 static const char *arch;
2114
2115 /* Intel L1OM and K1OM are only supported on ELF. */
2116 if (!IS_ELF)
2117 return;
2118
2119 if (!arch)
2120 {
2121 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2122 use default_arch. */
2123 arch = cpu_arch_name;
2124 if (!arch)
2125 arch = default_arch;
2126 }
2127
2128 /* If we are targeting Intel L1OM, we must enable it. */
2129 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2130 || new_flag.bitfield.cpul1om)
2131 return;
2132
2133 /* If we are targeting Intel K1OM, we must enable it. */
2134 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2135 || new_flag.bitfield.cpuk1om)
2136 return;
2137
2138 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2139 #endif
2140 }
2141
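/* Handles the .arch directive.  A plain name selects a base architecture,
   names starting with '.' enable or disable a feature on top of it, and a
   trailing ",nojumps" / ",jumps" controls conditional jump promotion.
   For example:

       .arch i686
       .arch .sse4.1
       .arch pentium4, nojumps
   */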
2142 static void
2143 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2144 {
2145 SKIP_WHITESPACE ();
2146
2147 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2148 {
2149 char *string = input_line_pointer;
2150 int e = get_symbol_end ();
2151 unsigned int j;
2152 i386_cpu_flags flags;
2153
2154 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2155 {
2156 if (strcmp (string, cpu_arch[j].name) == 0)
2157 {
2158 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2159
2160 if (*string != '.')
2161 {
2162 cpu_arch_name = cpu_arch[j].name;
2163 cpu_sub_arch_name = NULL;
2164 cpu_arch_flags = cpu_arch[j].flags;
2165 if (flag_code == CODE_64BIT)
2166 {
2167 cpu_arch_flags.bitfield.cpu64 = 1;
2168 cpu_arch_flags.bitfield.cpuno64 = 0;
2169 }
2170 else
2171 {
2172 cpu_arch_flags.bitfield.cpu64 = 0;
2173 cpu_arch_flags.bitfield.cpuno64 = 1;
2174 }
2175 cpu_arch_isa = cpu_arch[j].type;
2176 cpu_arch_isa_flags = cpu_arch[j].flags;
2177 if (!cpu_arch_tune_set)
2178 {
2179 cpu_arch_tune = cpu_arch_isa;
2180 cpu_arch_tune_flags = cpu_arch_isa_flags;
2181 }
2182 break;
2183 }
2184
2185 if (!cpu_arch[j].negated)
2186 flags = cpu_flags_or (cpu_arch_flags,
2187 cpu_arch[j].flags);
2188 else
2189 flags = cpu_flags_and_not (cpu_arch_flags,
2190 cpu_arch[j].flags);
2191 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2192 {
2193 if (cpu_sub_arch_name)
2194 {
2195 char *name = cpu_sub_arch_name;
2196 cpu_sub_arch_name = concat (name,
2197 cpu_arch[j].name,
2198 (const char *) NULL);
2199 free (name);
2200 }
2201 else
2202 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2203 cpu_arch_flags = flags;
2204 cpu_arch_isa_flags = flags;
2205 }
2206 *input_line_pointer = e;
2207 demand_empty_rest_of_line ();
2208 return;
2209 }
2210 }
2211 if (j >= ARRAY_SIZE (cpu_arch))
2212 as_bad (_("no such architecture: `%s'"), string);
2213
2214 *input_line_pointer = e;
2215 }
2216 else
2217 as_bad (_("missing cpu architecture"));
2218
2219 no_cond_jump_promotion = 0;
2220 if (*input_line_pointer == ','
2221 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2222 {
2223 char *string = ++input_line_pointer;
2224 int e = get_symbol_end ();
2225
2226 if (strcmp (string, "nojumps") == 0)
2227 no_cond_jump_promotion = 1;
2228 else if (strcmp (string, "jumps") == 0)
2229 ;
2230 else
2231 as_bad (_("no such architecture modifier: `%s'"), string);
2232
2233 *input_line_pointer = e;
2234 }
2235
2236 demand_empty_rest_of_line ();
2237 }
2238
2239 enum bfd_architecture
2240 i386_arch (void)
2241 {
2242 if (cpu_arch_isa == PROCESSOR_L1OM)
2243 {
2244 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2245 || flag_code != CODE_64BIT)
2246 as_fatal (_("Intel L1OM is 64bit ELF only"));
2247 return bfd_arch_l1om;
2248 }
2249 else if (cpu_arch_isa == PROCESSOR_K1OM)
2250 {
2251 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2252 || flag_code != CODE_64BIT)
2253 as_fatal (_("Intel K1OM is 64bit ELF only"));
2254 return bfd_arch_k1om;
2255 }
2256 else
2257 return bfd_arch_i386;
2258 }
2259
2260 unsigned long
2261 i386_mach (void)
2262 {
2263 if (!strncmp (default_arch, "x86_64", 6))
2264 {
2265 if (cpu_arch_isa == PROCESSOR_L1OM)
2266 {
2267 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2268 || default_arch[6] != '\0')
2269 as_fatal (_("Intel L1OM is 64bit ELF only"));
2270 return bfd_mach_l1om;
2271 }
2272 else if (cpu_arch_isa == PROCESSOR_K1OM)
2273 {
2274 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2275 || default_arch[6] != '\0')
2276 as_fatal (_("Intel K1OM is 64bit ELF only"));
2277 return bfd_mach_k1om;
2278 }
2279 else if (default_arch[6] == '\0')
2280 return bfd_mach_x86_64;
2281 else
2282 return bfd_mach_x64_32;
2283 }
2284 else if (!strcmp (default_arch, "i386"))
2285 return bfd_mach_i386_i386;
2286 else
2287 as_fatal (_("unknown architecture"));
2288 }
2289 \f
2290 void
2291 md_begin (void)
2292 {
2293 const char *hash_err;
2294
2295 /* Initialize op_hash hash table. */
2296 op_hash = hash_new ();
2297
2298 {
2299 const insn_template *optab;
2300 templates *core_optab;
2301
2302 /* Setup for loop. */
2303 optab = i386_optab;
2304 core_optab = (templates *) xmalloc (sizeof (templates));
2305 core_optab->start = optab;
2306
2307 while (1)
2308 {
2309 ++optab;
2310 if (optab->name == NULL
2311 || strcmp (optab->name, (optab - 1)->name) != 0)
2312 {
2313 /* different name --> ship out current template list;
2314 add to hash table; & begin anew. */
2315 core_optab->end = optab;
2316 hash_err = hash_insert (op_hash,
2317 (optab - 1)->name,
2318 (void *) core_optab);
2319 if (hash_err)
2320 {
2321 as_fatal (_("internal Error: Can't hash %s: %s"),
2322 (optab - 1)->name,
2323 hash_err);
2324 }
2325 if (optab->name == NULL)
2326 break;
2327 core_optab = (templates *) xmalloc (sizeof (templates));
2328 core_optab->start = optab;
2329 }
2330 }
2331 }
2332
2333 /* Initialize reg_hash hash table. */
2334 reg_hash = hash_new ();
2335 {
2336 const reg_entry *regtab;
2337 unsigned int regtab_size = i386_regtab_size;
2338
2339 for (regtab = i386_regtab; regtab_size--; regtab++)
2340 {
2341 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2342 if (hash_err)
2343 as_fatal (_("internal Error: Can't hash %s: %s"),
2344 regtab->reg_name,
2345 hash_err);
2346 }
2347 }
2348
2349 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2350 {
2351 int c;
2352 char *p;
2353
2354 for (c = 0; c < 256; c++)
2355 {
2356 if (ISDIGIT (c))
2357 {
2358 digit_chars[c] = c;
2359 mnemonic_chars[c] = c;
2360 register_chars[c] = c;
2361 operand_chars[c] = c;
2362 }
2363 else if (ISLOWER (c))
2364 {
2365 mnemonic_chars[c] = c;
2366 register_chars[c] = c;
2367 operand_chars[c] = c;
2368 }
2369 else if (ISUPPER (c))
2370 {
2371 mnemonic_chars[c] = TOLOWER (c);
2372 register_chars[c] = mnemonic_chars[c];
2373 operand_chars[c] = c;
2374 }
2375
2376 if (ISALPHA (c) || ISDIGIT (c))
2377 identifier_chars[c] = c;
2378 else if (c >= 128)
2379 {
2380 identifier_chars[c] = c;
2381 operand_chars[c] = c;
2382 }
2383 }
2384
2385 #ifdef LEX_AT
2386 identifier_chars['@'] = '@';
2387 #endif
2388 #ifdef LEX_QM
2389 identifier_chars['?'] = '?';
2390 operand_chars['?'] = '?';
2391 #endif
2392 digit_chars['-'] = '-';
2393 mnemonic_chars['_'] = '_';
2394 mnemonic_chars['-'] = '-';
2395 mnemonic_chars['.'] = '.';
2396 identifier_chars['_'] = '_';
2397 identifier_chars['.'] = '.';
2398
2399 for (p = operand_special_chars; *p != '\0'; p++)
2400 operand_chars[(unsigned char) *p] = *p;
2401 }
2402
2403 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2404 if (IS_ELF)
2405 {
2406 record_alignment (text_section, 2);
2407 record_alignment (data_section, 2);
2408 record_alignment (bss_section, 2);
2409 }
2410 #endif
2411
2412 if (flag_code == CODE_64BIT)
2413 {
2414 #if defined (OBJ_COFF) && defined (TE_PE)
2415 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2416 ? 32 : 16);
2417 #else
2418 x86_dwarf2_return_column = 16;
2419 #endif
2420 x86_cie_data_alignment = -8;
2421 }
2422 else
2423 {
2424 x86_dwarf2_return_column = 8;
2425 x86_cie_data_alignment = -4;
2426 }
2427 }
2428
2429 void
2430 i386_print_statistics (FILE *file)
2431 {
2432 hash_print_statistics (file, "i386 opcode", op_hash);
2433 hash_print_statistics (file, "i386 register", reg_hash);
2434 }
2435 \f
2436 #ifdef DEBUG386
2437
2438 /* Debugging routines for md_assemble. */
2439 static void pte (insn_template *);
2440 static void pt (i386_operand_type);
2441 static void pe (expressionS *);
2442 static void ps (symbolS *);
2443
2444 static void
2445 pi (char *line, i386_insn *x)
2446 {
2447 unsigned int j;
2448
2449 fprintf (stdout, "%s: template ", line);
2450 pte (&x->tm);
2451 fprintf (stdout, " address: base %s index %s scale %x\n",
2452 x->base_reg ? x->base_reg->reg_name : "none",
2453 x->index_reg ? x->index_reg->reg_name : "none",
2454 x->log2_scale_factor);
2455 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2456 x->rm.mode, x->rm.reg, x->rm.regmem);
2457 fprintf (stdout, " sib: base %x index %x scale %x\n",
2458 x->sib.base, x->sib.index, x->sib.scale);
2459 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2460 (x->rex & REX_W) != 0,
2461 (x->rex & REX_R) != 0,
2462 (x->rex & REX_X) != 0,
2463 (x->rex & REX_B) != 0);
2464 for (j = 0; j < x->operands; j++)
2465 {
2466 fprintf (stdout, " #%d: ", j + 1);
2467 pt (x->types[j]);
2468 fprintf (stdout, "\n");
2469 if (x->types[j].bitfield.reg8
2470 || x->types[j].bitfield.reg16
2471 || x->types[j].bitfield.reg32
2472 || x->types[j].bitfield.reg64
2473 || x->types[j].bitfield.regmmx
2474 || x->types[j].bitfield.regxmm
2475 || x->types[j].bitfield.regymm
2476 || x->types[j].bitfield.sreg2
2477 || x->types[j].bitfield.sreg3
2478 || x->types[j].bitfield.control
2479 || x->types[j].bitfield.debug
2480 || x->types[j].bitfield.test)
2481 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2482 if (operand_type_check (x->types[j], imm))
2483 pe (x->op[j].imms);
2484 if (operand_type_check (x->types[j], disp))
2485 pe (x->op[j].disps);
2486 }
2487 }
2488
2489 static void
2490 pte (insn_template *t)
2491 {
2492 unsigned int j;
2493 fprintf (stdout, " %d operands ", t->operands);
2494 fprintf (stdout, "opcode %x ", t->base_opcode);
2495 if (t->extension_opcode != None)
2496 fprintf (stdout, "ext %x ", t->extension_opcode);
2497 if (t->opcode_modifier.d)
2498 fprintf (stdout, "D");
2499 if (t->opcode_modifier.w)
2500 fprintf (stdout, "W");
2501 fprintf (stdout, "\n");
2502 for (j = 0; j < t->operands; j++)
2503 {
2504 fprintf (stdout, " #%d type ", j + 1);
2505 pt (t->operand_types[j]);
2506 fprintf (stdout, "\n");
2507 }
2508 }
2509
2510 static void
2511 pe (expressionS *e)
2512 {
2513 fprintf (stdout, " operation %d\n", e->X_op);
2514 fprintf (stdout, " add_number %ld (%lx)\n",
2515 (long) e->X_add_number, (long) e->X_add_number);
2516 if (e->X_add_symbol)
2517 {
2518 fprintf (stdout, " add_symbol ");
2519 ps (e->X_add_symbol);
2520 fprintf (stdout, "\n");
2521 }
2522 if (e->X_op_symbol)
2523 {
2524 fprintf (stdout, " op_symbol ");
2525 ps (e->X_op_symbol);
2526 fprintf (stdout, "\n");
2527 }
2528 }
2529
2530 static void
2531 ps (symbolS *s)
2532 {
2533 fprintf (stdout, "%s type %s%s",
2534 S_GET_NAME (s),
2535 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2536 segment_name (S_GET_SEGMENT (s)));
2537 }
2538
2539 static struct type_name
2540 {
2541 i386_operand_type mask;
2542 const char *name;
2543 }
2544 const type_names[] =
2545 {
2546 { OPERAND_TYPE_REG8, "r8" },
2547 { OPERAND_TYPE_REG16, "r16" },
2548 { OPERAND_TYPE_REG32, "r32" },
2549 { OPERAND_TYPE_REG64, "r64" },
2550 { OPERAND_TYPE_IMM8, "i8" },
2551 { OPERAND_TYPE_IMM8S, "i8s" },
2552 { OPERAND_TYPE_IMM16, "i16" },
2553 { OPERAND_TYPE_IMM32, "i32" },
2554 { OPERAND_TYPE_IMM32S, "i32s" },
2555 { OPERAND_TYPE_IMM64, "i64" },
2556 { OPERAND_TYPE_IMM1, "i1" },
2557 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2558 { OPERAND_TYPE_DISP8, "d8" },
2559 { OPERAND_TYPE_DISP16, "d16" },
2560 { OPERAND_TYPE_DISP32, "d32" },
2561 { OPERAND_TYPE_DISP32S, "d32s" },
2562 { OPERAND_TYPE_DISP64, "d64" },
2563 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2564 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2565 { OPERAND_TYPE_CONTROL, "control reg" },
2566 { OPERAND_TYPE_TEST, "test reg" },
2567 { OPERAND_TYPE_DEBUG, "debug reg" },
2568 { OPERAND_TYPE_FLOATREG, "FReg" },
2569 { OPERAND_TYPE_FLOATACC, "FAcc" },
2570 { OPERAND_TYPE_SREG2, "SReg2" },
2571 { OPERAND_TYPE_SREG3, "SReg3" },
2572 { OPERAND_TYPE_ACC, "Acc" },
2573 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2574 { OPERAND_TYPE_REGMMX, "rMMX" },
2575 { OPERAND_TYPE_REGXMM, "rXMM" },
2576 { OPERAND_TYPE_REGYMM, "rYMM" },
2577 { OPERAND_TYPE_ESSEG, "es" },
2578 };
2579
2580 static void
2581 pt (i386_operand_type t)
2582 {
2583 unsigned int j;
2584 i386_operand_type a;
2585
2586 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2587 {
2588 a = operand_type_and (t, type_names[j].mask);
2589 if (!operand_type_all_zero (&a))
2590 fprintf (stdout, "%s, ", type_names[j].name);
2591 }
2592 fflush (stdout);
2593 }
2594
2595 #endif /* DEBUG386 */
2596 \f
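/* reloc () maps a (size, pcrel, sign) triple, plus an optional reloc
   already chosen by the operand parser, onto a BFD reloc code.  For
   instance reloc (4, 1, 1, NO_RELOC) yields BFD_RELOC_32_PCREL,
   reloc (4, 0, 1, NO_RELOC) yields BFD_RELOC_X86_64_32S, and
   reloc (1, 0, 0, NO_RELOC) yields BFD_RELOC_8; for 8-byte fields the
   GOT/TLS relocs passed in OTHER are widened to their 64-bit
   counterparts.  */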
2597 static bfd_reloc_code_real_type
2598 reloc (unsigned int size,
2599 int pcrel,
2600 int sign,
2601 bfd_reloc_code_real_type other)
2602 {
2603 if (other != NO_RELOC)
2604 {
2605 reloc_howto_type *rel;
2606
2607 if (size == 8)
2608 switch (other)
2609 {
2610 case BFD_RELOC_X86_64_GOT32:
2611 return BFD_RELOC_X86_64_GOT64;
2612 break;
2613 case BFD_RELOC_X86_64_PLTOFF64:
2614 return BFD_RELOC_X86_64_PLTOFF64;
2615 break;
2616 case BFD_RELOC_X86_64_GOTPC32:
2617 other = BFD_RELOC_X86_64_GOTPC64;
2618 break;
2619 case BFD_RELOC_X86_64_GOTPCREL:
2620 other = BFD_RELOC_X86_64_GOTPCREL64;
2621 break;
2622 case BFD_RELOC_X86_64_TPOFF32:
2623 other = BFD_RELOC_X86_64_TPOFF64;
2624 break;
2625 case BFD_RELOC_X86_64_DTPOFF32:
2626 other = BFD_RELOC_X86_64_DTPOFF64;
2627 break;
2628 default:
2629 break;
2630 }
2631
2632 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2633 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2634 sign = -1;
2635
2636 rel = bfd_reloc_type_lookup (stdoutput, other);
2637 if (!rel)
2638 as_bad (_("unknown relocation (%u)"), other);
2639 else if (size != bfd_get_reloc_size (rel))
2640 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2641 bfd_get_reloc_size (rel),
2642 size);
2643 else if (pcrel && !rel->pc_relative)
2644 as_bad (_("non-pc-relative relocation for pc-relative field"));
2645 else if ((rel->complain_on_overflow == complain_overflow_signed
2646 && !sign)
2647 || (rel->complain_on_overflow == complain_overflow_unsigned
2648 && sign > 0))
2649 as_bad (_("relocated field and relocation type differ in signedness"));
2650 else
2651 return other;
2652 return NO_RELOC;
2653 }
2654
2655 if (pcrel)
2656 {
2657 if (!sign)
2658 as_bad (_("there are no unsigned pc-relative relocations"));
2659 switch (size)
2660 {
2661 case 1: return BFD_RELOC_8_PCREL;
2662 case 2: return BFD_RELOC_16_PCREL;
2663 case 4: return BFD_RELOC_32_PCREL;
2664 case 8: return BFD_RELOC_64_PCREL;
2665 }
2666 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2667 }
2668 else
2669 {
2670 if (sign > 0)
2671 switch (size)
2672 {
2673 case 4: return BFD_RELOC_X86_64_32S;
2674 }
2675 else
2676 switch (size)
2677 {
2678 case 1: return BFD_RELOC_8;
2679 case 2: return BFD_RELOC_16;
2680 case 4: return BFD_RELOC_32;
2681 case 8: return BFD_RELOC_64;
2682 }
2683 as_bad (_("cannot do %s %u byte relocation"),
2684 sign > 0 ? "signed" : "unsigned", size);
2685 }
2686
2687 return NO_RELOC;
2688 }
2689
2690 /* Here we decide which fixups can be adjusted to make them relative to
2691 the beginning of the section instead of the symbol. Basically we need
2692 to make sure that the dynamic relocations are done correctly, so in
2693 some cases we force the original symbol to be used. */
2694
2695 int
2696 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2697 {
2698 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2699 if (!IS_ELF)
2700 return 1;
2701
2702 /* Don't adjust pc-relative references to merge sections in 64-bit
2703 mode. */
2704 if (use_rela_relocations
2705 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2706 && fixP->fx_pcrel)
2707 return 0;
2708
2709 /* The x86_64 GOTPCREL relocations are represented as 32bit PC-relative
2710 relocations and changed later by validate_fix. */
2711 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2712 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2713 return 0;
2714
2715 /* adjust_reloc_syms doesn't know about the GOT. */
2716 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2717 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2718 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2719 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2720 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2721 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2722 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2723 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2724 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2725 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2726 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2727 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2728 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2729 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2730 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2731 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2732 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2733 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2734 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2735 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2736 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2737 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2738 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2739 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2740 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2741 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2742 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2743 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2744 return 0;
2745 #endif
2746 return 1;
2747 }
2748
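/* Return value key for intel_float_operand ():
     0 - not an FPU mnemonic (including fxsave/fxrstor, which are not
         treated as math ops),
     1 - ordinary floating point operation,
     2 - integer form (fiadd, fild, ...), the memory operand is an integer,
     3 - control/state operation (fldcw/fldenv, non-wait fn* forms other
         than fnop, frstor, fsave, fstcw/fstenv/fstsw, ...).  */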
2749 static int
2750 intel_float_operand (const char *mnemonic)
2751 {
2752 /* Note that the value returned is meaningful only for opcodes with (memory)
2753 operands, hence the code here is free to improperly handle opcodes that
2754 have no operands (for better performance and smaller code). */
2755
2756 if (mnemonic[0] != 'f')
2757 return 0; /* non-math */
2758
2759 switch (mnemonic[1])
2760 {
2761 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2762 the fs segment override prefix are not currently handled because no
2763 call path can make opcodes without operands get here. */
2764 case 'i':
2765 return 2 /* integer op */;
2766 case 'l':
2767 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2768 return 3; /* fldcw/fldenv */
2769 break;
2770 case 'n':
2771 if (mnemonic[2] != 'o' /* fnop */)
2772 return 3; /* non-waiting control op */
2773 break;
2774 case 'r':
2775 if (mnemonic[2] == 's')
2776 return 3; /* frstor/frstpm */
2777 break;
2778 case 's':
2779 if (mnemonic[2] == 'a')
2780 return 3; /* fsave */
2781 if (mnemonic[2] == 't')
2782 {
2783 switch (mnemonic[3])
2784 {
2785 case 'c': /* fstcw */
2786 case 'd': /* fstdw */
2787 case 'e': /* fstenv */
2788 case 's': /* fsts[gw] */
2789 return 3;
2790 }
2791 }
2792 break;
2793 case 'x':
2794 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2795 return 0; /* fxsave/fxrstor are not really math ops */
2796 break;
2797 }
2798
2799 return 1;
2800 }
2801
2802 /* Build the VEX prefix. */
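/* Layout produced below (see the Intel/AMD manuals):

     2-byte form:  C5  [R' vvvv L pp]
     3-byte form:  C4 or 8F  [R' X' B' m-mmmm]  [W vvvv L pp]

   where R'/X'/B' are the inverted REX.R/X/B bits, vvvv is the inverted
   extra register specifier, L selects 128/256-bit vector length, pp
   encodes the implied 66/F3/F2 prefix and m-mmmm the opcode map.  */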
2803
2804 static void
2805 build_vex_prefix (const insn_template *t)
2806 {
2807 unsigned int register_specifier;
2808 unsigned int implied_prefix;
2809 unsigned int vector_length;
2810
2811 /* Check register specifier. */
2812 if (i.vex.register_specifier)
2813 {
2814 register_specifier = i.vex.register_specifier->reg_num;
2815 if ((i.vex.register_specifier->reg_flags & RegRex))
2816 register_specifier += 8;
2817 register_specifier = ~register_specifier & 0xf;
2818 }
2819 else
2820 register_specifier = 0xf;
2821
2822 /* Use 2-byte VEX prefix by swapping destination and source
2823 operand. */
2824 if (!i.swap_operand
2825 && i.operands == i.reg_operands
2826 && i.tm.opcode_modifier.vexopcode == VEX0F
2827 && i.tm.opcode_modifier.s
2828 && i.rex == REX_B)
2829 {
2830 unsigned int xchg = i.operands - 1;
2831 union i386_op temp_op;
2832 i386_operand_type temp_type;
2833
2834 temp_type = i.types[xchg];
2835 i.types[xchg] = i.types[0];
2836 i.types[0] = temp_type;
2837 temp_op = i.op[xchg];
2838 i.op[xchg] = i.op[0];
2839 i.op[0] = temp_op;
2840
2841 gas_assert (i.rm.mode == 3);
2842
2843 i.rex = REX_R;
2844 xchg = i.rm.regmem;
2845 i.rm.regmem = i.rm.reg;
2846 i.rm.reg = xchg;
2847
2848 /* Use the next insn. */
2849 i.tm = t[1];
2850 }
2851
2852 if (i.tm.opcode_modifier.vex == VEXScalar)
2853 vector_length = avxscalar;
2854 else
2855 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2856
2857 switch ((i.tm.base_opcode >> 8) & 0xff)
2858 {
2859 case 0:
2860 implied_prefix = 0;
2861 break;
2862 case DATA_PREFIX_OPCODE:
2863 implied_prefix = 1;
2864 break;
2865 case REPE_PREFIX_OPCODE:
2866 implied_prefix = 2;
2867 break;
2868 case REPNE_PREFIX_OPCODE:
2869 implied_prefix = 3;
2870 break;
2871 default:
2872 abort ();
2873 }
2874
2875 /* Use 2-byte VEX prefix if possible. */
2876 if (i.tm.opcode_modifier.vexopcode == VEX0F
2877 && i.tm.opcode_modifier.vexw != VEXW1
2878 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2879 {
2880 /* 2-byte VEX prefix. */
2881 unsigned int r;
2882
2883 i.vex.length = 2;
2884 i.vex.bytes[0] = 0xc5;
2885
2886 /* Check the REX.R bit. */
2887 r = (i.rex & REX_R) ? 0 : 1;
2888 i.vex.bytes[1] = (r << 7
2889 | register_specifier << 3
2890 | vector_length << 2
2891 | implied_prefix);
2892 }
2893 else
2894 {
2895 /* 3-byte VEX prefix. */
2896 unsigned int m, w;
2897
2898 i.vex.length = 3;
2899
2900 switch (i.tm.opcode_modifier.vexopcode)
2901 {
2902 case VEX0F:
2903 m = 0x1;
2904 i.vex.bytes[0] = 0xc4;
2905 break;
2906 case VEX0F38:
2907 m = 0x2;
2908 i.vex.bytes[0] = 0xc4;
2909 break;
2910 case VEX0F3A:
2911 m = 0x3;
2912 i.vex.bytes[0] = 0xc4;
2913 break;
2914 case XOP08:
2915 m = 0x8;
2916 i.vex.bytes[0] = 0x8f;
2917 break;
2918 case XOP09:
2919 m = 0x9;
2920 i.vex.bytes[0] = 0x8f;
2921 break;
2922 case XOP0A:
2923 m = 0xa;
2924 i.vex.bytes[0] = 0x8f;
2925 break;
2926 default:
2927 abort ();
2928 }
2929
2930 /* The high 3 bits of the second VEX byte are the one's complement
2931 of RXB bits from REX. */
2932 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2933
2934 /* Check the REX.W bit. */
2935 w = (i.rex & REX_W) ? 1 : 0;
2936 if (i.tm.opcode_modifier.vexw)
2937 {
2938 if (w)
2939 abort ();
2940
2941 if (i.tm.opcode_modifier.vexw == VEXW1)
2942 w = 1;
2943 }
2944
2945 i.vex.bytes[2] = (w << 7
2946 | register_specifier << 3
2947 | vector_length << 2
2948 | implied_prefix);
2949 }
2950 }
2951
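/* A sketch of what process_immext () below handles: for SSE3 insns with
   fixed register operands (presumably the monitor/mwait style templates,
   e.g. "monitor %eax,%ecx,%edx"), the registers are only validated and
   then dropped; for 3DNow!/SSE2/AVX opcodes whose suffix lives in the
   immediate byte, a fake Imm8 operand holding tm.extension_opcode is
   appended so the normal immediate-emission path outputs it.  */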
2952 static void
2953 process_immext (void)
2954 {
2955 expressionS *exp;
2956
2957 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2958 {
2959 /* These SSE3 instructions have fixed operands with an opcode
2960 suffix which is coded in the same place as an 8-bit immediate
2961 field would be. Here we check those operands and remove them
2962 afterwards. */
2963 unsigned int x;
2964
2965 for (x = 0; x < i.operands; x++)
2966 if (i.op[x].regs->reg_num != x)
2967 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2968 register_prefix, i.op[x].regs->reg_name, x + 1,
2969 i.tm.name);
2970
2971 i.operands = 0;
2972 }
2973
2974 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2975 which is coded in the same place as an 8-bit immediate field
2976 would be. Here we fake an 8-bit immediate operand from the
2977 opcode suffix stored in tm.extension_opcode.
2978
2979 AVX instructions also use this encoding for some
2980 3-argument instructions. */
2981
2982 gas_assert (i.imm_operands == 0
2983 && (i.operands <= 2
2984 || (i.tm.opcode_modifier.vex
2985 && i.operands <= 4)));
2986
2987 exp = &im_expressions[i.imm_operands++];
2988 i.op[i.operands].imms = exp;
2989 i.types[i.operands] = imm8;
2990 i.operands++;
2991 exp->X_op = O_constant;
2992 exp->X_add_number = i.tm.extension_opcode;
2993 i.tm.extension_opcode = None;
2994 }
2995
2996 /* This is the guts of the machine-dependent assembler. LINE points to a
2997 machine dependent instruction. This function is supposed to emit
2998 the frags/bytes it assembles to. */
2999
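/* Rough flow of md_assemble (): parse_insn () picks the mnemonic and any
   prefixes, parse_operands () fills i.op[]/i.types[], immediates and
   displacements are shrunk by optimize_imm ()/optimize_disp (),
   match_template () selects the template, then process_suffix (),
   finalize_imm (), process_immext () and process_operands () fix up the
   encoding before build_vex_prefix ()/REX handling and output_insn ().  */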
3000 void
3001 md_assemble (char *line)
3002 {
3003 unsigned int j;
3004 char mnemonic[MAX_MNEM_SIZE];
3005 const insn_template *t;
3006
3007 /* Initialize globals. */
3008 memset (&i, '\0', sizeof (i));
3009 for (j = 0; j < MAX_OPERANDS; j++)
3010 i.reloc[j] = NO_RELOC;
3011 memset (disp_expressions, '\0', sizeof (disp_expressions));
3012 memset (im_expressions, '\0', sizeof (im_expressions));
3013 save_stack_p = save_stack;
3014
3015 /* First parse an instruction mnemonic & call i386_operand for the operands.
3016 We assume that the scrubber has arranged it so that line[0] is the valid
3017 start of a (possibly prefixed) mnemonic. */
3018
3019 line = parse_insn (line, mnemonic);
3020 if (line == NULL)
3021 return;
3022
3023 line = parse_operands (line, mnemonic);
3024 this_operand = -1;
3025 if (line == NULL)
3026 return;
3027
3028 /* Now we've parsed the mnemonic into a set of templates, and have the
3029 operands at hand. */
3030
3031 /* All intel opcodes have reversed operands except for "bound" and
3032 "enter". We also don't reverse intersegment "jmp" and "call"
3033 instructions with 2 immediate operands so that the immediate segment
3034 precedes the offset, as it does when in AT&T mode. */
3035 if (intel_syntax
3036 && i.operands > 1
3037 && (strcmp (mnemonic, "bound") != 0)
3038 && (strcmp (mnemonic, "invlpga") != 0)
3039 && !(operand_type_check (i.types[0], imm)
3040 && operand_type_check (i.types[1], imm)))
3041 swap_operands ();
3042
3043 /* The order of the immediates should be reversed for the
3044 two-immediate extrq and insertq instructions. */
3045 if (i.imm_operands == 2
3046 && (strcmp (mnemonic, "extrq") == 0
3047 || strcmp (mnemonic, "insertq") == 0))
3048 swap_2_operands (0, 1);
3049
3050 if (i.imm_operands)
3051 optimize_imm ();
3052
3053 /* Don't optimize displacement for movabs since it only takes 64bit
3054 displacement. */
3055 if (i.disp_operands
3056 && !i.disp32_encoding
3057 && (flag_code != CODE_64BIT
3058 || strcmp (mnemonic, "movabs") != 0))
3059 optimize_disp ();
3060
3061 /* Next, we find a template that matches the given insn,
3062 making sure the overlap of the given operands types is consistent
3063 with the template operand types. */
3064
3065 if (!(t = match_template ()))
3066 return;
3067
3068 if (sse_check != sse_check_none
3069 && !i.tm.opcode_modifier.noavx
3070 && (i.tm.cpu_flags.bitfield.cpusse
3071 || i.tm.cpu_flags.bitfield.cpusse2
3072 || i.tm.cpu_flags.bitfield.cpusse3
3073 || i.tm.cpu_flags.bitfield.cpussse3
3074 || i.tm.cpu_flags.bitfield.cpusse4_1
3075 || i.tm.cpu_flags.bitfield.cpusse4_2))
3076 {
3077 (sse_check == sse_check_warning
3078 ? as_warn
3079 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3080 }
3081
3082 /* Zap movzx and movsx suffix. The suffix has been set from
3083 "word ptr" or "byte ptr" on the source operand in Intel syntax
3084 or extracted from mnemonic in AT&T syntax. But we'll use
3085 the destination register to choose the suffix for encoding. */
3086 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3087 {
3088 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3089 there is no suffix, the default will be byte extension. */
3090 if (i.reg_operands != 2
3091 && !i.suffix
3092 && intel_syntax)
3093 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3094
3095 i.suffix = 0;
3096 }
3097
3098 if (i.tm.opcode_modifier.fwait)
3099 if (!add_prefix (FWAIT_OPCODE))
3100 return;
3101
3102 /* Check for lock without a lockable instruction. Destination operand
3103 must be memory unless it is xchg (0x86). */
3104 if (i.prefix[LOCK_PREFIX]
3105 && (!i.tm.opcode_modifier.islockable
3106 || i.mem_operands == 0
3107 || (i.tm.base_opcode != 0x86
3108 && !operand_type_check (i.types[i.operands - 1], anymem))))
3109 {
3110 as_bad (_("expecting lockable instruction after `lock'"));
3111 return;
3112 }
3113
3114 /* Check string instruction segment overrides. */
3115 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3116 {
3117 if (!check_string ())
3118 return;
3119 i.disp_operands = 0;
3120 }
3121
3122 if (!process_suffix ())
3123 return;
3124
3125 /* Update operand types. */
3126 for (j = 0; j < i.operands; j++)
3127 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3128
3129 /* Make still unresolved immediate matches conform to size of immediate
3130 given in i.suffix. */
3131 if (!finalize_imm ())
3132 return;
3133
3134 if (i.types[0].bitfield.imm1)
3135 i.imm_operands = 0; /* kludge for shift insns. */
3136
3137 /* We only need to check those implicit registers for instructions
3138 with 3 operands or fewer. */
3139 if (i.operands <= 3)
3140 for (j = 0; j < i.operands; j++)
3141 if (i.types[j].bitfield.inoutportreg
3142 || i.types[j].bitfield.shiftcount
3143 || i.types[j].bitfield.acc
3144 || i.types[j].bitfield.floatacc)
3145 i.reg_operands--;
3146
3147 /* ImmExt should be processed after SSE2AVX. */
3148 if (!i.tm.opcode_modifier.sse2avx
3149 && i.tm.opcode_modifier.immext)
3150 process_immext ();
3151
3152 /* For insns with operands there are more diddles to do to the opcode. */
3153 if (i.operands)
3154 {
3155 if (!process_operands ())
3156 return;
3157 }
3158 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3159 {
3160 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3161 as_warn (_("translating to `%sp'"), i.tm.name);
3162 }
3163
3164 if (i.tm.opcode_modifier.vex)
3165 build_vex_prefix (t);
3166
3167 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3168 instructions may define INT_OPCODE as well, so avoid this corner
3169 case for those instructions that use MODRM. */
3170 if (i.tm.base_opcode == INT_OPCODE
3171 && !i.tm.opcode_modifier.modrm
3172 && i.op[0].imms->X_add_number == 3)
3173 {
3174 i.tm.base_opcode = INT3_OPCODE;
3175 i.imm_operands = 0;
3176 }
3177
3178 if ((i.tm.opcode_modifier.jump
3179 || i.tm.opcode_modifier.jumpbyte
3180 || i.tm.opcode_modifier.jumpdword)
3181 && i.op[0].disps->X_op == O_constant)
3182 {
3183 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3184 the absolute address given by the constant. Since ix86 jumps and
3185 calls are pc relative, we need to generate a reloc. */
3186 i.op[0].disps->X_add_symbol = &abs_symbol;
3187 i.op[0].disps->X_op = O_symbol;
3188 }
3189
3190 if (i.tm.opcode_modifier.rex64)
3191 i.rex |= REX_W;
3192
3193 /* For 8 bit registers we need an empty rex prefix. Also if the
3194 instruction already has a prefix, we need to convert old
3195 registers to new ones. */
3196
3197 if ((i.types[0].bitfield.reg8
3198 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3199 || (i.types[1].bitfield.reg8
3200 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3201 || ((i.types[0].bitfield.reg8
3202 || i.types[1].bitfield.reg8)
3203 && i.rex != 0))
3204 {
3205 int x;
3206
3207 i.rex |= REX_OPCODE;
3208 for (x = 0; x < 2; x++)
3209 {
3210 /* Look for 8 bit operand that uses old registers. */
3211 if (i.types[x].bitfield.reg8
3212 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3213 {
3214 /* In case it is "hi" register, give up. */
3215 if (i.op[x].regs->reg_num > 3)
3216 as_bad (_("can't encode register '%s%s' in an "
3217 "instruction requiring REX prefix."),
3218 register_prefix, i.op[x].regs->reg_name);
3219
3220 /* Otherwise it is equivalent to the extended register.
3221 Since the encoding doesn't change, this is merely
3222 cosmetic cleanup for debug output. */
3223
3224 i.op[x].regs = i.op[x].regs + 8;
3225 }
3226 }
3227 }
3228
3229 if (i.rex != 0)
3230 add_prefix (REX_OPCODE | i.rex);
3231
3232 /* We are ready to output the insn. */
3233 output_insn ();
3234 }
3235
3236 static char *
3237 parse_insn (char *line, char *mnemonic)
3238 {
3239 char *l = line;
3240 char *token_start = l;
3241 char *mnem_p;
3242 int supported;
3243 const insn_template *t;
3244 char *dot_p = NULL;
3245
3246 /* Non-zero if we found a prefix only acceptable with string insns. */
3247 const char *expecting_string_instruction = NULL;
3248
3249 while (1)
3250 {
3251 mnem_p = mnemonic;
3252 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3253 {
3254 if (*mnem_p == '.')
3255 dot_p = mnem_p;
3256 mnem_p++;
3257 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3258 {
3259 as_bad (_("no such instruction: `%s'"), token_start);
3260 return NULL;
3261 }
3262 l++;
3263 }
3264 if (!is_space_char (*l)
3265 && *l != END_OF_INSN
3266 && (intel_syntax
3267 || (*l != PREFIX_SEPARATOR
3268 && *l != ',')))
3269 {
3270 as_bad (_("invalid character %s in mnemonic"),
3271 output_invalid (*l));
3272 return NULL;
3273 }
3274 if (token_start == l)
3275 {
3276 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3277 as_bad (_("expecting prefix; got nothing"));
3278 else
3279 as_bad (_("expecting mnemonic; got nothing"));
3280 return NULL;
3281 }
3282
3283 /* Look up instruction (or prefix) via hash table. */
3284 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3285
3286 if (*l != END_OF_INSN
3287 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3288 && current_templates
3289 && current_templates->start->opcode_modifier.isprefix)
3290 {
3291 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3292 {
3293 as_bad ((flag_code != CODE_64BIT
3294 ? _("`%s' is only supported in 64-bit mode")
3295 : _("`%s' is not supported in 64-bit mode")),
3296 current_templates->start->name);
3297 return NULL;
3298 }
3299 /* If we are in 16-bit mode, do not allow addr16 or data16.
3300 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3301 if ((current_templates->start->opcode_modifier.size16
3302 || current_templates->start->opcode_modifier.size32)
3303 && flag_code != CODE_64BIT
3304 && (current_templates->start->opcode_modifier.size32
3305 ^ (flag_code == CODE_16BIT)))
3306 {
3307 as_bad (_("redundant %s prefix"),
3308 current_templates->start->name);
3309 return NULL;
3310 }
3311 /* Add prefix, checking for repeated prefixes. */
3312 switch (add_prefix (current_templates->start->base_opcode))
3313 {
3314 case PREFIX_EXIST:
3315 return NULL;
3316 case PREFIX_REP:
3317 expecting_string_instruction = current_templates->start->name;
3318 break;
3319 default:
3320 break;
3321 }
3322 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3323 token_start = ++l;
3324 }
3325 else
3326 break;
3327 }
3328
3329 if (!current_templates)
3330 {
3331 /* Check if we should swap operands or force a 32bit displacement in
3332 the encoding. */
3333 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3334 i.swap_operand = 1;
3335 else if (mnem_p - 4 == dot_p
3336 && dot_p[1] == 'd'
3337 && dot_p[2] == '3'
3338 && dot_p[3] == '2')
3339 i.disp32_encoding = 1;
3340 else
3341 goto check_suffix;
3342 mnem_p = dot_p;
3343 *dot_p = '\0';
3344 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3345 }
3346
3347 if (!current_templates)
3348 {
3349 check_suffix:
3350 /* See if we can get a match by trimming off a suffix. */
3351 switch (mnem_p[-1])
3352 {
3353 case WORD_MNEM_SUFFIX:
3354 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3355 i.suffix = SHORT_MNEM_SUFFIX;
3356 else
3357 case BYTE_MNEM_SUFFIX:
3358 case QWORD_MNEM_SUFFIX:
3359 i.suffix = mnem_p[-1];
3360 mnem_p[-1] = '\0';
3361 current_templates = (const templates *) hash_find (op_hash,
3362 mnemonic);
3363 break;
3364 case SHORT_MNEM_SUFFIX:
3365 case LONG_MNEM_SUFFIX:
3366 if (!intel_syntax)
3367 {
3368 i.suffix = mnem_p[-1];
3369 mnem_p[-1] = '\0';
3370 current_templates = (const templates *) hash_find (op_hash,
3371 mnemonic);
3372 }
3373 break;
3374
3375 /* Intel Syntax. */
3376 case 'd':
3377 if (intel_syntax)
3378 {
3379 if (intel_float_operand (mnemonic) == 1)
3380 i.suffix = SHORT_MNEM_SUFFIX;
3381 else
3382 i.suffix = LONG_MNEM_SUFFIX;
3383 mnem_p[-1] = '\0';
3384 current_templates = (const templates *) hash_find (op_hash,
3385 mnemonic);
3386 }
3387 break;
3388 }
3389 if (!current_templates)
3390 {
3391 as_bad (_("no such instruction: `%s'"), token_start);
3392 return NULL;
3393 }
3394 }
3395
3396 if (current_templates->start->opcode_modifier.jump
3397 || current_templates->start->opcode_modifier.jumpbyte)
3398 {
3399 /* Check for a branch hint. We allow ",pt" and ",pn" for
3400 predict taken and predict not taken respectively.
3401 I'm not sure that branch hints actually do anything on loop
3402 and jcxz insns (JumpByte) for current Pentium4 chips. They
3403 may work in the future and it doesn't hurt to accept them
3404 now. */
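/* e.g. "jne target,pt" gets a DS segment prefix (0x3e, predict taken)
   and "jne target,pn" a CS prefix (0x2e, predict not taken).  */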
3405 if (l[0] == ',' && l[1] == 'p')
3406 {
3407 if (l[2] == 't')
3408 {
3409 if (!add_prefix (DS_PREFIX_OPCODE))
3410 return NULL;
3411 l += 3;
3412 }
3413 else if (l[2] == 'n')
3414 {
3415 if (!add_prefix (CS_PREFIX_OPCODE))
3416 return NULL;
3417 l += 3;
3418 }
3419 }
3420 }
3421 /* Any other comma loses. */
3422 if (*l == ',')
3423 {
3424 as_bad (_("invalid character %s in mnemonic"),
3425 output_invalid (*l));
3426 return NULL;
3427 }
3428
3429 /* Check if instruction is supported on specified architecture. */
3430 supported = 0;
3431 for (t = current_templates->start; t < current_templates->end; ++t)
3432 {
3433 supported |= cpu_flags_match (t);
3434 if (supported == CPU_FLAGS_PERFECT_MATCH)
3435 goto skip;
3436 }
3437
3438 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3439 {
3440 as_bad (flag_code == CODE_64BIT
3441 ? _("`%s' is not supported in 64-bit mode")
3442 : _("`%s' is only supported in 64-bit mode"),
3443 current_templates->start->name);
3444 return NULL;
3445 }
3446 if (supported != CPU_FLAGS_PERFECT_MATCH)
3447 {
3448 as_bad (_("`%s' is not supported on `%s%s'"),
3449 current_templates->start->name,
3450 cpu_arch_name ? cpu_arch_name : default_arch,
3451 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3452 return NULL;
3453 }
3454
3455 skip:
3456 if (!cpu_arch_flags.bitfield.cpui386
3457 && (flag_code != CODE_16BIT))
3458 {
3459 as_warn (_("use .code16 to ensure correct addressing mode"));
3460 }
3461
3462 /* Check for rep/repne without a string instruction. */
3463 if (expecting_string_instruction)
3464 {
3465 static templates override;
3466
3467 for (t = current_templates->start; t < current_templates->end; ++t)
3468 if (t->opcode_modifier.isstring)
3469 break;
3470 if (t >= current_templates->end)
3471 {
3472 as_bad (_("expecting string instruction after `%s'"),
3473 expecting_string_instruction);
3474 return NULL;
3475 }
3476 for (override.start = t; t < current_templates->end; ++t)
3477 if (!t->opcode_modifier.isstring)
3478 break;
3479 override.end = t;
3480 current_templates = &override;
3481 }
3482
3483 return l;
3484 }
3485
3486 static char *
3487 parse_operands (char *l, const char *mnemonic)
3488 {
3489 char *token_start;
3490
3491 /* 1 if operand is pending after ','. */
3492 unsigned int expecting_operand = 0;
3493
3494 /* Non-zero if operand parens not balanced. */
3495 unsigned int paren_not_balanced;
3496
3497 while (*l != END_OF_INSN)
3498 {
3499 /* Skip optional white space before operand. */
3500 if (is_space_char (*l))
3501 ++l;
3502 if (!is_operand_char (*l) && *l != END_OF_INSN)
3503 {
3504 as_bad (_("invalid character %s before operand %d"),
3505 output_invalid (*l),
3506 i.operands + 1);
3507 return NULL;
3508 }
3509 token_start = l; /* after white space */
3510 paren_not_balanced = 0;
3511 while (paren_not_balanced || *l != ',')
3512 {
3513 if (*l == END_OF_INSN)
3514 {
3515 if (paren_not_balanced)
3516 {
3517 if (!intel_syntax)
3518 as_bad (_("unbalanced parenthesis in operand %d."),
3519 i.operands + 1);
3520 else
3521 as_bad (_("unbalanced brackets in operand %d."),
3522 i.operands + 1);
3523 return NULL;
3524 }
3525 else
3526 break; /* we are done */
3527 }
3528 else if (!is_operand_char (*l) && !is_space_char (*l))
3529 {
3530 as_bad (_("invalid character %s in operand %d"),
3531 output_invalid (*l),
3532 i.operands + 1);
3533 return NULL;
3534 }
3535 if (!intel_syntax)
3536 {
3537 if (*l == '(')
3538 ++paren_not_balanced;
3539 if (*l == ')')
3540 --paren_not_balanced;
3541 }
3542 else
3543 {
3544 if (*l == '[')
3545 ++paren_not_balanced;
3546 if (*l == ']')
3547 --paren_not_balanced;
3548 }
3549 l++;
3550 }
3551 if (l != token_start)
3552 { /* Yes, we've read in another operand. */
3553 unsigned int operand_ok;
3554 this_operand = i.operands++;
3555 i.types[this_operand].bitfield.unspecified = 1;
3556 if (i.operands > MAX_OPERANDS)
3557 {
3558 as_bad (_("spurious operands; (%d operands/instruction max)"),
3559 MAX_OPERANDS);
3560 return NULL;
3561 }
3562 /* Now parse operand adding info to 'i' as we go along. */
3563 END_STRING_AND_SAVE (l);
3564
3565 if (intel_syntax)
3566 operand_ok =
3567 i386_intel_operand (token_start,
3568 intel_float_operand (mnemonic));
3569 else
3570 operand_ok = i386_att_operand (token_start);
3571
3572 RESTORE_END_STRING (l);
3573 if (!operand_ok)
3574 return NULL;
3575 }
3576 else
3577 {
3578 if (expecting_operand)
3579 {
3580 expecting_operand_after_comma:
3581 as_bad (_("expecting operand after ','; got nothing"));
3582 return NULL;
3583 }
3584 if (*l == ',')
3585 {
3586 as_bad (_("expecting operand before ','; got nothing"));
3587 return NULL;
3588 }
3589 }
3590
3591 /* Now *l must be either ',' or END_OF_INSN. */
3592 if (*l == ',')
3593 {
3594 if (*++l == END_OF_INSN)
3595 {
3596 /* The comma has been skipped; if the line ends here, complain. */
3597 goto expecting_operand_after_comma;
3598 }
3599 expecting_operand = 1;
3600 }
3601 }
3602 return l;
3603 }
3604
3605 static void
3606 swap_2_operands (int xchg1, int xchg2)
3607 {
3608 union i386_op temp_op;
3609 i386_operand_type temp_type;
3610 enum bfd_reloc_code_real temp_reloc;
3611
3612 temp_type = i.types[xchg2];
3613 i.types[xchg2] = i.types[xchg1];
3614 i.types[xchg1] = temp_type;
3615 temp_op = i.op[xchg2];
3616 i.op[xchg2] = i.op[xchg1];
3617 i.op[xchg1] = temp_op;
3618 temp_reloc = i.reloc[xchg2];
3619 i.reloc[xchg2] = i.reloc[xchg1];
3620 i.reloc[xchg1] = temp_reloc;
3621 }
3622
3623 static void
3624 swap_operands (void)
3625 {
3626 switch (i.operands)
3627 {
3628 case 5:
3629 case 4:
3630 swap_2_operands (1, i.operands - 2);
3631 case 3:
3632 case 2:
3633 swap_2_operands (0, i.operands - 1);
3634 break;
3635 default:
3636 abort ();
3637 }
3638
3639 if (i.mem_operands == 2)
3640 {
3641 const seg_entry *temp_seg;
3642 temp_seg = i.seg[0];
3643 i.seg[0] = i.seg[1];
3644 i.seg[1] = temp_seg;
3645 }
3646 }
3647
3648 /* Try to ensure constant immediates are represented in the smallest
3649 opcode possible. */
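/* e.g. "addw $0xffe0, %ax": the 16-bit constant is first normalised to
   -32 via the ((x & 0xffff) ^ 0x8000) - 0x8000 sign-extension idiom, so
   it also qualifies as Imm8S and the shorter sign-extended-byte form of
   add can be selected by match_template ().  */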
3650 static void
3651 optimize_imm (void)
3652 {
3653 char guess_suffix = 0;
3654 int op;
3655
3656 if (i.suffix)
3657 guess_suffix = i.suffix;
3658 else if (i.reg_operands)
3659 {
3660 /* Figure out a suffix from the last register operand specified.
3661 We can't do this properly yet, i.e. excluding InOutPortReg,
3662 but the following works for instructions with immediates.
3663 In any case, we can't set i.suffix yet. */
3664 for (op = i.operands; --op >= 0;)
3665 if (i.types[op].bitfield.reg8)
3666 {
3667 guess_suffix = BYTE_MNEM_SUFFIX;
3668 break;
3669 }
3670 else if (i.types[op].bitfield.reg16)
3671 {
3672 guess_suffix = WORD_MNEM_SUFFIX;
3673 break;
3674 }
3675 else if (i.types[op].bitfield.reg32)
3676 {
3677 guess_suffix = LONG_MNEM_SUFFIX;
3678 break;
3679 }
3680 else if (i.types[op].bitfield.reg64)
3681 {
3682 guess_suffix = QWORD_MNEM_SUFFIX;
3683 break;
3684 }
3685 }
3686 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3687 guess_suffix = WORD_MNEM_SUFFIX;
3688
3689 for (op = i.operands; --op >= 0;)
3690 if (operand_type_check (i.types[op], imm))
3691 {
3692 switch (i.op[op].imms->X_op)
3693 {
3694 case O_constant:
3695 /* If a suffix is given, this operand may be shortened. */
3696 switch (guess_suffix)
3697 {
3698 case LONG_MNEM_SUFFIX:
3699 i.types[op].bitfield.imm32 = 1;
3700 i.types[op].bitfield.imm64 = 1;
3701 break;
3702 case WORD_MNEM_SUFFIX:
3703 i.types[op].bitfield.imm16 = 1;
3704 i.types[op].bitfield.imm32 = 1;
3705 i.types[op].bitfield.imm32s = 1;
3706 i.types[op].bitfield.imm64 = 1;
3707 break;
3708 case BYTE_MNEM_SUFFIX:
3709 i.types[op].bitfield.imm8 = 1;
3710 i.types[op].bitfield.imm8s = 1;
3711 i.types[op].bitfield.imm16 = 1;
3712 i.types[op].bitfield.imm32 = 1;
3713 i.types[op].bitfield.imm32s = 1;
3714 i.types[op].bitfield.imm64 = 1;
3715 break;
3716 }
3717
3718 /* If this operand is at most 16 bits, convert it
3719 to a signed 16 bit number before trying to see
3720 whether it will fit in an even smaller size.
3721 This allows a 16-bit operand such as $0xffe0 to
3722 be recognised as within Imm8S range. */
3723 if ((i.types[op].bitfield.imm16)
3724 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3725 {
3726 i.op[op].imms->X_add_number =
3727 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3728 }
3729 if ((i.types[op].bitfield.imm32)
3730 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3731 == 0))
3732 {
3733 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3734 ^ ((offsetT) 1 << 31))
3735 - ((offsetT) 1 << 31));
3736 }
3737 i.types[op]
3738 = operand_type_or (i.types[op],
3739 smallest_imm_type (i.op[op].imms->X_add_number));
3740
3741 /* We must avoid matching of Imm32 templates when 64bit
3742 only immediate is available. */
3743 if (guess_suffix == QWORD_MNEM_SUFFIX)
3744 i.types[op].bitfield.imm32 = 0;
3745 break;
3746
3747 case O_absent:
3748 case O_register:
3749 abort ();
3750
3751 /* Symbols and expressions. */
3752 default:
3753 /* Convert symbolic operand to proper sizes for matching, but don't
3754 prevent matching a set of insns that only supports sizes other
3755 than those matching the insn suffix. */
3756 {
3757 i386_operand_type mask, allowed;
3758 const insn_template *t;
3759
3760 operand_type_set (&mask, 0);
3761 operand_type_set (&allowed, 0);
3762
3763 for (t = current_templates->start;
3764 t < current_templates->end;
3765 ++t)
3766 allowed = operand_type_or (allowed,
3767 t->operand_types[op]);
3768 switch (guess_suffix)
3769 {
3770 case QWORD_MNEM_SUFFIX:
3771 mask.bitfield.imm64 = 1;
3772 mask.bitfield.imm32s = 1;
3773 break;
3774 case LONG_MNEM_SUFFIX:
3775 mask.bitfield.imm32 = 1;
3776 break;
3777 case WORD_MNEM_SUFFIX:
3778 mask.bitfield.imm16 = 1;
3779 break;
3780 case BYTE_MNEM_SUFFIX:
3781 mask.bitfield.imm8 = 1;
3782 break;
3783 default:
3784 break;
3785 }
3786 allowed = operand_type_and (mask, allowed);
3787 if (!operand_type_all_zero (&allowed))
3788 i.types[op] = operand_type_and (i.types[op], mask);
3789 }
3790 break;
3791 }
3792 }
3793 }
3794
3795 /* Try to use the smallest displacement type too. */
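/* e.g. a constant displacement of 0x40 that is marked Disp32 also gets
   Disp8 set here, so the one-byte displacement form of the ModRM/SIB
   encoding can be chosen; a zero displacement with a base or index
   register is dropped altogether.  */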
3796 static void
3797 optimize_disp (void)
3798 {
3799 int op;
3800
3801 for (op = i.operands; --op >= 0;)
3802 if (operand_type_check (i.types[op], disp))
3803 {
3804 if (i.op[op].disps->X_op == O_constant)
3805 {
3806 offsetT op_disp = i.op[op].disps->X_add_number;
3807
3808 if (i.types[op].bitfield.disp16
3809 && (op_disp & ~(offsetT) 0xffff) == 0)
3810 {
3811 /* If this operand is at most 16 bits, convert
3812 to a signed 16 bit number and don't use 64bit
3813 displacement. */
3814 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3815 i.types[op].bitfield.disp64 = 0;
3816 }
3817 if (i.types[op].bitfield.disp32
3818 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3819 {
3820 /* If this operand is at most 32 bits, convert
3821 to a signed 32 bit number and don't use 64bit
3822 displacement. */
3823 op_disp &= (((offsetT) 2 << 31) - 1);
3824 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3825 i.types[op].bitfield.disp64 = 0;
3826 }
3827 if (!op_disp && i.types[op].bitfield.baseindex)
3828 {
3829 i.types[op].bitfield.disp8 = 0;
3830 i.types[op].bitfield.disp16 = 0;
3831 i.types[op].bitfield.disp32 = 0;
3832 i.types[op].bitfield.disp32s = 0;
3833 i.types[op].bitfield.disp64 = 0;
3834 i.op[op].disps = 0;
3835 i.disp_operands--;
3836 }
3837 else if (flag_code == CODE_64BIT)
3838 {
3839 if (fits_in_signed_long (op_disp))
3840 {
3841 i.types[op].bitfield.disp64 = 0;
3842 i.types[op].bitfield.disp32s = 1;
3843 }
3844 if (i.prefix[ADDR_PREFIX]
3845 && fits_in_unsigned_long (op_disp))
3846 i.types[op].bitfield.disp32 = 1;
3847 }
3848 if ((i.types[op].bitfield.disp32
3849 || i.types[op].bitfield.disp32s
3850 || i.types[op].bitfield.disp16)
3851 && fits_in_signed_byte (op_disp))
3852 i.types[op].bitfield.disp8 = 1;
3853 }
3854 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3855 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3856 {
3857 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3858 i.op[op].disps, 0, i.reloc[op]);
3859 i.types[op].bitfield.disp8 = 0;
3860 i.types[op].bitfield.disp16 = 0;
3861 i.types[op].bitfield.disp32 = 0;
3862 i.types[op].bitfield.disp32s = 0;
3863 i.types[op].bitfield.disp64 = 0;
3864 }
3865 else
3866 /* We only support 64bit displacement on constants. */
3867 i.types[op].bitfield.disp64 = 0;
3868 }
3869 }
3870
3871 /* Check if operands are valid for the instruction. */
3872
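/* For the VSIB checks below: an AVX2 gather template marked VecSIB128
   requires an xmm index register and one marked VecSIB256 a ymm index,
   and a RIP-relative base is rejected; conversely, templates without
   VecSIB must not be given a vector register as index.  */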
3873 static int
3874 check_VecOperands (const insn_template *t)
3875 {
3876 /* Without VSIB byte, we can't have a vector register for index. */
3877 if (!t->opcode_modifier.vecsib
3878 && i.index_reg
3879 && (i.index_reg->reg_type.bitfield.regxmm
3880 || i.index_reg->reg_type.bitfield.regymm))
3881 {
3882 i.error = unsupported_vector_index_register;
3883 return 1;
3884 }
3885
3886 /* For VSIB byte, we need a vector register for index and no PC
3887 relative addressing is allowed. */
3888 if (t->opcode_modifier.vecsib
3889 && (!i.index_reg
3890 || !((t->opcode_modifier.vecsib == VecSIB128
3891 && i.index_reg->reg_type.bitfield.regxmm)
3892 || (t->opcode_modifier.vecsib == VecSIB256
3893 && i.index_reg->reg_type.bitfield.regymm))
3894 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3895 {
3896 i.error = invalid_vsib_address;
3897 return 1;
3898 }
3899
3900 return 0;
3901 }
3902
3903 /* Check if operands are valid for the instruction. Update VEX
3904 operand types. */
3905
3906 static int
3907 VEX_check_operands (const insn_template *t)
3908 {
3909 if (!t->opcode_modifier.vex)
3910 return 0;
3911
3912 /* Only check VEX_Imm4, which must be the first operand. */
3913 if (t->operand_types[0].bitfield.vec_imm4)
3914 {
3915 if (i.op[0].imms->X_op != O_constant
3916 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3917 {
3918 i.error = bad_imm4;
3919 return 1;
3920 }
3921
3922 /* Turn off Imm8 so that update_imm won't complain. */
3923 i.types[0] = vec_imm4;
3924 }
3925
3926 return 0;
3927 }
3928
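/* match_template () walks every template sharing the parsed mnemonic and
   discards candidates whose operand count, CPU feature requirements,
   mnemonic suffix or operand types do not fit what was parsed into i;
   when an address size prefix is present, the displacement width the
   template expects is adjusted first.  The first template that passes
   every check is used.  */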
3929 static const insn_template *
3930 match_template (void)
3931 {
3932 /* Points to template once we've found it. */
3933 const insn_template *t;
3934 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3935 i386_operand_type overlap4;
3936 unsigned int found_reverse_match;
3937 i386_opcode_modifier suffix_check;
3938 i386_operand_type operand_types [MAX_OPERANDS];
3939 int addr_prefix_disp;
3940 unsigned int j;
3941 unsigned int found_cpu_match;
3942 unsigned int check_register;
3943
3944 #if MAX_OPERANDS != 5
3945 # error "MAX_OPERANDS must be 5."
3946 #endif
3947
3948 found_reverse_match = 0;
3949 addr_prefix_disp = -1;
3950
3951 memset (&suffix_check, 0, sizeof (suffix_check));
3952 if (i.suffix == BYTE_MNEM_SUFFIX)
3953 suffix_check.no_bsuf = 1;
3954 else if (i.suffix == WORD_MNEM_SUFFIX)
3955 suffix_check.no_wsuf = 1;
3956 else if (i.suffix == SHORT_MNEM_SUFFIX)
3957 suffix_check.no_ssuf = 1;
3958 else if (i.suffix == LONG_MNEM_SUFFIX)
3959 suffix_check.no_lsuf = 1;
3960 else if (i.suffix == QWORD_MNEM_SUFFIX)
3961 suffix_check.no_qsuf = 1;
3962 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3963 suffix_check.no_ldsuf = 1;
3964
3965 /* Must have right number of operands. */
3966 i.error = number_of_operands_mismatch;
3967
3968 for (t = current_templates->start; t < current_templates->end; t++)
3969 {
3970 addr_prefix_disp = -1;
3971
3972 if (i.operands != t->operands)
3973 continue;
3974
3975 /* Check processor support. */
3976 i.error = unsupported;
3977 found_cpu_match = (cpu_flags_match (t)
3978 == CPU_FLAGS_PERFECT_MATCH);
3979 if (!found_cpu_match)
3980 continue;
3981
3982 /* Check old gcc support. */
3983 i.error = old_gcc_only;
3984 if (!old_gcc && t->opcode_modifier.oldgcc)
3985 continue;
3986
3987 /* Check AT&T mnemonic. */
3988 i.error = unsupported_with_intel_mnemonic;
3989 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3990 continue;
3991
3992 /* Check AT&T/Intel syntax. */
3993 i.error = unsupported_syntax;
3994 if ((intel_syntax && t->opcode_modifier.attsyntax)
3995 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3996 continue;
3997
3998 /* Check the suffix, except for some instructions in intel mode. */
3999 i.error = invalid_instruction_suffix;
4000 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4001 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4002 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4003 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4004 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4005 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4006 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4007 continue;
4008
4009 if (!operand_size_match (t))
4010 continue;
4011
4012 for (j = 0; j < MAX_OPERANDS; j++)
4013 operand_types[j] = t->operand_types[j];
4014
4015 /* In general, don't allow 64-bit operands in 32-bit mode. */
4016 if (i.suffix == QWORD_MNEM_SUFFIX
4017 && flag_code != CODE_64BIT
4018 && (intel_syntax
4019 ? (!t->opcode_modifier.ignoresize
4020 && !intel_float_operand (t->name))
4021 : intel_float_operand (t->name) != 2)
4022 && ((!operand_types[0].bitfield.regmmx
4023 && !operand_types[0].bitfield.regxmm
4024 && !operand_types[0].bitfield.regymm)
4025 || (!operand_types[t->operands > 1].bitfield.regmmx
4026 && !!operand_types[t->operands > 1].bitfield.regxmm
4027 && !!operand_types[t->operands > 1].bitfield.regymm))
4028 && (t->base_opcode != 0x0fc7
4029 || t->extension_opcode != 1 /* cmpxchg8b */))
4030 continue;
4031
4032 /* In general, don't allow 32-bit operands on pre-386. */
4033 else if (i.suffix == LONG_MNEM_SUFFIX
4034 && !cpu_arch_flags.bitfield.cpui386
4035 && (intel_syntax
4036 ? (!t->opcode_modifier.ignoresize
4037 && !intel_float_operand (t->name))
4038 : intel_float_operand (t->name) != 2)
4039 && ((!operand_types[0].bitfield.regmmx
4040 && !operand_types[0].bitfield.regxmm)
4041 || (!operand_types[t->operands > 1].bitfield.regmmx
4042 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4043 continue;
4044
4045 /* Do not verify operands when there are none. */
4046 else
4047 {
4048 if (!t->operands)
4049 /* We've found a match; break out of loop. */
4050 break;
4051 }
4052
4053 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4054 into Disp32/Disp16/Disp32 operand. */
4055 if (i.prefix[ADDR_PREFIX] != 0)
4056 {
4057 /* There should be only one Disp operand. */
4058 switch (flag_code)
4059 {
4060 case CODE_16BIT:
4061 for (j = 0; j < MAX_OPERANDS; j++)
4062 {
4063 if (operand_types[j].bitfield.disp16)
4064 {
4065 addr_prefix_disp = j;
4066 operand_types[j].bitfield.disp32 = 1;
4067 operand_types[j].bitfield.disp16 = 0;
4068 break;
4069 }
4070 }
4071 break;
4072 case CODE_32BIT:
4073 for (j = 0; j < MAX_OPERANDS; j++)
4074 {
4075 if (operand_types[j].bitfield.disp32)
4076 {
4077 addr_prefix_disp = j;
4078 operand_types[j].bitfield.disp32 = 0;
4079 operand_types[j].bitfield.disp16 = 1;
4080 break;
4081 }
4082 }
4083 break;
4084 case CODE_64BIT:
4085 for (j = 0; j < MAX_OPERANDS; j++)
4086 {
4087 if (operand_types[j].bitfield.disp64)
4088 {
4089 addr_prefix_disp = j;
4090 operand_types[j].bitfield.disp64 = 0;
4091 operand_types[j].bitfield.disp32 = 1;
4092 break;
4093 }
4094 }
4095 break;
4096 }
4097 }
4098
4099 /* We check register size if needed. */
4100 check_register = t->opcode_modifier.checkregsize;
4101 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4102 switch (t->operands)
4103 {
4104 case 1:
4105 if (!operand_type_match (overlap0, i.types[0]))
4106 continue;
4107 break;
4108 case 2:
4109	  /* xchg %eax, %eax is a special case.  It is an alias for nop
4110 only in 32bit mode and we can use opcode 0x90. In 64bit
4111 mode, we can't use 0x90 for xchg %eax, %eax since it should
4112 zero-extend %eax to %rax. */
4113 if (flag_code == CODE_64BIT
4114 && t->base_opcode == 0x90
4115 && operand_type_equal (&i.types [0], &acc32)
4116 && operand_type_equal (&i.types [1], &acc32))
4117 continue;
4118 if (i.swap_operand)
4119 {
4120	      /* If we swap operands in the encoding, we either match
4121		 the next template or reverse the direction of operands.  */
4122 if (t->opcode_modifier.s)
4123 continue;
4124 else if (t->opcode_modifier.d)
4125 goto check_reverse;
4126 }
4127
4128 case 3:
4129	  /* If we swap operands in the encoding, we match the next template.  */
4130 if (i.swap_operand && t->opcode_modifier.s)
4131 continue;
4132 case 4:
4133 case 5:
4134 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4135 if (!operand_type_match (overlap0, i.types[0])
4136 || !operand_type_match (overlap1, i.types[1])
4137 || (check_register
4138 && !operand_type_register_match (overlap0, i.types[0],
4139 operand_types[0],
4140 overlap1, i.types[1],
4141 operand_types[1])))
4142 {
4143 /* Check if other direction is valid ... */
4144 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4145 continue;
4146
4147 check_reverse:
4148 /* Try reversing direction of operands. */
4149 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4150 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4151 if (!operand_type_match (overlap0, i.types[0])
4152 || !operand_type_match (overlap1, i.types[1])
4153 || (check_register
4154 && !operand_type_register_match (overlap0,
4155 i.types[0],
4156 operand_types[1],
4157 overlap1,
4158 i.types[1],
4159 operand_types[0])))
4160 {
4161 /* Does not match either direction. */
4162 continue;
4163 }
4164 /* found_reverse_match holds which of D or FloatDR
4165 we've found. */
4166 if (t->opcode_modifier.d)
4167 found_reverse_match = Opcode_D;
4168 else if (t->opcode_modifier.floatd)
4169 found_reverse_match = Opcode_FloatD;
4170 else
4171 found_reverse_match = 0;
4172 if (t->opcode_modifier.floatr)
4173 found_reverse_match |= Opcode_FloatR;
4174 }
4175 else
4176 {
4177 /* Found a forward 2 operand match here. */
4178 switch (t->operands)
4179 {
4180 case 5:
4181 overlap4 = operand_type_and (i.types[4],
4182 operand_types[4]);
4183 case 4:
4184 overlap3 = operand_type_and (i.types[3],
4185 operand_types[3]);
4186 case 3:
4187 overlap2 = operand_type_and (i.types[2],
4188 operand_types[2]);
4189 break;
4190 }
4191
4192 switch (t->operands)
4193 {
4194 case 5:
4195 if (!operand_type_match (overlap4, i.types[4])
4196 || !operand_type_register_match (overlap3,
4197 i.types[3],
4198 operand_types[3],
4199 overlap4,
4200 i.types[4],
4201 operand_types[4]))
4202 continue;
4203 case 4:
4204 if (!operand_type_match (overlap3, i.types[3])
4205 || (check_register
4206 && !operand_type_register_match (overlap2,
4207 i.types[2],
4208 operand_types[2],
4209 overlap3,
4210 i.types[3],
4211 operand_types[3])))
4212 continue;
4213 case 3:
4214 /* Here we make use of the fact that there are no
4215 reverse match 3 operand instructions, and all 3
4216 operand instructions only need to be checked for
4217 register consistency between operands 2 and 3. */
4218 if (!operand_type_match (overlap2, i.types[2])
4219 || (check_register
4220 && !operand_type_register_match (overlap1,
4221 i.types[1],
4222 operand_types[1],
4223 overlap2,
4224 i.types[2],
4225 operand_types[2])))
4226 continue;
4227 break;
4228 }
4229 }
4230 /* Found either forward/reverse 2, 3 or 4 operand match here:
4231 slip through to break. */
4232 }
4233 if (!found_cpu_match)
4234 {
4235 found_reverse_match = 0;
4236 continue;
4237 }
4238
4239 /* Check if vector operands are valid. */
4240 if (check_VecOperands (t))
4241 continue;
4242
4243 /* Check if VEX operands are valid. */
4244 if (VEX_check_operands (t))
4245 continue;
4246
4247 /* We've found a match; break out of loop. */
4248 break;
4249 }
4250
4251 if (t == current_templates->end)
4252 {
4253 /* We found no match. */
4254 const char *err_msg;
4255 switch (i.error)
4256 {
4257 default:
4258 abort ();
4259 case operand_size_mismatch:
4260 err_msg = _("operand size mismatch");
4261 break;
4262 case operand_type_mismatch:
4263 err_msg = _("operand type mismatch");
4264 break;
4265 case register_type_mismatch:
4266 err_msg = _("register type mismatch");
4267 break;
4268 case number_of_operands_mismatch:
4269 err_msg = _("number of operands mismatch");
4270 break;
4271 case invalid_instruction_suffix:
4272 err_msg = _("invalid instruction suffix");
4273 break;
4274 case bad_imm4:
4275 err_msg = _("Imm4 isn't the first operand");
4276 break;
4277 case old_gcc_only:
4278 err_msg = _("only supported with old gcc");
4279 break;
4280 case unsupported_with_intel_mnemonic:
4281 err_msg = _("unsupported with Intel mnemonic");
4282 break;
4283 case unsupported_syntax:
4284 err_msg = _("unsupported syntax");
4285 break;
4286 case unsupported:
4287 err_msg = _("unsupported");
4288 break;
4289 case invalid_vsib_address:
4290 err_msg = _("invalid VSIB address");
4291 break;
4292 case unsupported_vector_index_register:
4293 err_msg = _("unsupported vector index register");
4294 break;
4295 }
4296 as_bad (_("%s for `%s'"), err_msg,
4297 current_templates->start->name);
4298 return NULL;
4299 }
4300
4301 if (!quiet_warnings)
4302 {
4303 if (!intel_syntax
4304 && (i.types[0].bitfield.jumpabsolute
4305 != operand_types[0].bitfield.jumpabsolute))
4306 {
4307 as_warn (_("indirect %s without `*'"), t->name);
4308 }
4309
4310 if (t->opcode_modifier.isprefix
4311 && t->opcode_modifier.ignoresize)
4312 {
4313 /* Warn them that a data or address size prefix doesn't
4314 affect assembly of the next line of code. */
4315 as_warn (_("stand-alone `%s' prefix"), t->name);
4316 }
4317 }
4318
4319 /* Copy the template we found. */
4320 i.tm = *t;
4321
4322 if (addr_prefix_disp != -1)
4323 i.tm.operand_types[addr_prefix_disp]
4324 = operand_types[addr_prefix_disp];
4325
4326 if (found_reverse_match)
4327 {
4328 /* If we found a reverse match we must alter the opcode
4329 direction bit. found_reverse_match holds bits to change
4330 (different for int & float insns). */
4331
4332 i.tm.base_opcode ^= found_reverse_match;
4333
4334 i.tm.operand_types[0] = operand_types[1];
4335 i.tm.operand_types[1] = operand_types[0];
4336 }
4337
4338 return t;
4339 }
4340
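/* For string instructions, verify that the operand which is fixed to %es
   has not been overridden with some other segment, and normalize where
   the single legal override is stored so the common code can handle it.
   Return 0 on error, 1 on success.  */
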
4341 static int
4342 check_string (void)
4343 {
4344 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4345 if (i.tm.operand_types[mem_op].bitfield.esseg)
4346 {
4347 if (i.seg[0] != NULL && i.seg[0] != &es)
4348 {
4349 as_bad (_("`%s' operand %d must use `%ses' segment"),
4350 i.tm.name,
4351 mem_op + 1,
4352 register_prefix);
4353 return 0;
4354 }
4355 /* There's only ever one segment override allowed per instruction.
4356 This instruction possibly has a legal segment override on the
4357 second operand, so copy the segment to where non-string
4358 instructions store it, allowing common code. */
4359 i.seg[0] = i.seg[1];
4360 }
4361 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4362 {
4363 if (i.seg[1] != NULL && i.seg[1] != &es)
4364 {
4365 as_bad (_("`%s' operand %d must use `%ses' segment"),
4366 i.tm.name,
4367 mem_op + 2,
4368 register_prefix);
4369 return 0;
4370 }
4371 }
4372 return 1;
4373 }
4374
4375 static int
4376 process_suffix (void)
4377 {
4378 /* If matched instruction specifies an explicit instruction mnemonic
4379 suffix, use it. */
4380 if (i.tm.opcode_modifier.size16)
4381 i.suffix = WORD_MNEM_SUFFIX;
4382 else if (i.tm.opcode_modifier.size32)
4383 i.suffix = LONG_MNEM_SUFFIX;
4384 else if (i.tm.opcode_modifier.size64)
4385 i.suffix = QWORD_MNEM_SUFFIX;
4386 else if (i.reg_operands)
4387 {
4388 /* If there's no instruction mnemonic suffix we try to invent one
4389 based on register operands. */
4390 if (!i.suffix)
4391 {
4392	      /* We take i.suffix from the last register operand specified.
4393		 The destination register type is more significant than the source
4394 register type. crc32 in SSE4.2 prefers source register
4395 type. */
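	  /* Base opcode 0xf20f38f1 is the crc32 r16/r32/r64 form;
	     0xf20f38f0 is the r8 form.  */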
4396 if (i.tm.base_opcode == 0xf20f38f1)
4397 {
4398 if (i.types[0].bitfield.reg16)
4399 i.suffix = WORD_MNEM_SUFFIX;
4400 else if (i.types[0].bitfield.reg32)
4401 i.suffix = LONG_MNEM_SUFFIX;
4402 else if (i.types[0].bitfield.reg64)
4403 i.suffix = QWORD_MNEM_SUFFIX;
4404 }
4405 else if (i.tm.base_opcode == 0xf20f38f0)
4406 {
4407 if (i.types[0].bitfield.reg8)
4408 i.suffix = BYTE_MNEM_SUFFIX;
4409 }
4410
4411 if (!i.suffix)
4412 {
4413 int op;
4414
4415 if (i.tm.base_opcode == 0xf20f38f1
4416 || i.tm.base_opcode == 0xf20f38f0)
4417 {
4418 /* We have to know the operand size for crc32. */
4419		    as_bad (_("ambiguous memory operand size for `%s'"),
4420 i.tm.name);
4421 return 0;
4422 }
4423
4424 for (op = i.operands; --op >= 0;)
4425 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4426 {
4427 if (i.types[op].bitfield.reg8)
4428 {
4429 i.suffix = BYTE_MNEM_SUFFIX;
4430 break;
4431 }
4432 else if (i.types[op].bitfield.reg16)
4433 {
4434 i.suffix = WORD_MNEM_SUFFIX;
4435 break;
4436 }
4437 else if (i.types[op].bitfield.reg32)
4438 {
4439 i.suffix = LONG_MNEM_SUFFIX;
4440 break;
4441 }
4442 else if (i.types[op].bitfield.reg64)
4443 {
4444 i.suffix = QWORD_MNEM_SUFFIX;
4445 break;
4446 }
4447 }
4448 }
4449 }
4450 else if (i.suffix == BYTE_MNEM_SUFFIX)
4451 {
4452 if (intel_syntax
4453 && i.tm.opcode_modifier.ignoresize
4454 && i.tm.opcode_modifier.no_bsuf)
4455 i.suffix = 0;
4456 else if (!check_byte_reg ())
4457 return 0;
4458 }
4459 else if (i.suffix == LONG_MNEM_SUFFIX)
4460 {
4461 if (intel_syntax
4462 && i.tm.opcode_modifier.ignoresize
4463 && i.tm.opcode_modifier.no_lsuf)
4464 i.suffix = 0;
4465 else if (!check_long_reg ())
4466 return 0;
4467 }
4468 else if (i.suffix == QWORD_MNEM_SUFFIX)
4469 {
4470 if (intel_syntax
4471 && i.tm.opcode_modifier.ignoresize
4472 && i.tm.opcode_modifier.no_qsuf)
4473 i.suffix = 0;
4474 else if (!check_qword_reg ())
4475 return 0;
4476 }
4477 else if (i.suffix == WORD_MNEM_SUFFIX)
4478 {
4479 if (intel_syntax
4480 && i.tm.opcode_modifier.ignoresize
4481 && i.tm.opcode_modifier.no_wsuf)
4482 i.suffix = 0;
4483 else if (!check_word_reg ())
4484 return 0;
4485 }
4486 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4487 || i.suffix == YMMWORD_MNEM_SUFFIX)
4488 {
4489 /* Skip if the instruction has x/y suffix. match_template
4490 should check if it is a valid suffix. */
4491 }
4492 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4493 /* Do nothing if the instruction is going to ignore the prefix. */
4494 ;
4495 else
4496 abort ();
4497 }
4498 else if (i.tm.opcode_modifier.defaultsize
4499 && !i.suffix
4500 /* exclude fldenv/frstor/fsave/fstenv */
4501 && i.tm.opcode_modifier.no_ssuf)
4502 {
4503 i.suffix = stackop_size;
4504 }
4505 else if (intel_syntax
4506 && !i.suffix
4507 && (i.tm.operand_types[0].bitfield.jumpabsolute
4508 || i.tm.opcode_modifier.jumpbyte
4509 || i.tm.opcode_modifier.jumpintersegment
4510 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4511 && i.tm.extension_opcode <= 3)))
4512 {
4513 switch (flag_code)
4514 {
4515 case CODE_64BIT:
4516 if (!i.tm.opcode_modifier.no_qsuf)
4517 {
4518 i.suffix = QWORD_MNEM_SUFFIX;
4519 break;
4520 }
4521 case CODE_32BIT:
4522 if (!i.tm.opcode_modifier.no_lsuf)
4523 i.suffix = LONG_MNEM_SUFFIX;
4524 break;
4525 case CODE_16BIT:
4526 if (!i.tm.opcode_modifier.no_wsuf)
4527 i.suffix = WORD_MNEM_SUFFIX;
4528 break;
4529 }
4530 }
4531
4532 if (!i.suffix)
4533 {
4534 if (!intel_syntax)
4535 {
4536 if (i.tm.opcode_modifier.w)
4537 {
4538 as_bad (_("no instruction mnemonic suffix given and "
4539 "no register operands; can't size instruction"));
4540 return 0;
4541 }
4542 }
4543 else
4544 {
4545 unsigned int suffixes;
4546
4547 suffixes = !i.tm.opcode_modifier.no_bsuf;
4548 if (!i.tm.opcode_modifier.no_wsuf)
4549 suffixes |= 1 << 1;
4550 if (!i.tm.opcode_modifier.no_lsuf)
4551 suffixes |= 1 << 2;
4552 if (!i.tm.opcode_modifier.no_ldsuf)
4553 suffixes |= 1 << 3;
4554 if (!i.tm.opcode_modifier.no_ssuf)
4555 suffixes |= 1 << 4;
4556 if (!i.tm.opcode_modifier.no_qsuf)
4557 suffixes |= 1 << 5;
4558
4559	      /* There is more than one possible suffix.  */
4560 if (i.tm.opcode_modifier.w
4561 || ((suffixes & (suffixes - 1))
4562 && !i.tm.opcode_modifier.defaultsize
4563 && !i.tm.opcode_modifier.ignoresize))
4564 {
4565 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4566 return 0;
4567 }
4568 }
4569 }
4570
4571 /* Change the opcode based on the operand size given by i.suffix;
4572 We don't need to change things for byte insns. */
4573
4574 if (i.suffix
4575 && i.suffix != BYTE_MNEM_SUFFIX
4576 && i.suffix != XMMWORD_MNEM_SUFFIX
4577 && i.suffix != YMMWORD_MNEM_SUFFIX)
4578 {
4579 /* It's not a byte, select word/dword operation. */
4580 if (i.tm.opcode_modifier.w)
4581 {
4582 if (i.tm.opcode_modifier.shortform)
4583 i.tm.base_opcode |= 8;
4584 else
4585 i.tm.base_opcode |= 1;
4586 }
4587
4588 /* Now select between word & dword operations via the operand
4589 size prefix, except for instructions that will ignore this
4590 prefix anyway. */
4591 if (i.tm.opcode_modifier.addrprefixop0)
4592 {
4593 /* The address size override prefix changes the size of the
4594 first operand. */
4595 if ((flag_code == CODE_32BIT
4596 && i.op->regs[0].reg_type.bitfield.reg16)
4597 || (flag_code != CODE_32BIT
4598 && i.op->regs[0].reg_type.bitfield.reg32))
4599 if (!add_prefix (ADDR_PREFIX_OPCODE))
4600 return 0;
4601 }
4602 else if (i.suffix != QWORD_MNEM_SUFFIX
4603 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4604 && !i.tm.opcode_modifier.ignoresize
4605 && !i.tm.opcode_modifier.floatmf
4606 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4607 || (flag_code == CODE_64BIT
4608 && i.tm.opcode_modifier.jumpbyte)))
4609 {
4610 unsigned int prefix = DATA_PREFIX_OPCODE;
4611
4612 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4613 prefix = ADDR_PREFIX_OPCODE;
4614
4615 if (!add_prefix (prefix))
4616 return 0;
4617 }
4618
4619 /* Set mode64 for an operand. */
4620 if (i.suffix == QWORD_MNEM_SUFFIX
4621 && flag_code == CODE_64BIT
4622 && !i.tm.opcode_modifier.norex64)
4623 {
4624 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4625 need rex64. cmpxchg8b is also a special case. */
4626 if (! (i.operands == 2
4627 && i.tm.base_opcode == 0x90
4628 && i.tm.extension_opcode == None
4629 && operand_type_equal (&i.types [0], &acc64)
4630 && operand_type_equal (&i.types [1], &acc64))
4631 && ! (i.operands == 1
4632 && i.tm.base_opcode == 0xfc7
4633 && i.tm.extension_opcode == 1
4634 && !operand_type_check (i.types [0], reg)
4635 && operand_type_check (i.types [0], anymem)))
4636 i.rex |= REX_W;
4637 }
4638
4639 /* Size floating point instruction. */
4640 if (i.suffix == LONG_MNEM_SUFFIX)
4641 if (i.tm.opcode_modifier.floatmf)
4642 i.tm.base_opcode ^= 4;
4643 }
4644
4645 return 1;
4646 }
4647
4648 static int
4649 check_byte_reg (void)
4650 {
4651 int op;
4652
4653 for (op = i.operands; --op >= 0;)
4654 {
4655 /* If this is an eight bit register, it's OK. If it's the 16 or
4656 32 bit version of an eight bit register, we will just use the
4657 low portion, and that's OK too. */
4658 if (i.types[op].bitfield.reg8)
4659 continue;
4660
4661 /* crc32 doesn't generate this warning. */
4662 if (i.tm.base_opcode == 0xf20f38f0)
4663 continue;
4664
4665 if ((i.types[op].bitfield.reg16
4666 || i.types[op].bitfield.reg32
4667 || i.types[op].bitfield.reg64)
4668 && i.op[op].regs->reg_num < 4)
4669 {
4670 /* Prohibit these changes in the 64bit mode, since the
4671 lowering is more complicated. */
4672 if (flag_code == CODE_64BIT
4673 && !i.tm.operand_types[op].bitfield.inoutportreg)
4674 {
4675 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4676 register_prefix, i.op[op].regs->reg_name,
4677 i.suffix);
4678 return 0;
4679 }
4680 #if REGISTER_WARNINGS
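	  /* The warning below relies on the register table layout: adding
	     the REGNAM_AL - REGNAM_AX (or REGNAM_AL - REGNAM_EAX) offset to
	     a 16- or 32-bit register entry yields its low 8-bit
	     counterpart, which is the register actually used.  */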
4681 if (!quiet_warnings
4682 && !i.tm.operand_types[op].bitfield.inoutportreg)
4683 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4684 register_prefix,
4685 (i.op[op].regs + (i.types[op].bitfield.reg16
4686 ? REGNAM_AL - REGNAM_AX
4687 : REGNAM_AL - REGNAM_EAX))->reg_name,
4688 register_prefix,
4689 i.op[op].regs->reg_name,
4690 i.suffix);
4691 #endif
4692 continue;
4693 }
4694 /* Any other register is bad. */
4695 if (i.types[op].bitfield.reg16
4696 || i.types[op].bitfield.reg32
4697 || i.types[op].bitfield.reg64
4698 || i.types[op].bitfield.regmmx
4699 || i.types[op].bitfield.regxmm
4700 || i.types[op].bitfield.regymm
4701 || i.types[op].bitfield.sreg2
4702 || i.types[op].bitfield.sreg3
4703 || i.types[op].bitfield.control
4704 || i.types[op].bitfield.debug
4705 || i.types[op].bitfield.test
4706 || i.types[op].bitfield.floatreg
4707 || i.types[op].bitfield.floatacc)
4708 {
4709 as_bad (_("`%s%s' not allowed with `%s%c'"),
4710 register_prefix,
4711 i.op[op].regs->reg_name,
4712 i.tm.name,
4713 i.suffix);
4714 return 0;
4715 }
4716 }
4717 return 1;
4718 }
4719
4720 static int
4721 check_long_reg (void)
4722 {
4723 int op;
4724
4725 for (op = i.operands; --op >= 0;)
4726 /* Reject eight bit registers, except where the template requires
4727 them. (eg. movzb) */
4728 if (i.types[op].bitfield.reg8
4729 && (i.tm.operand_types[op].bitfield.reg16
4730 || i.tm.operand_types[op].bitfield.reg32
4731 || i.tm.operand_types[op].bitfield.acc))
4732 {
4733 as_bad (_("`%s%s' not allowed with `%s%c'"),
4734 register_prefix,
4735 i.op[op].regs->reg_name,
4736 i.tm.name,
4737 i.suffix);
4738 return 0;
4739 }
4740 /* Warn if the e prefix on a general reg is missing. */
4741 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4742 && i.types[op].bitfield.reg16
4743 && (i.tm.operand_types[op].bitfield.reg32
4744 || i.tm.operand_types[op].bitfield.acc))
4745 {
4746 /* Prohibit these changes in the 64bit mode, since the
4747 lowering is more complicated. */
4748 if (flag_code == CODE_64BIT)
4749 {
4750 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4751 register_prefix, i.op[op].regs->reg_name,
4752 i.suffix);
4753 return 0;
4754 }
4755 #if REGISTER_WARNINGS
4756 else
4757 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4758 register_prefix,
4759 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4760 register_prefix,
4761 i.op[op].regs->reg_name,
4762 i.suffix);
4763 #endif
4764 }
4765 /* Warn if the r prefix on a general reg is missing. */
4766 else if (i.types[op].bitfield.reg64
4767 && (i.tm.operand_types[op].bitfield.reg32
4768 || i.tm.operand_types[op].bitfield.acc))
4769 {
4770 if (intel_syntax
4771 && i.tm.opcode_modifier.toqword
4772 && !i.types[0].bitfield.regxmm)
4773 {
4774 /* Convert to QWORD. We want REX byte. */
4775 i.suffix = QWORD_MNEM_SUFFIX;
4776 }
4777 else
4778 {
4779 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4780 register_prefix, i.op[op].regs->reg_name,
4781 i.suffix);
4782 return 0;
4783 }
4784 }
4785 return 1;
4786 }
4787
4788 static int
4789 check_qword_reg (void)
4790 {
4791 int op;
4792
4793 for (op = i.operands; --op >= 0; )
4794 /* Reject eight bit registers, except where the template requires
4795 them. (eg. movzb) */
4796 if (i.types[op].bitfield.reg8
4797 && (i.tm.operand_types[op].bitfield.reg16
4798 || i.tm.operand_types[op].bitfield.reg32
4799 || i.tm.operand_types[op].bitfield.acc))
4800 {
4801 as_bad (_("`%s%s' not allowed with `%s%c'"),
4802 register_prefix,
4803 i.op[op].regs->reg_name,
4804 i.tm.name,
4805 i.suffix);
4806 return 0;
4807 }
4808	    /* Warn if the r prefix on a general reg is missing.  */
4809 else if ((i.types[op].bitfield.reg16
4810 || i.types[op].bitfield.reg32)
4811 && (i.tm.operand_types[op].bitfield.reg32
4812 || i.tm.operand_types[op].bitfield.acc))
4813 {
4814 /* Prohibit these changes in the 64bit mode, since the
4815 lowering is more complicated. */
4816 if (intel_syntax
4817 && i.tm.opcode_modifier.todword
4818 && !i.types[0].bitfield.regxmm)
4819 {
4820 /* Convert to DWORD. We don't want REX byte. */
4821 i.suffix = LONG_MNEM_SUFFIX;
4822 }
4823 else
4824 {
4825 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4826 register_prefix, i.op[op].regs->reg_name,
4827 i.suffix);
4828 return 0;
4829 }
4830 }
4831 return 1;
4832 }
4833
4834 static int
4835 check_word_reg (void)
4836 {
4837 int op;
4838 for (op = i.operands; --op >= 0;)
4839 /* Reject eight bit registers, except where the template requires
4840 them. (eg. movzb) */
4841 if (i.types[op].bitfield.reg8
4842 && (i.tm.operand_types[op].bitfield.reg16
4843 || i.tm.operand_types[op].bitfield.reg32
4844 || i.tm.operand_types[op].bitfield.acc))
4845 {
4846 as_bad (_("`%s%s' not allowed with `%s%c'"),
4847 register_prefix,
4848 i.op[op].regs->reg_name,
4849 i.tm.name,
4850 i.suffix);
4851 return 0;
4852 }
4853 /* Warn if the e prefix on a general reg is present. */
4854 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4855 && i.types[op].bitfield.reg32
4856 && (i.tm.operand_types[op].bitfield.reg16
4857 || i.tm.operand_types[op].bitfield.acc))
4858 {
4859 /* Prohibit these changes in the 64bit mode, since the
4860 lowering is more complicated. */
4861 if (flag_code == CODE_64BIT)
4862 {
4863 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4864 register_prefix, i.op[op].regs->reg_name,
4865 i.suffix);
4866 return 0;
4867 }
4868 else
4869 #if REGISTER_WARNINGS
4870 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4871 register_prefix,
4872 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4873 register_prefix,
4874 i.op[op].regs->reg_name,
4875 i.suffix);
4876 #endif
4877 }
4878 return 1;
4879 }
4880
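/* Narrow the set of permissible immediate types recorded for operand J
   down to a single size, using the instruction suffix when one was
   given.  Return 0 if the immediate size remains ambiguous, 1 on
   success.  */
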
4881 static int
4882 update_imm (unsigned int j)
4883 {
4884 i386_operand_type overlap = i.types[j];
4885 if ((overlap.bitfield.imm8
4886 || overlap.bitfield.imm8s
4887 || overlap.bitfield.imm16
4888 || overlap.bitfield.imm32
4889 || overlap.bitfield.imm32s
4890 || overlap.bitfield.imm64)
4891 && !operand_type_equal (&overlap, &imm8)
4892 && !operand_type_equal (&overlap, &imm8s)
4893 && !operand_type_equal (&overlap, &imm16)
4894 && !operand_type_equal (&overlap, &imm32)
4895 && !operand_type_equal (&overlap, &imm32s)
4896 && !operand_type_equal (&overlap, &imm64))
4897 {
4898 if (i.suffix)
4899 {
4900 i386_operand_type temp;
4901
4902 operand_type_set (&temp, 0);
4903 if (i.suffix == BYTE_MNEM_SUFFIX)
4904 {
4905 temp.bitfield.imm8 = overlap.bitfield.imm8;
4906 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4907 }
4908 else if (i.suffix == WORD_MNEM_SUFFIX)
4909 temp.bitfield.imm16 = overlap.bitfield.imm16;
4910 else if (i.suffix == QWORD_MNEM_SUFFIX)
4911 {
4912 temp.bitfield.imm64 = overlap.bitfield.imm64;
4913 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4914 }
4915 else
4916 temp.bitfield.imm32 = overlap.bitfield.imm32;
4917 overlap = temp;
4918 }
4919 else if (operand_type_equal (&overlap, &imm16_32_32s)
4920 || operand_type_equal (&overlap, &imm16_32)
4921 || operand_type_equal (&overlap, &imm16_32s))
4922 {
4923 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4924 overlap = imm16;
4925 else
4926 overlap = imm32s;
4927 }
4928 if (!operand_type_equal (&overlap, &imm8)
4929 && !operand_type_equal (&overlap, &imm8s)
4930 && !operand_type_equal (&overlap, &imm16)
4931 && !operand_type_equal (&overlap, &imm32)
4932 && !operand_type_equal (&overlap, &imm32s)
4933 && !operand_type_equal (&overlap, &imm64))
4934 {
4935 as_bad (_("no instruction mnemonic suffix given; "
4936 "can't determine immediate size"));
4937 return 0;
4938 }
4939 }
4940 i.types[j] = overlap;
4941
4942 return 1;
4943 }
4944
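/* Apply update_imm to the (at most two) immediate operands.  Return 0
   if any of them fails, 1 otherwise.  */
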
4945 static int
4946 finalize_imm (void)
4947 {
4948 unsigned int j, n;
4949
4950 /* Update the first 2 immediate operands. */
4951 n = i.operands > 2 ? 2 : i.operands;
4952 if (n)
4953 {
4954 for (j = 0; j < n; j++)
4955 if (update_imm (j) == 0)
4956 return 0;
4957
4958	      /* The 3rd operand can't be an immediate operand.  */
4959 gas_assert (operand_type_check (i.types[2], imm) == 0);
4960 }
4961
4962 return 1;
4963 }
4964
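/* Report that the implicit operand (first in AT&T syntax, last in Intel
   syntax) is not %xmm0/%ymm0.  XMM nonzero selects the xmm0 wording.
   Always returns 0 so callers can `return bad_implicit_operand (...)'.  */
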
4965 static int
4966 bad_implicit_operand (int xmm)
4967 {
4968 const char *ireg = xmm ? "xmm0" : "ymm0";
4969
4970 if (intel_syntax)
4971 as_bad (_("the last operand of `%s' must be `%s%s'"),
4972 i.tm.name, register_prefix, ireg);
4973 else
4974 as_bad (_("the first operand of `%s' must be `%s%s'"),
4975 i.tm.name, register_prefix, ireg);
4976 return 0;
4977 }
4978
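/* Finish operand processing common to all encodings: fold away implicit
   xmm0/ymm0 operands, apply RegKludge, put ShortForm register numbers
   into the opcode, build the ModRM byte where needed, and add a segment
   override prefix when the explicit segment is not the default.  Return
   0 on error, 1 on success.  */
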
4979 static int
4980 process_operands (void)
4981 {
4982 /* Default segment register this instruction will use for memory
4983 accesses. 0 means unknown. This is only for optimizing out
4984 unnecessary segment overrides. */
4985 const seg_entry *default_seg = 0;
4986
4987 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4988 {
4989 unsigned int dupl = i.operands;
4990 unsigned int dest = dupl - 1;
4991 unsigned int j;
4992
4993 /* The destination must be an xmm register. */
4994 gas_assert (i.reg_operands
4995 && MAX_OPERANDS > dupl
4996 && operand_type_equal (&i.types[dest], &regxmm));
4997
4998 if (i.tm.opcode_modifier.firstxmm0)
4999 {
5000 /* The first operand is implicit and must be xmm0. */
5001 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5002 if (i.op[0].regs->reg_num != 0)
5003 return bad_implicit_operand (1);
5004
5005 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5006 {
5007 /* Keep xmm0 for instructions with VEX prefix and 3
5008 sources. */
5009 goto duplicate;
5010 }
5011 else
5012 {
5013 /* We remove the first xmm0 and keep the number of
5014 operands unchanged, which in fact duplicates the
5015 destination. */
5016 for (j = 1; j < i.operands; j++)
5017 {
5018 i.op[j - 1] = i.op[j];
5019 i.types[j - 1] = i.types[j];
5020 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5021 }
5022 }
5023 }
5024 else if (i.tm.opcode_modifier.implicit1stxmm0)
5025 {
5026 gas_assert ((MAX_OPERANDS - 1) > dupl
5027 && (i.tm.opcode_modifier.vexsources
5028 == VEX3SOURCES));
5029
5030 /* Add the implicit xmm0 for instructions with VEX prefix
5031 and 3 sources. */
5032 for (j = i.operands; j > 0; j--)
5033 {
5034 i.op[j] = i.op[j - 1];
5035 i.types[j] = i.types[j - 1];
5036 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5037 }
5038 i.op[0].regs
5039 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5040 i.types[0] = regxmm;
5041 i.tm.operand_types[0] = regxmm;
5042
5043 i.operands += 2;
5044 i.reg_operands += 2;
5045 i.tm.operands += 2;
5046
5047 dupl++;
5048 dest++;
5049 i.op[dupl] = i.op[dest];
5050 i.types[dupl] = i.types[dest];
5051 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5052 }
5053 else
5054 {
5055 duplicate:
5056 i.operands++;
5057 i.reg_operands++;
5058 i.tm.operands++;
5059
5060 i.op[dupl] = i.op[dest];
5061 i.types[dupl] = i.types[dest];
5062 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5063 }
5064
5065 if (i.tm.opcode_modifier.immext)
5066 process_immext ();
5067 }
5068 else if (i.tm.opcode_modifier.firstxmm0)
5069 {
5070 unsigned int j;
5071
5072 /* The first operand is implicit and must be xmm0/ymm0. */
5073 gas_assert (i.reg_operands
5074 && (operand_type_equal (&i.types[0], &regxmm)
5075 || operand_type_equal (&i.types[0], &regymm)));
5076 if (i.op[0].regs->reg_num != 0)
5077 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5078
5079 for (j = 1; j < i.operands; j++)
5080 {
5081 i.op[j - 1] = i.op[j];
5082 i.types[j - 1] = i.types[j];
5083
5084 /* We need to adjust fields in i.tm since they are used by
5085 build_modrm_byte. */
5086 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5087 }
5088
5089 i.operands--;
5090 i.reg_operands--;
5091 i.tm.operands--;
5092 }
5093 else if (i.tm.opcode_modifier.regkludge)
5094 {
5095 /* The imul $imm, %reg instruction is converted into
5096 imul $imm, %reg, %reg, and the clr %reg instruction
5097 is converted into xor %reg, %reg. */
5098
5099 unsigned int first_reg_op;
5100
5101 if (operand_type_check (i.types[0], reg))
5102 first_reg_op = 0;
5103 else
5104 first_reg_op = 1;
5105 /* Pretend we saw the extra register operand. */
5106 gas_assert (i.reg_operands == 1
5107 && i.op[first_reg_op + 1].regs == 0);
5108 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5109 i.types[first_reg_op + 1] = i.types[first_reg_op];
5110 i.operands++;
5111 i.reg_operands++;
5112 }
5113
5114 if (i.tm.opcode_modifier.shortform)
5115 {
5116 if (i.types[0].bitfield.sreg2
5117 || i.types[0].bitfield.sreg3)
5118 {
5119 if (i.tm.base_opcode == POP_SEG_SHORT
5120 && i.op[0].regs->reg_num == 1)
5121 {
5122 as_bad (_("you can't `pop %scs'"), register_prefix);
5123 return 0;
5124 }
5125 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5126 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5127 i.rex |= REX_B;
5128 }
5129 else
5130 {
5131 /* The register or float register operand is in operand
5132 0 or 1. */
5133 unsigned int op;
5134
5135 if (i.types[0].bitfield.floatreg
5136 || operand_type_check (i.types[0], reg))
5137 op = 0;
5138 else
5139 op = 1;
5140 /* Register goes in low 3 bits of opcode. */
5141 i.tm.base_opcode |= i.op[op].regs->reg_num;
5142 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5143 i.rex |= REX_B;
5144 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5145 {
5146 /* Warn about some common errors, but press on regardless.
5147 The first case can be generated by gcc (<= 2.8.1). */
5148 if (i.operands == 2)
5149 {
5150 /* Reversed arguments on faddp, fsubp, etc. */
5151 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5152 register_prefix, i.op[!intel_syntax].regs->reg_name,
5153 register_prefix, i.op[intel_syntax].regs->reg_name);
5154 }
5155 else
5156 {
5157 /* Extraneous `l' suffix on fp insn. */
5158 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5159 register_prefix, i.op[0].regs->reg_name);
5160 }
5161 }
5162 }
5163 }
5164 else if (i.tm.opcode_modifier.modrm)
5165 {
5166 /* The opcode is completed (modulo i.tm.extension_opcode which
5167 must be put into the modrm byte). Now, we make the modrm and
5168 index base bytes based on all the info we've collected. */
5169
5170 default_seg = build_modrm_byte ();
5171 }
5172 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5173 {
5174 default_seg = &ds;
5175 }
5176 else if (i.tm.opcode_modifier.isstring)
5177 {
5178 /* For the string instructions that allow a segment override
5179 on one of their operands, the default segment is ds. */
5180 default_seg = &ds;
5181 }
5182
5183 if (i.tm.base_opcode == 0x8d /* lea */
5184 && i.seg[0]
5185 && !quiet_warnings)
5186 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5187
5188 /* If a segment was explicitly specified, and the specified segment
5189 is not the default, use an opcode prefix to select it. If we
5190 never figured out what the default segment is, then default_seg
5191 will be zero at this point, and the specified segment prefix will
5192 always be used. */
5193 if ((i.seg[0]) && (i.seg[0] != default_seg))
5194 {
5195 if (!add_prefix (i.seg[0]->seg_prefix))
5196 return 0;
5197 }
5198 return 1;
5199 }
5200
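/* Encode the ModRM byte (and, where applicable, the SIB byte and
   VEX.vvvv register specifier) from the parsed operands.  Return the
   default segment implied by the addressing mode, or 0 if none could be
   determined.  */
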
5201 static const seg_entry *
5202 build_modrm_byte (void)
5203 {
5204 const seg_entry *default_seg = 0;
5205 unsigned int source, dest;
5206 int vex_3_sources;
5207
5208 /* The first operand of instructions with VEX prefix and 3 sources
5209 must be VEX_Imm4. */
5210 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5211 if (vex_3_sources)
5212 {
5213 unsigned int nds, reg_slot;
5214 expressionS *exp;
5215
5216 if (i.tm.opcode_modifier.veximmext
5217 && i.tm.opcode_modifier.immext)
5218 {
5219 dest = i.operands - 2;
5220 gas_assert (dest == 3);
5221 }
5222 else
5223 dest = i.operands - 1;
5224 nds = dest - 1;
5225
5226 /* There are 2 kinds of instructions:
5227 1. 5 operands: 4 register operands or 3 register operands
5228 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5229 VexW0 or VexW1. The destination must be either XMM or YMM
5230 register.
5231 2. 4 operands: 4 register operands or 3 register operands
5232 plus 1 memory operand, VexXDS, and VexImmExt */
5233 gas_assert ((i.reg_operands == 4
5234 || (i.reg_operands == 3 && i.mem_operands == 1))
5235 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5236 && (i.tm.opcode_modifier.veximmext
5237 || (i.imm_operands == 1
5238 && i.types[0].bitfield.vec_imm4
5239 && (i.tm.opcode_modifier.vexw == VEXW0
5240 || i.tm.opcode_modifier.vexw == VEXW1)
5241 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5242 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5243
5244 if (i.imm_operands == 0)
5245 {
5246 /* When there is no immediate operand, generate an 8bit
5247 immediate operand to encode the first operand. */
5248 exp = &im_expressions[i.imm_operands++];
5249 i.op[i.operands].imms = exp;
5250 i.types[i.operands] = imm8;
5251 i.operands++;
5252 /* If VexW1 is set, the first operand is the source and
5253 the second operand is encoded in the immediate operand. */
5254 if (i.tm.opcode_modifier.vexw == VEXW1)
5255 {
5256 source = 0;
5257 reg_slot = 1;
5258 }
5259 else
5260 {
5261 source = 1;
5262 reg_slot = 0;
5263 }
5264
5265 /* FMA swaps REG and NDS. */
5266 if (i.tm.cpu_flags.bitfield.cpufma)
5267 {
5268 unsigned int tmp;
5269 tmp = reg_slot;
5270 reg_slot = nds;
5271 nds = tmp;
5272 }
5273
5274 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5275 &regxmm)
5276 || operand_type_equal (&i.tm.operand_types[reg_slot],
5277 &regymm));
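	  /* The register goes into the top four bits of the 8-bit
	     immediate (the VEX /is4 encoding); the low four bits are
	     left zero here.  */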
5278 exp->X_op = O_constant;
5279 exp->X_add_number
5280 = ((i.op[reg_slot].regs->reg_num
5281 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5282 << 4);
5283 }
5284 else
5285 {
5286 unsigned int imm_slot;
5287
5288 if (i.tm.opcode_modifier.vexw == VEXW0)
5289 {
5290 /* If VexW0 is set, the third operand is the source and
5291 the second operand is encoded in the immediate
5292 operand. */
5293 source = 2;
5294 reg_slot = 1;
5295 }
5296 else
5297 {
5298 /* VexW1 is set, the second operand is the source and
5299 the third operand is encoded in the immediate
5300 operand. */
5301 source = 1;
5302 reg_slot = 2;
5303 }
5304
5305 if (i.tm.opcode_modifier.immext)
5306 {
5307		  /* When ImmExt is set, the immediate byte is the last
5308 operand. */
5309 imm_slot = i.operands - 1;
5310 source--;
5311 reg_slot--;
5312 }
5313 else
5314 {
5315 imm_slot = 0;
5316
5317 /* Turn on Imm8 so that output_imm will generate it. */
5318 i.types[imm_slot].bitfield.imm8 = 1;
5319 }
5320
5321 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5322 &regxmm)
5323 || operand_type_equal (&i.tm.operand_types[reg_slot],
5324 &regymm));
5325 i.op[imm_slot].imms->X_add_number
5326 |= ((i.op[reg_slot].regs->reg_num
5327 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5328 << 4);
5329 }
5330
5331 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5332 || operand_type_equal (&i.tm.operand_types[nds],
5333 &regymm));
5334 i.vex.register_specifier = i.op[nds].regs;
5335 }
5336 else
5337 source = dest = 0;
5338
5339 /* i.reg_operands MUST be the number of real register operands;
5340 implicit registers do not count. If there are 3 register
5341	     operands, it must be an instruction with VexNDS.  For an
5342	     instruction with VexNDD, the destination register is encoded
5343	     in the VEX prefix.  If there are 4 register operands, it must be
5344	     an instruction with VEX prefix and 3 sources.  */
5345 if (i.mem_operands == 0
5346 && ((i.reg_operands == 2
5347 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5348 || (i.reg_operands == 3
5349 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5350 || (i.reg_operands == 4 && vex_3_sources)))
5351 {
5352 switch (i.operands)
5353 {
5354 case 2:
5355 source = 0;
5356 break;
5357 case 3:
5358	      /* When there are 3 operands, one of them may be an immediate,
5359		 which may be the first or the last operand.  Otherwise,
5360		 the first operand must be the shift count register (cl) or
5361		 this is an instruction with VexNDS.  */
5362 gas_assert (i.imm_operands == 1
5363 || (i.imm_operands == 0
5364 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5365 || i.types[0].bitfield.shiftcount)));
5366 if (operand_type_check (i.types[0], imm)
5367 || i.types[0].bitfield.shiftcount)
5368 source = 1;
5369 else
5370 source = 0;
5371 break;
5372 case 4:
5373 /* When there are 4 operands, the first two must be 8bit
5374 immediate operands. The source operand will be the 3rd
5375 one.
5376
5377	     For instructions with VexNDS, if the first operand is
5378	     an imm8, the source operand is the 2nd one.  If the last
5379 operand is imm8, the source operand is the first one. */
5380 gas_assert ((i.imm_operands == 2
5381 && i.types[0].bitfield.imm8
5382 && i.types[1].bitfield.imm8)
5383 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5384 && i.imm_operands == 1
5385 && (i.types[0].bitfield.imm8
5386 || i.types[i.operands - 1].bitfield.imm8)));
5387 if (i.imm_operands == 2)
5388 source = 2;
5389 else
5390 {
5391 if (i.types[0].bitfield.imm8)
5392 source = 1;
5393 else
5394 source = 0;
5395 }
5396 break;
5397 case 5:
5398 break;
5399 default:
5400 abort ();
5401 }
5402
5403 if (!vex_3_sources)
5404 {
5405 dest = source + 1;
5406
5407 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5408 {
5409 /* For instructions with VexNDS, the register-only
5410 source operand must be 32/64bit integer, XMM or
5411 YMM register. It is encoded in VEX prefix. We
5412 need to clear RegMem bit before calling
5413 operand_type_equal. */
5414
5415 i386_operand_type op;
5416 unsigned int vvvv;
5417
5418 /* Check register-only source operand when two source
5419 operands are swapped. */
5420 if (!i.tm.operand_types[source].bitfield.baseindex
5421 && i.tm.operand_types[dest].bitfield.baseindex)
5422 {
5423 vvvv = source;
5424 source = dest;
5425 }
5426 else
5427 vvvv = dest;
5428
5429 op = i.tm.operand_types[vvvv];
5430 op.bitfield.regmem = 0;
5431 if ((dest + 1) >= i.operands
5432 || (op.bitfield.reg32 != 1
5433		      && op.bitfield.reg64 != 1
5434 && !operand_type_equal (&op, &regxmm)
5435 && !operand_type_equal (&op, &regymm)))
5436 abort ();
5437 i.vex.register_specifier = i.op[vvvv].regs;
5438 dest++;
5439 }
5440 }
5441
5442 i.rm.mode = 3;
5443	      /* One of the register operands will be encoded in the i.rm.reg
5444		 field, the other in the combined i.rm.mode and i.rm.regmem
5445 fields. If no form of this instruction supports a memory
5446 destination operand, then we assume the source operand may
5447 sometimes be a memory operand and so we need to store the
5448 destination in the i.rm.reg field. */
5449 if (!i.tm.operand_types[dest].bitfield.regmem
5450 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5451 {
5452 i.rm.reg = i.op[dest].regs->reg_num;
5453 i.rm.regmem = i.op[source].regs->reg_num;
5454 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5455 i.rex |= REX_R;
5456 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5457 i.rex |= REX_B;
5458 }
5459 else
5460 {
5461 i.rm.reg = i.op[source].regs->reg_num;
5462 i.rm.regmem = i.op[dest].regs->reg_num;
5463 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5464 i.rex |= REX_B;
5465 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5466 i.rex |= REX_R;
5467 }
5468 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5469 {
5470 if (!i.types[0].bitfield.control
5471 && !i.types[1].bitfield.control)
5472 abort ();
5473 i.rex &= ~(REX_R | REX_B);
5474 add_prefix (LOCK_PREFIX_OPCODE);
5475 }
5476 }
5477 else
5478 { /* If it's not 2 reg operands... */
5479 unsigned int mem;
5480
5481 if (i.mem_operands)
5482 {
5483 unsigned int fake_zero_displacement = 0;
5484 unsigned int op;
5485
5486 for (op = 0; op < i.operands; op++)
5487 if (operand_type_check (i.types[op], anymem))
5488 break;
5489 gas_assert (op < i.operands);
5490
5491 if (i.tm.opcode_modifier.vecsib)
5492 {
5493 if (i.index_reg->reg_num == RegEiz
5494 || i.index_reg->reg_num == RegRiz)
5495 abort ();
5496
5497 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5498 if (!i.base_reg)
5499 {
5500 i.sib.base = NO_BASE_REGISTER;
5501 i.sib.scale = i.log2_scale_factor;
5502 i.types[op].bitfield.disp8 = 0;
5503 i.types[op].bitfield.disp16 = 0;
5504 i.types[op].bitfield.disp64 = 0;
5505 if (flag_code != CODE_64BIT)
5506 {
5507 /* Must be 32 bit */
5508 i.types[op].bitfield.disp32 = 1;
5509 i.types[op].bitfield.disp32s = 0;
5510 }
5511 else
5512 {
5513 i.types[op].bitfield.disp32 = 0;
5514 i.types[op].bitfield.disp32s = 1;
5515 }
5516 }
5517 i.sib.index = i.index_reg->reg_num;
5518 if ((i.index_reg->reg_flags & RegRex) != 0)
5519 i.rex |= REX_X;
5520 }
5521
5522 default_seg = &ds;
5523
5524 if (i.base_reg == 0)
5525 {
5526 i.rm.mode = 0;
5527 if (!i.disp_operands)
5528 {
5529 fake_zero_displacement = 1;
5530 /* Instructions with VSIB byte need 32bit displacement
5531 if there is no base register. */
5532 if (i.tm.opcode_modifier.vecsib)
5533 i.types[op].bitfield.disp32 = 1;
5534 }
5535 if (i.index_reg == 0)
5536 {
5537 gas_assert (!i.tm.opcode_modifier.vecsib);
5538 /* Operand is just <disp> */
5539 if (flag_code == CODE_64BIT)
5540 {
5541			  /* In 64bit mode the plain disp32 encoding is taken
5542			     over by RIP relative addressing; 32bit absolute
5543			     addressing has to use one of the redundant SIB
5544			     forms instead.  */
5545 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5546 i.sib.base = NO_BASE_REGISTER;
5547 i.sib.index = NO_INDEX_REGISTER;
5548 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5549 ? disp32s : disp32);
5550 }
5551 else if ((flag_code == CODE_16BIT)
5552 ^ (i.prefix[ADDR_PREFIX] != 0))
5553 {
5554 i.rm.regmem = NO_BASE_REGISTER_16;
5555 i.types[op] = disp16;
5556 }
5557 else
5558 {
5559 i.rm.regmem = NO_BASE_REGISTER;
5560 i.types[op] = disp32;
5561 }
5562 }
5563 else if (!i.tm.opcode_modifier.vecsib)
5564 {
5565 /* !i.base_reg && i.index_reg */
5566 if (i.index_reg->reg_num == RegEiz
5567 || i.index_reg->reg_num == RegRiz)
5568 i.sib.index = NO_INDEX_REGISTER;
5569 else
5570 i.sib.index = i.index_reg->reg_num;
5571 i.sib.base = NO_BASE_REGISTER;
5572 i.sib.scale = i.log2_scale_factor;
5573 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5574 i.types[op].bitfield.disp8 = 0;
5575 i.types[op].bitfield.disp16 = 0;
5576 i.types[op].bitfield.disp64 = 0;
5577 if (flag_code != CODE_64BIT)
5578 {
5579 /* Must be 32 bit */
5580 i.types[op].bitfield.disp32 = 1;
5581 i.types[op].bitfield.disp32s = 0;
5582 }
5583 else
5584 {
5585 i.types[op].bitfield.disp32 = 0;
5586 i.types[op].bitfield.disp32s = 1;
5587 }
5588 if ((i.index_reg->reg_flags & RegRex) != 0)
5589 i.rex |= REX_X;
5590 }
5591 }
5592 /* RIP addressing for 64bit mode. */
5593 else if (i.base_reg->reg_num == RegRip ||
5594 i.base_reg->reg_num == RegEip)
5595 {
5596 gas_assert (!i.tm.opcode_modifier.vecsib);
5597 i.rm.regmem = NO_BASE_REGISTER;
5598 i.types[op].bitfield.disp8 = 0;
5599 i.types[op].bitfield.disp16 = 0;
5600 i.types[op].bitfield.disp32 = 0;
5601 i.types[op].bitfield.disp32s = 1;
5602 i.types[op].bitfield.disp64 = 0;
5603 i.flags[op] |= Operand_PCrel;
5604 if (! i.disp_operands)
5605 fake_zero_displacement = 1;
5606 }
5607 else if (i.base_reg->reg_type.bitfield.reg16)
5608 {
5609 gas_assert (!i.tm.opcode_modifier.vecsib);
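	      /* 16bit addressing uses the fixed ModRM r/m encodings:
		 0 (%bx,%si)  1 (%bx,%di)  2 (%bp,%si)  3 (%bp,%di)
		 4 (%si)      5 (%di)      6 (%bp)/disp16  7 (%bx).  */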
5610 switch (i.base_reg->reg_num)
5611 {
5612 case 3: /* (%bx) */
5613 if (i.index_reg == 0)
5614 i.rm.regmem = 7;
5615 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5616 i.rm.regmem = i.index_reg->reg_num - 6;
5617 break;
5618 case 5: /* (%bp) */
5619 default_seg = &ss;
5620 if (i.index_reg == 0)
5621 {
5622 i.rm.regmem = 6;
5623 if (operand_type_check (i.types[op], disp) == 0)
5624 {
5625 /* fake (%bp) into 0(%bp) */
5626 i.types[op].bitfield.disp8 = 1;
5627 fake_zero_displacement = 1;
5628 }
5629 }
5630 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5631 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5632 break;
5633 default: /* (%si) -> 4 or (%di) -> 5 */
5634 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5635 }
5636 i.rm.mode = mode_from_disp_size (i.types[op]);
5637 }
5638 else /* i.base_reg and 32/64 bit mode */
5639 {
5640 if (flag_code == CODE_64BIT
5641 && operand_type_check (i.types[op], disp))
5642 {
5643 i386_operand_type temp;
5644 operand_type_set (&temp, 0);
5645 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5646 i.types[op] = temp;
5647 if (i.prefix[ADDR_PREFIX] == 0)
5648 i.types[op].bitfield.disp32s = 1;
5649 else
5650 i.types[op].bitfield.disp32 = 1;
5651 }
5652
5653 if (!i.tm.opcode_modifier.vecsib)
5654 i.rm.regmem = i.base_reg->reg_num;
5655 if ((i.base_reg->reg_flags & RegRex) != 0)
5656 i.rex |= REX_B;
5657 i.sib.base = i.base_reg->reg_num;
5658 /* x86-64 ignores REX prefix bit here to avoid decoder
5659 complications. */
5660 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5661 {
5662 default_seg = &ss;
5663 if (i.disp_operands == 0)
5664 {
5665 fake_zero_displacement = 1;
5666 i.types[op].bitfield.disp8 = 1;
5667 }
5668 }
5669 else if (i.base_reg->reg_num == ESP_REG_NUM)
5670 {
5671 default_seg = &ss;
5672 }
5673 i.sib.scale = i.log2_scale_factor;
5674 if (i.index_reg == 0)
5675 {
5676 gas_assert (!i.tm.opcode_modifier.vecsib);
5677 /* <disp>(%esp) becomes two byte modrm with no index
5678 register. We've already stored the code for esp
5679		      in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5680 Any base register besides %esp will not use the
5681 extra modrm byte. */
5682 i.sib.index = NO_INDEX_REGISTER;
5683 }
5684 else if (!i.tm.opcode_modifier.vecsib)
5685 {
5686 if (i.index_reg->reg_num == RegEiz
5687 || i.index_reg->reg_num == RegRiz)
5688 i.sib.index = NO_INDEX_REGISTER;
5689 else
5690 i.sib.index = i.index_reg->reg_num;
5691 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5692 if ((i.index_reg->reg_flags & RegRex) != 0)
5693 i.rex |= REX_X;
5694 }
5695
5696 if (i.disp_operands
5697 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5698 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5699 i.rm.mode = 0;
5700 else
5701 i.rm.mode = mode_from_disp_size (i.types[op]);
5702 }
5703
5704 if (fake_zero_displacement)
5705 {
5706 /* Fakes a zero displacement assuming that i.types[op]
5707 holds the correct displacement size. */
5708 expressionS *exp;
5709
5710 gas_assert (i.op[op].disps == 0);
5711 exp = &disp_expressions[i.disp_operands++];
5712 i.op[op].disps = exp;
5713 exp->X_op = O_constant;
5714 exp->X_add_number = 0;
5715 exp->X_add_symbol = (symbolS *) 0;
5716 exp->X_op_symbol = (symbolS *) 0;
5717 }
5718
5719 mem = op;
5720 }
5721 else
5722 mem = ~0;
5723
5724 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5725 {
5726 if (operand_type_check (i.types[0], imm))
5727 i.vex.register_specifier = NULL;
5728 else
5729 {
5730 /* VEX.vvvv encodes one of the sources when the first
5731 operand is not an immediate. */
5732 if (i.tm.opcode_modifier.vexw == VEXW0)
5733 i.vex.register_specifier = i.op[0].regs;
5734 else
5735 i.vex.register_specifier = i.op[1].regs;
5736 }
5737
5738	  /* Destination is an XMM register encoded in the ModRM.reg
5739 and VEX.R bit. */
5740 i.rm.reg = i.op[2].regs->reg_num;
5741 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5742 i.rex |= REX_R;
5743
5744 /* ModRM.rm and VEX.B encodes the other source. */
5745 if (!i.mem_operands)
5746 {
5747 i.rm.mode = 3;
5748
5749 if (i.tm.opcode_modifier.vexw == VEXW0)
5750 i.rm.regmem = i.op[1].regs->reg_num;
5751 else
5752 i.rm.regmem = i.op[0].regs->reg_num;
5753
5754 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5755 i.rex |= REX_B;
5756 }
5757 }
5758 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5759 {
5760 i.vex.register_specifier = i.op[2].regs;
5761 if (!i.mem_operands)
5762 {
5763 i.rm.mode = 3;
5764 i.rm.regmem = i.op[1].regs->reg_num;
5765 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5766 i.rex |= REX_B;
5767 }
5768 }
5769 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5770 (if any) based on i.tm.extension_opcode. Again, we must be
5771 careful to make sure that segment/control/debug/test/MMX
5772 registers are coded into the i.rm.reg field. */
5773 else if (i.reg_operands)
5774 {
5775 unsigned int op;
5776 unsigned int vex_reg = ~0;
5777
5778 for (op = 0; op < i.operands; op++)
5779 if (i.types[op].bitfield.reg8
5780 || i.types[op].bitfield.reg16
5781 || i.types[op].bitfield.reg32
5782 || i.types[op].bitfield.reg64
5783 || i.types[op].bitfield.regmmx
5784 || i.types[op].bitfield.regxmm
5785 || i.types[op].bitfield.regymm
5786 || i.types[op].bitfield.sreg2
5787 || i.types[op].bitfield.sreg3
5788 || i.types[op].bitfield.control
5789 || i.types[op].bitfield.debug
5790 || i.types[op].bitfield.test)
5791 break;
5792
5793 if (vex_3_sources)
5794 op = dest;
5795 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5796 {
5797 /* For instructions with VexNDS, the register-only
5798 source operand is encoded in VEX prefix. */
5799 gas_assert (mem != (unsigned int) ~0);
5800
5801 if (op > mem)
5802 {
5803 vex_reg = op++;
5804 gas_assert (op < i.operands);
5805 }
5806 else
5807 {
5808 /* Check register-only source operand when two source
5809 operands are swapped. */
5810 if (!i.tm.operand_types[op].bitfield.baseindex
5811 && i.tm.operand_types[op + 1].bitfield.baseindex)
5812 {
5813 vex_reg = op;
5814 op += 2;
5815 gas_assert (mem == (vex_reg + 1)
5816 && op < i.operands);
5817 }
5818 else
5819 {
5820 vex_reg = op + 1;
5821 gas_assert (vex_reg < i.operands);
5822 }
5823 }
5824 }
5825 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5826 {
5827 /* For instructions with VexNDD, the register destination
5828 is encoded in VEX prefix. */
5829 if (i.mem_operands == 0)
5830 {
5831 /* There is no memory operand. */
5832 gas_assert ((op + 2) == i.operands);
5833 vex_reg = op + 1;
5834 }
5835 else
5836 {
5837 /* There are only 2 operands. */
5838 gas_assert (op < 2 && i.operands == 2);
5839 vex_reg = 1;
5840 }
5841 }
5842 else
5843 gas_assert (op < i.operands);
5844
5845 if (vex_reg != (unsigned int) ~0)
5846 {
5847 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5848
5849 if (type->bitfield.reg32 != 1
5850 && type->bitfield.reg64 != 1
5851 && !operand_type_equal (type, &regxmm)
5852 && !operand_type_equal (type, &regymm))
5853 abort ();
5854
5855 i.vex.register_specifier = i.op[vex_reg].regs;
5856 }
5857
5858 /* Don't set OP operand twice. */
5859 if (vex_reg != op)
5860 {
5861 /* If there is an extension opcode to put here, the
5862 register number must be put into the regmem field. */
5863 if (i.tm.extension_opcode != None)
5864 {
5865 i.rm.regmem = i.op[op].regs->reg_num;
5866 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5867 i.rex |= REX_B;
5868 }
5869 else
5870 {
5871 i.rm.reg = i.op[op].regs->reg_num;
5872 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5873 i.rex |= REX_R;
5874 }
5875 }
5876
5877 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5878 must set it to 3 to indicate this is a register operand
5879 in the regmem field. */
5880 if (!i.mem_operands)
5881 i.rm.mode = 3;
5882 }
5883
5884 /* Fill in i.rm.reg field with extension opcode (if any). */
5885 if (i.tm.extension_opcode != None)
5886 i.rm.reg = i.tm.extension_opcode;
5887 }
5888 return default_seg;
5889 }
5890
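/* Emit a relaxable (conditional or unconditional) branch: the prefixes
   and the first opcode byte go into the fixed part of the frag, and a
   machine-dependent variable part is reserved so md_convert_frag can
   widen the branch if the target turns out to be out of range.  */
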
5891 static void
5892 output_branch (void)
5893 {
5894 char *p;
5895 int size;
5896 int code16;
5897 int prefix;
5898 relax_substateT subtype;
5899 symbolS *sym;
5900 offsetT off;
5901
5902 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5903 size = i.disp32_encoding ? BIG : SMALL;
5904
5905 prefix = 0;
5906 if (i.prefix[DATA_PREFIX] != 0)
5907 {
5908 prefix = 1;
5909 i.prefixes -= 1;
5910 code16 ^= CODE16;
5911 }
5912 /* Pentium4 branch hints. */
5913 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5914 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5915 {
5916 prefix++;
5917 i.prefixes--;
5918 }
5919 if (i.prefix[REX_PREFIX] != 0)
5920 {
5921 prefix++;
5922 i.prefixes--;
5923 }
5924
5925 if (i.prefixes != 0 && !intel_syntax)
5926 as_warn (_("skipping prefixes on this instruction"));
5927
5928	  /* It's always a symbol; end the frag and set up for relaxation.
5929 Make sure there is enough room in this frag for the largest
5930 instruction we may generate in md_convert_frag. This is 2
5931 bytes for the opcode and room for the prefix and largest
5932 displacement. */
5933 frag_grow (prefix + 2 + 4);
5934 /* Prefix and 1 opcode byte go in fr_fix. */
5935 p = frag_more (prefix + 1);
5936 if (i.prefix[DATA_PREFIX] != 0)
5937 *p++ = DATA_PREFIX_OPCODE;
5938 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5939 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5940 *p++ = i.prefix[SEG_PREFIX];
5941 if (i.prefix[REX_PREFIX] != 0)
5942 *p++ = i.prefix[REX_PREFIX];
5943 *p = i.tm.base_opcode;
5944
5945 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5946 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5947 else if (cpu_arch_flags.bitfield.cpui386)
5948 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5949 else
5950 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5951 subtype |= code16;
5952
5953 sym = i.op[0].disps->X_add_symbol;
5954 off = i.op[0].disps->X_add_number;
5955
5956 if (i.op[0].disps->X_op != O_constant
5957 && i.op[0].disps->X_op != O_symbol)
5958 {
5959 /* Handle complex expressions. */
5960 sym = make_expr_symbol (i.op[0].disps);
5961 off = 0;
5962 }
5963
5964 /* 1 possible extra opcode + 4 byte displacement go in var part.
5965 Pass reloc in fr_var. */
5966 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5967 }
5968
5969 static void
5970 output_jump (void)
5971 {
5972 char *p;
5973 int size;
5974 fixS *fixP;
5975
5976 if (i.tm.opcode_modifier.jumpbyte)
5977 {
5978 /* This is a loop or jecxz type instruction. */
5979 size = 1;
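      /* Hedged note: loop/jecxz style insns only have a rel8 form, so there
	 is nothing to relax here; an out-of-range target is diagnosed when
	 the 1-byte fixup below is applied.  */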
5980 if (i.prefix[ADDR_PREFIX] != 0)
5981 {
5982 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5983 i.prefixes -= 1;
5984 }
5985 /* Pentium4 branch hints. */
5986 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5987 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5988 {
5989 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5990 i.prefixes--;
5991 }
5992 }
5993 else
5994 {
5995 int code16;
5996
5997 code16 = 0;
5998 if (flag_code == CODE_16BIT)
5999 code16 = CODE16;
6000
6001 if (i.prefix[DATA_PREFIX] != 0)
6002 {
6003 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6004 i.prefixes -= 1;
6005 code16 ^= CODE16;
6006 }
6007
6008 size = 4;
6009 if (code16)
6010 size = 2;
6011 }
6012
6013 if (i.prefix[REX_PREFIX] != 0)
6014 {
6015 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6016 i.prefixes -= 1;
6017 }
6018
6019 if (i.prefixes != 0 && !intel_syntax)
6020 as_warn (_("skipping prefixes on this instruction"));
6021
6022 p = frag_more (1 + size);
6023 *p++ = i.tm.base_opcode;
6024
6025 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6026 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6027
6028 /* All jumps handled here are signed, but don't use a signed limit
6029 check for 32 and 16 bit jumps as we want to allow wrap around at
6030 4G and 64k respectively. */
6031 if (size == 1)
6032 fixP->fx_signed = 1;
6033 }
6034
6035 static void
6036 output_interseg_jump (void)
6037 {
6038 char *p;
6039 int size;
6040 int prefix;
6041 int code16;
6042
6043 code16 = 0;
6044 if (flag_code == CODE_16BIT)
6045 code16 = CODE16;
6046
6047 prefix = 0;
6048 if (i.prefix[DATA_PREFIX] != 0)
6049 {
6050 prefix = 1;
6051 i.prefixes -= 1;
6052 code16 ^= CODE16;
6053 }
6054 if (i.prefix[REX_PREFIX] != 0)
6055 {
6056 prefix++;
6057 i.prefixes -= 1;
6058 }
6059
6060 size = 4;
6061 if (code16)
6062 size = 2;
6063
6064 if (i.prefixes != 0 && !intel_syntax)
6065 as_warn (_("skipping prefixes on this instruction"));
6066
6067 /* 1 opcode; 2 segment; offset */
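  /* Illustrative layout (hedged): a direct `ljmp' in 32-bit code emits the
     opcode byte, then the 4-byte offset, then the 2-byte segment selector,
     matching the md_number_to_chars calls below.  */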
6068 p = frag_more (prefix + 1 + 2 + size);
6069
6070 if (i.prefix[DATA_PREFIX] != 0)
6071 *p++ = DATA_PREFIX_OPCODE;
6072
6073 if (i.prefix[REX_PREFIX] != 0)
6074 *p++ = i.prefix[REX_PREFIX];
6075
6076 *p++ = i.tm.base_opcode;
6077 if (i.op[1].imms->X_op == O_constant)
6078 {
6079 offsetT n = i.op[1].imms->X_add_number;
6080
6081 if (size == 2
6082 && !fits_in_unsigned_word (n)
6083 && !fits_in_signed_word (n))
6084 {
6085 as_bad (_("16-bit jump out of range"));
6086 return;
6087 }
6088 md_number_to_chars (p, n, size);
6089 }
6090 else
6091 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6092 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6093 if (i.op[0].imms->X_op != O_constant)
6094 as_bad (_("can't handle non absolute segment in `%s'"),
6095 i.tm.name);
6096 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6097 }
6098
6099 static void
6100 output_insn (void)
6101 {
6102 fragS *insn_start_frag;
6103 offsetT insn_start_off;
6104
6105 /* Tie dwarf2 debug info to the address at the start of the insn.
6106 We can't do this after the insn has been output as the current
6107 frag may have been closed off. eg. by frag_var. */
6108 dwarf2_emit_insn (0);
6109
6110 insn_start_frag = frag_now;
6111 insn_start_off = frag_now_fix ();
6112
6113 /* Output jumps. */
6114 if (i.tm.opcode_modifier.jump)
6115 output_branch ();
6116 else if (i.tm.opcode_modifier.jumpbyte
6117 || i.tm.opcode_modifier.jumpdword)
6118 output_jump ();
6119 else if (i.tm.opcode_modifier.jumpintersegment)
6120 output_interseg_jump ();
6121 else
6122 {
6123 /* Output normal instructions here. */
6124 char *p;
6125 unsigned char *q;
6126 unsigned int j;
6127 unsigned int prefix;
6128
6129	  /* Since the VEX prefix already encodes the implied SIMD prefix and
6130	     opcode escape bytes, we don't need to emit them explicitly.  */
6131 if (!i.tm.opcode_modifier.vex)
6132 {
6133 switch (i.tm.opcode_length)
6134 {
6135 case 3:
6136 if (i.tm.base_opcode & 0xff000000)
6137 {
6138 prefix = (i.tm.base_opcode >> 24) & 0xff;
6139 goto check_prefix;
6140 }
6141 break;
6142 case 2:
6143 if ((i.tm.base_opcode & 0xff0000) != 0)
6144 {
6145 prefix = (i.tm.base_opcode >> 16) & 0xff;
6146 if (i.tm.cpu_flags.bitfield.cpupadlock)
6147 {
6148 check_prefix:
6149 if (prefix != REPE_PREFIX_OPCODE
6150 || (i.prefix[REP_PREFIX]
6151 != REPE_PREFIX_OPCODE))
6152 add_prefix (prefix);
6153 }
6154 else
6155 add_prefix (prefix);
6156 }
6157 break;
6158 case 1:
6159 break;
6160 default:
6161 abort ();
6162 }
6163
6164 /* The prefix bytes. */
6165 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6166 if (*q)
6167 FRAG_APPEND_1_CHAR (*q);
6168 }
6169
6170 if (i.tm.opcode_modifier.vex)
6171 {
6172 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6173 if (*q)
6174 switch (j)
6175 {
6176 case REX_PREFIX:
6177 /* REX byte is encoded in VEX prefix. */
6178 break;
6179 case SEG_PREFIX:
6180 case ADDR_PREFIX:
6181 FRAG_APPEND_1_CHAR (*q);
6182 break;
6183 default:
6184 /* There should be no other prefixes for instructions
6185 with VEX prefix. */
6186 abort ();
6187 }
6188
6189 /* Now the VEX prefix. */
6190 p = frag_more (i.vex.length);
6191 for (j = 0; j < i.vex.length; j++)
6192 p[j] = i.vex.bytes[j];
6193 }
6194
6195 /* Now the opcode; be careful about word order here! */
6196 if (i.tm.opcode_length == 1)
6197 {
6198 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6199 }
6200 else
6201 {
6202 switch (i.tm.opcode_length)
6203 {
6204 case 3:
6205 p = frag_more (3);
6206 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6207 break;
6208 case 2:
6209 p = frag_more (2);
6210 break;
6211 default:
6212 abort ();
6213 break;
6214 }
6215
6216 /* Put out high byte first: can't use md_number_to_chars! */
6217 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6218 *p = i.tm.base_opcode & 0xff;
6219 }
6220
6221 /* Now the modrm byte and sib byte (if present). */
6222 if (i.tm.opcode_modifier.modrm)
6223 {
6224 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6225 | i.rm.reg << 3
6226 | i.rm.mode << 6));
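	  /* Worked example (hedged): with regmem == 1 (%ecx), reg == 0
	     (%eax) and mode == 3, the byte above is 1 | 0 << 3 | 3 << 6
	     == 0xc1, giving the familiar 89 c1 encoding of
	     `mov %eax,%ecx'.  */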
6227	  /* If i.rm.regmem == ESP (4)
6228	     && i.rm.mode != 3 (register mode)
6229	     && not 16 bit
6230	     ==> need a SIB byte (the "second modrm byte").  */
6231 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6232 && i.rm.mode != 3
6233 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6234 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6235 | i.sib.index << 3
6236 | i.sib.scale << 6));
6237 }
6238
6239 if (i.disp_operands)
6240 output_disp (insn_start_frag, insn_start_off);
6241
6242 if (i.imm_operands)
6243 output_imm (insn_start_frag, insn_start_off);
6244 }
6245
6246 #ifdef DEBUG386
6247 if (flag_debug)
6248 {
6249 pi ("" /*line*/, &i);
6250 }
6251 #endif /* DEBUG386 */
6252 }
6253
6254 /* Return the size of the displacement operand N. */
6255
6256 static int
6257 disp_size (unsigned int n)
6258 {
6259 int size = 4;
6260 if (i.types[n].bitfield.disp64)
6261 size = 8;
6262 else if (i.types[n].bitfield.disp8)
6263 size = 1;
6264 else if (i.types[n].bitfield.disp16)
6265 size = 2;
6266 return size;
6267 }
6268
6269 /* Return the size of the immediate operand N. */
6270
6271 static int
6272 imm_size (unsigned int n)
6273 {
6274 int size = 4;
6275 if (i.types[n].bitfield.imm64)
6276 size = 8;
6277 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6278 size = 1;
6279 else if (i.types[n].bitfield.imm16)
6280 size = 2;
6281 return size;
6282 }
6283
6284 static void
6285 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6286 {
6287 char *p;
6288 unsigned int n;
6289
6290 for (n = 0; n < i.operands; n++)
6291 {
6292 if (operand_type_check (i.types[n], disp))
6293 {
6294 if (i.op[n].disps->X_op == O_constant)
6295 {
6296 int size = disp_size (n);
6297 offsetT val;
6298
6299 val = offset_in_range (i.op[n].disps->X_add_number,
6300 size);
6301 p = frag_more (size);
6302 md_number_to_chars (p, val, size);
6303 }
6304 else
6305 {
6306 enum bfd_reloc_code_real reloc_type;
6307 int size = disp_size (n);
6308 int sign = i.types[n].bitfield.disp32s;
6309 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6310
6311 /* We can't have 8 bit displacement here. */
6312 gas_assert (!i.types[n].bitfield.disp8);
6313
6314		  /* The PC relative address is computed relative
6315		     to the instruction boundary, so if an immediate
6316		     field follows, we need to adjust the value.  */
6317 if (pcrel && i.imm_operands)
6318 {
6319 unsigned int n1;
6320 int sz = 0;
6321
6322 for (n1 = 0; n1 < i.operands; n1++)
6323 if (operand_type_check (i.types[n1], imm))
6324 {
6325 /* Only one immediate is allowed for PC
6326 relative address. */
6327 gas_assert (sz == 0);
6328 sz = imm_size (n1);
6329 i.op[n].disps->X_add_number -= sz;
6330 }
6331 /* We should find the immediate. */
6332 gas_assert (sz != 0);
6333 }
6334
6335 p = frag_more (size);
6336 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6337 if (GOT_symbol
6338 && GOT_symbol == i.op[n].disps->X_add_symbol
6339 && (((reloc_type == BFD_RELOC_32
6340 || reloc_type == BFD_RELOC_X86_64_32S
6341 || (reloc_type == BFD_RELOC_64
6342 && object_64bit))
6343 && (i.op[n].disps->X_op == O_symbol
6344 || (i.op[n].disps->X_op == O_add
6345 && ((symbol_get_value_expression
6346 (i.op[n].disps->X_op_symbol)->X_op)
6347 == O_subtract))))
6348 || reloc_type == BFD_RELOC_32_PCREL))
6349 {
6350 offsetT add;
6351
6352 if (insn_start_frag == frag_now)
6353 add = (p - frag_now->fr_literal) - insn_start_off;
6354 else
6355 {
6356 fragS *fr;
6357
6358 add = insn_start_frag->fr_fix - insn_start_off;
6359 for (fr = insn_start_frag->fr_next;
6360 fr && fr != frag_now; fr = fr->fr_next)
6361 add += fr->fr_fix;
6362 add += p - frag_now->fr_literal;
6363 }
6364
6365 if (!object_64bit)
6366 {
6367 reloc_type = BFD_RELOC_386_GOTPC;
6368 i.op[n].imms->X_add_number += add;
6369 }
6370 else if (reloc_type == BFD_RELOC_64)
6371 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6372 else
6373 /* Don't do the adjustment for x86-64, as there
6374 the pcrel addressing is relative to the _next_
6375 insn, and that is taken care of in other code. */
6376 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6377 }
6378 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6379 i.op[n].disps, pcrel, reloc_type);
6380 }
6381 }
6382 }
6383 }
6384
6385 static void
6386 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6387 {
6388 char *p;
6389 unsigned int n;
6390
6391 for (n = 0; n < i.operands; n++)
6392 {
6393 if (operand_type_check (i.types[n], imm))
6394 {
6395 if (i.op[n].imms->X_op == O_constant)
6396 {
6397 int size = imm_size (n);
6398 offsetT val;
6399
6400 val = offset_in_range (i.op[n].imms->X_add_number,
6401 size);
6402 p = frag_more (size);
6403 md_number_to_chars (p, val, size);
6404 }
6405 else
6406 {
6407 /* Not absolute_section.
6408 Need a 32-bit fixup (don't support 8bit
6409 non-absolute imms). Try to support other
6410 sizes ... */
6411 enum bfd_reloc_code_real reloc_type;
6412 int size = imm_size (n);
6413 int sign;
6414
6415 if (i.types[n].bitfield.imm32s
6416 && (i.suffix == QWORD_MNEM_SUFFIX
6417 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6418 sign = 1;
6419 else
6420 sign = 0;
6421
6422 p = frag_more (size);
6423 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6424
6425 /* This is tough to explain. We end up with this one if we
6426 * have operands that look like
6427 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6428 * obtain the absolute address of the GOT, and it is strongly
6429 * preferable from a performance point of view to avoid using
6430	     * a runtime relocation for this.  The actual sequence of
6431	     * instructions often looks something like:
6432 *
6433 * call .L66
6434 * .L66:
6435 * popl %ebx
6436 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6437 *
6438 * The call and pop essentially return the absolute address
6439 * of the label .L66 and store it in %ebx. The linker itself
6440 * will ultimately change the first operand of the addl so
6441 * that %ebx points to the GOT, but to keep things simple, the
6442 * .o file must have this operand set so that it generates not
6443 * the absolute address of .L66, but the absolute address of
6444	     * itself.  This allows the linker itself to simply treat a GOTPC
6445 * relocation as asking for a pcrel offset to the GOT to be
6446 * added in, and the addend of the relocation is stored in the
6447 * operand field for the instruction itself.
6448 *
6449 * Our job here is to fix the operand so that it would add
6450 * the correct offset so that %ebx would point to itself. The
6451 * thing that is tricky is that .-.L66 will point to the
6452 * beginning of the instruction, so we need to further modify
6453 * the operand so that it will point to itself. There are
6454 * other cases where you have something like:
6455 *
6456 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6457 *
6458 * and here no correction would be required. Internally in
6459 * the assembler we treat operands of this form as not being
6460 * pcrel since the '.' is explicitly mentioned, and I wonder
6461 * whether it would simplify matters to do it this way. Who
6462 * knows. In earlier versions of the PIC patches, the
6463 * pcrel_adjust field was used to store the correction, but
6464 * since the expression is not pcrel, I felt it would be
6465 * confusing to do it this way. */
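	      /* Worked example (editorial, hedged): for the addl above, the
		 encoding is 81 c3 followed by the imm32, so the immediate
		 field starts 2 bytes into the insn; when the whole insn sits
		 in one frag, ADD below works out to 2 and is folded into the
		 relocation addend.  */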
6466
6467 if ((reloc_type == BFD_RELOC_32
6468 || reloc_type == BFD_RELOC_X86_64_32S
6469 || reloc_type == BFD_RELOC_64)
6470 && GOT_symbol
6471 && GOT_symbol == i.op[n].imms->X_add_symbol
6472 && (i.op[n].imms->X_op == O_symbol
6473 || (i.op[n].imms->X_op == O_add
6474 && ((symbol_get_value_expression
6475 (i.op[n].imms->X_op_symbol)->X_op)
6476 == O_subtract))))
6477 {
6478 offsetT add;
6479
6480 if (insn_start_frag == frag_now)
6481 add = (p - frag_now->fr_literal) - insn_start_off;
6482 else
6483 {
6484 fragS *fr;
6485
6486 add = insn_start_frag->fr_fix - insn_start_off;
6487 for (fr = insn_start_frag->fr_next;
6488 fr && fr != frag_now; fr = fr->fr_next)
6489 add += fr->fr_fix;
6490 add += p - frag_now->fr_literal;
6491 }
6492
6493 if (!object_64bit)
6494 reloc_type = BFD_RELOC_386_GOTPC;
6495 else if (size == 4)
6496 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6497 else if (size == 8)
6498 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6499 i.op[n].imms->X_add_number += add;
6500 }
6501 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6502 i.op[n].imms, 0, reloc_type);
6503 }
6504 }
6505 }
6506 }
6507 \f
6508 /* x86_cons_fix_new is called via the expression parsing code when a
6509 reloc is needed. We use this hook to get the correct .got reloc. */
6510 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6511 static int cons_sign = -1;
6512
6513 void
6514 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6515 expressionS *exp)
6516 {
6517 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6518
6519 got_reloc = NO_RELOC;
6520
6521 #ifdef TE_PE
6522 if (exp->X_op == O_secrel)
6523 {
6524 exp->X_op = O_symbol;
6525 r = BFD_RELOC_32_SECREL;
6526 }
6527 #endif
6528
6529 fix_new_exp (frag, off, len, exp, 0, r);
6530 }
6531
6532 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6533 || defined (LEX_AT)
6534 # define lex_got(reloc, adjust, types) NULL
6535 #else
6536 /* Parse operands of the form
6537 <symbol>@GOTOFF+<nnn>
6538 and similar .plt or .got references.
6539
6540 If we find one, set up the correct relocation in RELOC and copy the
6541     input string, minus the `@GOTOFF', into a malloc'd buffer for
6542     parsing by the calling routine.  Return this buffer, and if ADJUST
6543     is non-null, set it to the length of the string we removed from the
6544 input line. Otherwise return NULL. */
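/* Hedged example of the transformation: for input `foo@GOTOFF+4', *REL is
   set to the GOTOFF relocation appropriate for the object format and the
   returned buffer reads "foo +4" -- the reloc token is replaced by a space
   so that junk such as `foo@GOTOFF1' is still diagnosed.  */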
6545 static char *
6546 lex_got (enum bfd_reloc_code_real *rel,
6547 int *adjust,
6548 i386_operand_type *types)
6549 {
6550	  /* Some of the relocations depend on the size of the field to
6551	     be relocated.  But in our callers i386_immediate and i386_displacement
6552 we don't yet know the operand size (this will be set by insn
6553 matching). Hence we record the word32 relocation here,
6554 and adjust the reloc according to the real size in reloc(). */
6555 static const struct {
6556 const char *str;
6557 int len;
6558 const enum bfd_reloc_code_real rel[2];
6559 const i386_operand_type types64;
6560 } gotrel[] = {
6561 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6562 BFD_RELOC_X86_64_PLTOFF64 },
6563 OPERAND_TYPE_IMM64 },
6564 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6565 BFD_RELOC_X86_64_PLT32 },
6566 OPERAND_TYPE_IMM32_32S_DISP32 },
6567 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6568 BFD_RELOC_X86_64_GOTPLT64 },
6569 OPERAND_TYPE_IMM64_DISP64 },
6570 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6571 BFD_RELOC_X86_64_GOTOFF64 },
6572 OPERAND_TYPE_IMM64_DISP64 },
6573 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6574 BFD_RELOC_X86_64_GOTPCREL },
6575 OPERAND_TYPE_IMM32_32S_DISP32 },
6576 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6577 BFD_RELOC_X86_64_TLSGD },
6578 OPERAND_TYPE_IMM32_32S_DISP32 },
6579 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6580 _dummy_first_bfd_reloc_code_real },
6581 OPERAND_TYPE_NONE },
6582 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6583 BFD_RELOC_X86_64_TLSLD },
6584 OPERAND_TYPE_IMM32_32S_DISP32 },
6585 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6586 BFD_RELOC_X86_64_GOTTPOFF },
6587 OPERAND_TYPE_IMM32_32S_DISP32 },
6588 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6589 BFD_RELOC_X86_64_TPOFF32 },
6590 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6591 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6592 _dummy_first_bfd_reloc_code_real },
6593 OPERAND_TYPE_NONE },
6594 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6595 BFD_RELOC_X86_64_DTPOFF32 },
6596 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6597 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6598 _dummy_first_bfd_reloc_code_real },
6599 OPERAND_TYPE_NONE },
6600 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6601 _dummy_first_bfd_reloc_code_real },
6602 OPERAND_TYPE_NONE },
6603 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6604 BFD_RELOC_X86_64_GOT32 },
6605 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6606 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6607 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6608 OPERAND_TYPE_IMM32_32S_DISP32 },
6609 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6610 BFD_RELOC_X86_64_TLSDESC_CALL },
6611 OPERAND_TYPE_IMM32_32S_DISP32 },
6612 };
6613 char *cp;
6614 unsigned int j;
6615
6616 #if defined (OBJ_MAYBE_ELF)
6617 if (!IS_ELF)
6618 return NULL;
6619 #endif
6620
6621 for (cp = input_line_pointer; *cp != '@'; cp++)
6622 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6623 return NULL;
6624
6625 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6626 {
6627 int len = gotrel[j].len;
6628 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6629 {
6630 if (gotrel[j].rel[object_64bit] != 0)
6631 {
6632 int first, second;
6633 char *tmpbuf, *past_reloc;
6634
6635 *rel = gotrel[j].rel[object_64bit];
6636 if (adjust)
6637 *adjust = len;
6638
6639 if (types)
6640 {
6641 if (flag_code != CODE_64BIT)
6642 {
6643 types->bitfield.imm32 = 1;
6644 types->bitfield.disp32 = 1;
6645 }
6646 else
6647 *types = gotrel[j].types64;
6648 }
6649
6650 if (GOT_symbol == NULL)
6651 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6652
6653 /* The length of the first part of our input line. */
6654 first = cp - input_line_pointer;
6655
6656 /* The second part goes from after the reloc token until
6657 (and including) an end_of_line char or comma. */
6658 past_reloc = cp + 1 + len;
6659 cp = past_reloc;
6660 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6661 ++cp;
6662 second = cp + 1 - past_reloc;
6663
6664 /* Allocate and copy string. The trailing NUL shouldn't
6665 be necessary, but be safe. */
6666 tmpbuf = (char *) xmalloc (first + second + 2);
6667 memcpy (tmpbuf, input_line_pointer, first);
6668 if (second != 0 && *past_reloc != ' ')
6669 /* Replace the relocation token with ' ', so that
6670 errors like foo@GOTOFF1 will be detected. */
6671 tmpbuf[first++] = ' ';
6672 memcpy (tmpbuf + first, past_reloc, second);
6673 tmpbuf[first + second] = '\0';
6674 return tmpbuf;
6675 }
6676
6677 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6678 gotrel[j].str, 1 << (5 + object_64bit));
6679 return NULL;
6680 }
6681 }
6682
6683 /* Might be a symbol version string. Don't as_bad here. */
6684 return NULL;
6685 }
6686 #endif
6687
6688 void
6689 x86_cons (expressionS *exp, int size)
6690 {
6691 intel_syntax = -intel_syntax;
6692
6693 exp->X_md = 0;
6694 if (size == 4 || (object_64bit && size == 8))
6695 {
6696 /* Handle @GOTOFF and the like in an expression. */
6697 char *save;
6698 char *gotfree_input_line;
6699 int adjust = 0;
6700
6701 save = input_line_pointer;
6702 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6703 if (gotfree_input_line)
6704 input_line_pointer = gotfree_input_line;
6705
6706 expression (exp);
6707
6708 if (gotfree_input_line)
6709 {
6710 /* expression () has merrily parsed up to the end of line,
6711 or a comma - in the wrong buffer. Transfer how far
6712 input_line_pointer has moved to the right buffer. */
6713 input_line_pointer = (save
6714 + (input_line_pointer - gotfree_input_line)
6715 + adjust);
6716 free (gotfree_input_line);
6717 if (exp->X_op == O_constant
6718 || exp->X_op == O_absent
6719 || exp->X_op == O_illegal
6720 || exp->X_op == O_register
6721 || exp->X_op == O_big)
6722 {
6723 char c = *input_line_pointer;
6724 *input_line_pointer = 0;
6725 as_bad (_("missing or invalid expression `%s'"), save);
6726 *input_line_pointer = c;
6727 }
6728 }
6729 }
6730 else
6731 expression (exp);
6732
6733 intel_syntax = -intel_syntax;
6734
6735 if (intel_syntax)
6736 i386_intel_simplify (exp);
6737 }
6738
6739 static void
6740 signed_cons (int size)
6741 {
6742 if (flag_code == CODE_64BIT)
6743 cons_sign = 1;
6744 cons (size);
6745 cons_sign = -1;
6746 }
6747
6748 #ifdef TE_PE
6749 static void
6750 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6751 {
6752 expressionS exp;
6753
6754 do
6755 {
6756 expression (&exp);
6757 if (exp.X_op == O_symbol)
6758 exp.X_op = O_secrel;
6759
6760 emit_expr (&exp, 4);
6761 }
6762 while (*input_line_pointer++ == ',');
6763
6764 input_line_pointer--;
6765 demand_empty_rest_of_line ();
6766 }
6767 #endif
6768
6769 static int
6770 i386_immediate (char *imm_start)
6771 {
6772 char *save_input_line_pointer;
6773 char *gotfree_input_line;
6774 segT exp_seg = 0;
6775 expressionS *exp;
6776 i386_operand_type types;
6777
6778 operand_type_set (&types, ~0);
6779
6780 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6781 {
6782 as_bad (_("at most %d immediate operands are allowed"),
6783 MAX_IMMEDIATE_OPERANDS);
6784 return 0;
6785 }
6786
6787 exp = &im_expressions[i.imm_operands++];
6788 i.op[this_operand].imms = exp;
6789
6790 if (is_space_char (*imm_start))
6791 ++imm_start;
6792
6793 save_input_line_pointer = input_line_pointer;
6794 input_line_pointer = imm_start;
6795
6796 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6797 if (gotfree_input_line)
6798 input_line_pointer = gotfree_input_line;
6799
6800 exp_seg = expression (exp);
6801
6802 SKIP_WHITESPACE ();
6803 if (*input_line_pointer)
6804 as_bad (_("junk `%s' after expression"), input_line_pointer);
6805
6806 input_line_pointer = save_input_line_pointer;
6807 if (gotfree_input_line)
6808 {
6809 free (gotfree_input_line);
6810
6811 if (exp->X_op == O_constant || exp->X_op == O_register)
6812 exp->X_op = O_illegal;
6813 }
6814
6815 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6816 }
6817
6818 static int
6819 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6820 i386_operand_type types, const char *imm_start)
6821 {
6822 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6823 {
6824 if (imm_start)
6825 as_bad (_("missing or invalid immediate expression `%s'"),
6826 imm_start);
6827 return 0;
6828 }
6829 else if (exp->X_op == O_constant)
6830 {
6831 /* Size it properly later. */
6832 i.types[this_operand].bitfield.imm64 = 1;
6833 /* If not 64bit, sign extend val. */
6834 if (flag_code != CODE_64BIT
6835 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6836 exp->X_add_number
6837 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
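	/* Hedged worked example: the xor/subtract above sign-extends bit 31,
	   e.g. a 32-bit 0xfffffffe becomes -2 while 0x7fffffff is left
	   unchanged.  */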
6838 }
6839 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6840 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6841 && exp_seg != absolute_section
6842 && exp_seg != text_section
6843 && exp_seg != data_section
6844 && exp_seg != bss_section
6845 && exp_seg != undefined_section
6846 && !bfd_is_com_section (exp_seg))
6847 {
6848 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6849 return 0;
6850 }
6851 #endif
6852 else if (!intel_syntax && exp->X_op == O_register)
6853 {
6854 if (imm_start)
6855 as_bad (_("illegal immediate register operand %s"), imm_start);
6856 return 0;
6857 }
6858 else
6859 {
6860 /* This is an address. The size of the address will be
6861 determined later, depending on destination register,
6862 suffix, or the default for the section. */
6863 i.types[this_operand].bitfield.imm8 = 1;
6864 i.types[this_operand].bitfield.imm16 = 1;
6865 i.types[this_operand].bitfield.imm32 = 1;
6866 i.types[this_operand].bitfield.imm32s = 1;
6867 i.types[this_operand].bitfield.imm64 = 1;
6868 i.types[this_operand] = operand_type_and (i.types[this_operand],
6869 types);
6870 }
6871
6872 return 1;
6873 }
6874
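/* Editorial summary (inferred from the code): parse the scale factor of a
   base/index operand, e.g. the `4' in `(%ebx,%ecx,4)', store its log2 in
   i.log2_scale_factor, and return a pointer past it, or NULL if the value
   is not 1, 2, 4 or 8.  */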
6875 static char *
6876 i386_scale (char *scale)
6877 {
6878 offsetT val;
6879 char *save = input_line_pointer;
6880
6881 input_line_pointer = scale;
6882 val = get_absolute_expression ();
6883
6884 switch (val)
6885 {
6886 case 1:
6887 i.log2_scale_factor = 0;
6888 break;
6889 case 2:
6890 i.log2_scale_factor = 1;
6891 break;
6892 case 4:
6893 i.log2_scale_factor = 2;
6894 break;
6895 case 8:
6896 i.log2_scale_factor = 3;
6897 break;
6898 default:
6899 {
6900 char sep = *input_line_pointer;
6901
6902 *input_line_pointer = '\0';
6903 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6904 scale);
6905 *input_line_pointer = sep;
6906 input_line_pointer = save;
6907 return NULL;
6908 }
6909 }
6910 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6911 {
6912 as_warn (_("scale factor of %d without an index register"),
6913 1 << i.log2_scale_factor);
6914 i.log2_scale_factor = 0;
6915 }
6916 scale = input_line_pointer;
6917 input_line_pointer = save;
6918 return scale;
6919 }
6920
6921 static int
6922 i386_displacement (char *disp_start, char *disp_end)
6923 {
6924 expressionS *exp;
6925 segT exp_seg = 0;
6926 char *save_input_line_pointer;
6927 char *gotfree_input_line;
6928 int override;
6929 i386_operand_type bigdisp, types = anydisp;
6930 int ret;
6931
6932 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6933 {
6934 as_bad (_("at most %d displacement operands are allowed"),
6935 MAX_MEMORY_OPERANDS);
6936 return 0;
6937 }
6938
6939 operand_type_set (&bigdisp, 0);
6940 if ((i.types[this_operand].bitfield.jumpabsolute)
6941 || (!current_templates->start->opcode_modifier.jump
6942 && !current_templates->start->opcode_modifier.jumpdword))
6943 {
6944 bigdisp.bitfield.disp32 = 1;
6945 override = (i.prefix[ADDR_PREFIX] != 0);
6946 if (flag_code == CODE_64BIT)
6947 {
6948 if (!override)
6949 {
6950 bigdisp.bitfield.disp32s = 1;
6951 bigdisp.bitfield.disp64 = 1;
6952 }
6953 }
6954 else if ((flag_code == CODE_16BIT) ^ override)
6955 {
6956 bigdisp.bitfield.disp32 = 0;
6957 bigdisp.bitfield.disp16 = 1;
6958 }
6959 }
6960 else
6961 {
6962 /* For PC-relative branches, the width of the displacement
6963 is dependent upon data size, not address size. */
6964 override = (i.prefix[DATA_PREFIX] != 0);
6965 if (flag_code == CODE_64BIT)
6966 {
6967 if (override || i.suffix == WORD_MNEM_SUFFIX)
6968 bigdisp.bitfield.disp16 = 1;
6969 else
6970 {
6971 bigdisp.bitfield.disp32 = 1;
6972 bigdisp.bitfield.disp32s = 1;
6973 }
6974 }
6975 else
6976 {
6977 if (!override)
6978 override = (i.suffix == (flag_code != CODE_16BIT
6979 ? WORD_MNEM_SUFFIX
6980 : LONG_MNEM_SUFFIX));
6981 bigdisp.bitfield.disp32 = 1;
6982 if ((flag_code == CODE_16BIT) ^ override)
6983 {
6984 bigdisp.bitfield.disp32 = 0;
6985 bigdisp.bitfield.disp16 = 1;
6986 }
6987 }
6988 }
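  /* Hedged summary of the choices above: in 64-bit code a plain memory
     operand may use disp32/disp32s/disp64, an address-size (0x67) prefix
     restricts it to disp32, while for PC-relative branches the data size
     (a 0x66 prefix or `w' suffix) is what selects disp16 instead.  */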
6989 i.types[this_operand] = operand_type_or (i.types[this_operand],
6990 bigdisp);
6991
6992 exp = &disp_expressions[i.disp_operands];
6993 i.op[this_operand].disps = exp;
6994 i.disp_operands++;
6995 save_input_line_pointer = input_line_pointer;
6996 input_line_pointer = disp_start;
6997 END_STRING_AND_SAVE (disp_end);
6998
6999 #ifndef GCC_ASM_O_HACK
7000 #define GCC_ASM_O_HACK 0
7001 #endif
7002 #if GCC_ASM_O_HACK
7003 END_STRING_AND_SAVE (disp_end + 1);
7004 if (i.types[this_operand].bitfield.baseIndex
7005 && displacement_string_end[-1] == '+')
7006 {
7007 /* This hack is to avoid a warning when using the "o"
7008 constraint within gcc asm statements.
7009 For instance:
7010
7011 #define _set_tssldt_desc(n,addr,limit,type) \
7012 __asm__ __volatile__ ( \
7013 "movw %w2,%0\n\t" \
7014 "movw %w1,2+%0\n\t" \
7015 "rorl $16,%1\n\t" \
7016 "movb %b1,4+%0\n\t" \
7017 "movb %4,5+%0\n\t" \
7018 "movb $0,6+%0\n\t" \
7019 "movb %h1,7+%0\n\t" \
7020 "rorl $16,%1" \
7021 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7022
7023 This works great except that the output assembler ends
7024 up looking a bit weird if it turns out that there is
7025 no offset. You end up producing code that looks like:
7026
7027 #APP
7028 movw $235,(%eax)
7029 movw %dx,2+(%eax)
7030 rorl $16,%edx
7031 movb %dl,4+(%eax)
7032 movb $137,5+(%eax)
7033 movb $0,6+(%eax)
7034 movb %dh,7+(%eax)
7035 rorl $16,%edx
7036 #NO_APP
7037
7038 So here we provide the missing zero. */
7039
7040 *displacement_string_end = '0';
7041 }
7042 #endif
7043 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7044 if (gotfree_input_line)
7045 input_line_pointer = gotfree_input_line;
7046
7047 exp_seg = expression (exp);
7048
7049 SKIP_WHITESPACE ();
7050 if (*input_line_pointer)
7051 as_bad (_("junk `%s' after expression"), input_line_pointer);
7052 #if GCC_ASM_O_HACK
7053 RESTORE_END_STRING (disp_end + 1);
7054 #endif
7055 input_line_pointer = save_input_line_pointer;
7056 if (gotfree_input_line)
7057 {
7058 free (gotfree_input_line);
7059
7060 if (exp->X_op == O_constant || exp->X_op == O_register)
7061 exp->X_op = O_illegal;
7062 }
7063
7064 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7065
7066 RESTORE_END_STRING (disp_end);
7067
7068 return ret;
7069 }
7070
7071 static int
7072 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7073 i386_operand_type types, const char *disp_start)
7074 {
7075 i386_operand_type bigdisp;
7076 int ret = 1;
7077
7078 /* We do this to make sure that the section symbol is in
7079 the symbol table. We will ultimately change the relocation
7080 to be relative to the beginning of the section. */
7081 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7082 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7083 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7084 {
7085 if (exp->X_op != O_symbol)
7086 goto inv_disp;
7087
7088 if (S_IS_LOCAL (exp->X_add_symbol)
7089 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7090 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7091 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7092 exp->X_op = O_subtract;
7093 exp->X_op_symbol = GOT_symbol;
7094 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7095 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7096 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7097 i.reloc[this_operand] = BFD_RELOC_64;
7098 else
7099 i.reloc[this_operand] = BFD_RELOC_32;
7100 }
7101
7102 else if (exp->X_op == O_absent
7103 || exp->X_op == O_illegal
7104 || exp->X_op == O_big)
7105 {
7106 inv_disp:
7107 as_bad (_("missing or invalid displacement expression `%s'"),
7108 disp_start);
7109 ret = 0;
7110 }
7111
7112 else if (flag_code == CODE_64BIT
7113 && !i.prefix[ADDR_PREFIX]
7114 && exp->X_op == O_constant)
7115 {
7116	      /* Since the displacement is sign-extended to 64bit, don't allow
7117		 disp32, and turn off disp32s if the value is out of range.  */
7118 i.types[this_operand].bitfield.disp32 = 0;
7119 if (!fits_in_signed_long (exp->X_add_number))
7120 {
7121 i.types[this_operand].bitfield.disp32s = 0;
7122 if (i.types[this_operand].bitfield.baseindex)
7123 {
7124 as_bad (_("0x%lx out range of signed 32bit displacement"),
7125 (long) exp->X_add_number);
7126 ret = 0;
7127 }
7128 }
7129 }
7130
7131 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7132 else if (exp->X_op != O_constant
7133 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7134 && exp_seg != absolute_section
7135 && exp_seg != text_section
7136 && exp_seg != data_section
7137 && exp_seg != bss_section
7138 && exp_seg != undefined_section
7139 && !bfd_is_com_section (exp_seg))
7140 {
7141 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7142 ret = 0;
7143 }
7144 #endif
7145
7146 /* Check if this is a displacement only operand. */
7147 bigdisp = i.types[this_operand];
7148 bigdisp.bitfield.disp8 = 0;
7149 bigdisp.bitfield.disp16 = 0;
7150 bigdisp.bitfield.disp32 = 0;
7151 bigdisp.bitfield.disp32s = 0;
7152 bigdisp.bitfield.disp64 = 0;
7153 if (operand_type_all_zero (&bigdisp))
7154 i.types[this_operand] = operand_type_and (i.types[this_operand],
7155 types);
7156
7157 return ret;
7158 }
7159
7160 /* Make sure the memory operand we've been dealt is valid.
7161 Return 1 on success, 0 on a failure. */
7162
7163 static int
7164 i386_index_check (const char *operand_string)
7165 {
7166 int ok;
7167 const char *kind = "base/index";
7168 #if INFER_ADDR_PREFIX
7169 int fudged = 0;
7170
7171 tryprefix:
7172 #endif
7173 ok = 1;
7174 if (current_templates->start->opcode_modifier.isstring
7175 && !current_templates->start->opcode_modifier.immext
7176 && (current_templates->end[-1].opcode_modifier.isstring
7177 || i.mem_operands))
7178 {
7179 /* Memory operands of string insns are special in that they only allow
7180 a single register (rDI, rSI, or rBX) as their memory address. */
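      /* Hedged examples: `movs' implicitly addresses %ds:(%rsi) and
	 %es:(%rdi), while the non-W `xlat' uses (%rbx), hence the
	 rSI/rDI/rBX checks below.  */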
7181 unsigned int expected;
7182
7183 kind = "string address";
7184
7185 if (current_templates->start->opcode_modifier.w)
7186 {
7187 i386_operand_type type = current_templates->end[-1].operand_types[0];
7188
7189 if (!type.bitfield.baseindex
7190 || ((!i.mem_operands != !intel_syntax)
7191 && current_templates->end[-1].operand_types[1]
7192 .bitfield.baseindex))
7193 type = current_templates->end[-1].operand_types[1];
7194 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7195 }
7196 else
7197 expected = 3 /* rBX */;
7198
7199 if (!i.base_reg || i.index_reg
7200 || operand_type_check (i.types[this_operand], disp))
7201 ok = -1;
7202 else if (!(flag_code == CODE_64BIT
7203 ? i.prefix[ADDR_PREFIX]
7204 ? i.base_reg->reg_type.bitfield.reg32
7205 : i.base_reg->reg_type.bitfield.reg64
7206 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7207 ? i.base_reg->reg_type.bitfield.reg32
7208 : i.base_reg->reg_type.bitfield.reg16))
7209 ok = 0;
7210 else if (i.base_reg->reg_num != expected)
7211 ok = -1;
7212
7213 if (ok < 0)
7214 {
7215 unsigned int j;
7216
7217 for (j = 0; j < i386_regtab_size; ++j)
7218 if ((flag_code == CODE_64BIT
7219 ? i.prefix[ADDR_PREFIX]
7220 ? i386_regtab[j].reg_type.bitfield.reg32
7221 : i386_regtab[j].reg_type.bitfield.reg64
7222 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7223 ? i386_regtab[j].reg_type.bitfield.reg32
7224 : i386_regtab[j].reg_type.bitfield.reg16)
7225 && i386_regtab[j].reg_num == expected)
7226 break;
7227 gas_assert (j < i386_regtab_size);
7228 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7229 operand_string,
7230 intel_syntax ? '[' : '(',
7231 register_prefix,
7232 i386_regtab[j].reg_name,
7233 intel_syntax ? ']' : ')');
7234 ok = 1;
7235 }
7236 }
7237 else if (flag_code == CODE_64BIT)
7238 {
7239 if ((i.base_reg
7240 && ((i.prefix[ADDR_PREFIX] == 0
7241 && !i.base_reg->reg_type.bitfield.reg64)
7242 || (i.prefix[ADDR_PREFIX]
7243 && !i.base_reg->reg_type.bitfield.reg32))
7244 && (i.index_reg
7245 || i.base_reg->reg_num !=
7246 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7247 || (i.index_reg
7248 && !(i.index_reg->reg_type.bitfield.regxmm
7249 || i.index_reg->reg_type.bitfield.regymm)
7250 && (!i.index_reg->reg_type.bitfield.baseindex
7251 || (i.prefix[ADDR_PREFIX] == 0
7252 && i.index_reg->reg_num != RegRiz
7253 && !i.index_reg->reg_type.bitfield.reg64
7254 )
7255 || (i.prefix[ADDR_PREFIX]
7256 && i.index_reg->reg_num != RegEiz
7257 && !i.index_reg->reg_type.bitfield.reg32))))
7258 ok = 0;
7259 }
7260 else
7261 {
7262 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7263 {
7264 /* 16bit checks. */
7265 if ((i.base_reg
7266 && (!i.base_reg->reg_type.bitfield.reg16
7267 || !i.base_reg->reg_type.bitfield.baseindex))
7268 || (i.index_reg
7269 && (!i.index_reg->reg_type.bitfield.reg16
7270 || !i.index_reg->reg_type.bitfield.baseindex
7271 || !(i.base_reg
7272 && i.base_reg->reg_num < 6
7273 && i.index_reg->reg_num >= 6
7274 && i.log2_scale_factor == 0))))
7275 ok = 0;
7276 }
7277 else
7278 {
7279 /* 32bit checks. */
7280 if ((i.base_reg
7281 && !i.base_reg->reg_type.bitfield.reg32)
7282 || (i.index_reg
7283 && !i.index_reg->reg_type.bitfield.regxmm
7284 && !i.index_reg->reg_type.bitfield.regymm
7285 && ((!i.index_reg->reg_type.bitfield.reg32
7286 && i.index_reg->reg_num != RegEiz)
7287 || !i.index_reg->reg_type.bitfield.baseindex)))
7288 ok = 0;
7289 }
7290 }
7291 if (!ok)
7292 {
7293 #if INFER_ADDR_PREFIX
7294 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7295 {
7296 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7297 i.prefixes += 1;
7298 /* Change the size of any displacement too. At most one of
7299 Disp16 or Disp32 is set.
7300 FIXME. There doesn't seem to be any real need for separate
7301 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7302 Removing them would probably clean up the code quite a lot. */
7303 if (flag_code != CODE_64BIT
7304 && (i.types[this_operand].bitfield.disp16
7305 || i.types[this_operand].bitfield.disp32))
7306 i.types[this_operand]
7307 = operand_type_xor (i.types[this_operand], disp16_32);
7308 fudged = 1;
7309 goto tryprefix;
7310 }
7311 if (fudged)
7312 as_bad (_("`%s' is not a valid %s expression"),
7313 operand_string,
7314 kind);
7315 else
7316 #endif
7317 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7318 operand_string,
7319 flag_code_names[i.prefix[ADDR_PREFIX]
7320 ? flag_code == CODE_32BIT
7321 ? CODE_16BIT
7322 : CODE_32BIT
7323 : flag_code],
7324 kind);
7325 }
7326 return ok;
7327 }
7328
7329 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7330 on error. */
7331
7332 static int
7333 i386_att_operand (char *operand_string)
7334 {
7335 const reg_entry *r;
7336 char *end_op;
7337 char *op_string = operand_string;
7338
7339 if (is_space_char (*op_string))
7340 ++op_string;
7341
7342 /* We check for an absolute prefix (differentiating,
7343     for example, 'jmp pc_relative_label' from 'jmp *absolute_label').  */
7344 if (*op_string == ABSOLUTE_PREFIX)
7345 {
7346 ++op_string;
7347 if (is_space_char (*op_string))
7348 ++op_string;
7349 i.types[this_operand].bitfield.jumpabsolute = 1;
7350 }
7351
7352 /* Check if operand is a register. */
7353 if ((r = parse_register (op_string, &end_op)) != NULL)
7354 {
7355 i386_operand_type temp;
7356
7357 /* Check for a segment override by searching for ':' after a
7358 segment register. */
7359 op_string = end_op;
7360 if (is_space_char (*op_string))
7361 ++op_string;
7362 if (*op_string == ':'
7363 && (r->reg_type.bitfield.sreg2
7364 || r->reg_type.bitfield.sreg3))
7365 {
7366 switch (r->reg_num)
7367 {
7368 case 0:
7369 i.seg[i.mem_operands] = &es;
7370 break;
7371 case 1:
7372 i.seg[i.mem_operands] = &cs;
7373 break;
7374 case 2:
7375 i.seg[i.mem_operands] = &ss;
7376 break;
7377 case 3:
7378 i.seg[i.mem_operands] = &ds;
7379 break;
7380 case 4:
7381 i.seg[i.mem_operands] = &fs;
7382 break;
7383 case 5:
7384 i.seg[i.mem_operands] = &gs;
7385 break;
7386 }
7387
7388 /* Skip the ':' and whitespace. */
7389 ++op_string;
7390 if (is_space_char (*op_string))
7391 ++op_string;
7392
7393 if (!is_digit_char (*op_string)
7394 && !is_identifier_char (*op_string)
7395 && *op_string != '('
7396 && *op_string != ABSOLUTE_PREFIX)
7397 {
7398 as_bad (_("bad memory operand `%s'"), op_string);
7399 return 0;
7400 }
7401 /* Handle case of %es:*foo. */
7402 if (*op_string == ABSOLUTE_PREFIX)
7403 {
7404 ++op_string;
7405 if (is_space_char (*op_string))
7406 ++op_string;
7407 i.types[this_operand].bitfield.jumpabsolute = 1;
7408 }
7409 goto do_memory_reference;
7410 }
7411 if (*op_string)
7412 {
7413 as_bad (_("junk `%s' after register"), op_string);
7414 return 0;
7415 }
7416 temp = r->reg_type;
7417 temp.bitfield.baseindex = 0;
7418 i.types[this_operand] = operand_type_or (i.types[this_operand],
7419 temp);
7420 i.types[this_operand].bitfield.unspecified = 0;
7421 i.op[this_operand].regs = r;
7422 i.reg_operands++;
7423 }
7424 else if (*op_string == REGISTER_PREFIX)
7425 {
7426 as_bad (_("bad register name `%s'"), op_string);
7427 return 0;
7428 }
7429 else if (*op_string == IMMEDIATE_PREFIX)
7430 {
7431 ++op_string;
7432 if (i.types[this_operand].bitfield.jumpabsolute)
7433 {
7434 as_bad (_("immediate operand illegal with absolute jump"));
7435 return 0;
7436 }
7437 if (!i386_immediate (op_string))
7438 return 0;
7439 }
7440 else if (is_digit_char (*op_string)
7441 || is_identifier_char (*op_string)
7442 || *op_string == '(')
7443 {
7444 /* This is a memory reference of some sort. */
7445 char *base_string;
7446
7447 /* Start and end of displacement string expression (if found). */
7448 char *displacement_string_start;
7449 char *displacement_string_end;
7450
7451 do_memory_reference:
7452 if ((i.mem_operands == 1
7453 && !current_templates->start->opcode_modifier.isstring)
7454 || i.mem_operands == 2)
7455 {
7456 as_bad (_("too many memory references for `%s'"),
7457 current_templates->start->name);
7458 return 0;
7459 }
7460
7461 /* Check for base index form. We detect the base index form by
7462 looking for an ')' at the end of the operand, searching
7463 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7464 after the '('. */
7465 base_string = op_string + strlen (op_string);
7466
7467 --base_string;
7468 if (is_space_char (*base_string))
7469 --base_string;
7470
7471	  /* If we only have a displacement, set up for it to be parsed later.  */
7472 displacement_string_start = op_string;
7473 displacement_string_end = base_string + 1;
7474
7475 if (*base_string == ')')
7476 {
7477 char *temp_string;
7478 unsigned int parens_balanced = 1;
7479 /* We've already checked that the number of left & right ()'s are
7480 equal, so this loop will not be infinite. */
7481 do
7482 {
7483 base_string--;
7484 if (*base_string == ')')
7485 parens_balanced++;
7486 if (*base_string == '(')
7487 parens_balanced--;
7488 }
7489 while (parens_balanced);
7490
7491 temp_string = base_string;
7492
7493 /* Skip past '(' and whitespace. */
7494 ++base_string;
7495 if (is_space_char (*base_string))
7496 ++base_string;
7497
7498 if (*base_string == ','
7499 || ((i.base_reg = parse_register (base_string, &end_op))
7500 != NULL))
7501 {
7502 displacement_string_end = temp_string;
7503
7504 i.types[this_operand].bitfield.baseindex = 1;
7505
7506 if (i.base_reg)
7507 {
7508 base_string = end_op;
7509 if (is_space_char (*base_string))
7510 ++base_string;
7511 }
7512
7513 /* There may be an index reg or scale factor here. */
7514 if (*base_string == ',')
7515 {
7516 ++base_string;
7517 if (is_space_char (*base_string))
7518 ++base_string;
7519
7520 if ((i.index_reg = parse_register (base_string, &end_op))
7521 != NULL)
7522 {
7523 base_string = end_op;
7524 if (is_space_char (*base_string))
7525 ++base_string;
7526 if (*base_string == ',')
7527 {
7528 ++base_string;
7529 if (is_space_char (*base_string))
7530 ++base_string;
7531 }
7532 else if (*base_string != ')')
7533 {
7534 as_bad (_("expecting `,' or `)' "
7535 "after index register in `%s'"),
7536 operand_string);
7537 return 0;
7538 }
7539 }
7540 else if (*base_string == REGISTER_PREFIX)
7541 {
7542 as_bad (_("bad register name `%s'"), base_string);
7543 return 0;
7544 }
7545
7546 /* Check for scale factor. */
7547 if (*base_string != ')')
7548 {
7549 char *end_scale = i386_scale (base_string);
7550
7551 if (!end_scale)
7552 return 0;
7553
7554 base_string = end_scale;
7555 if (is_space_char (*base_string))
7556 ++base_string;
7557 if (*base_string != ')')
7558 {
7559 as_bad (_("expecting `)' "
7560 "after scale factor in `%s'"),
7561 operand_string);
7562 return 0;
7563 }
7564 }
7565 else if (!i.index_reg)
7566 {
7567 as_bad (_("expecting index register or scale factor "
7568 "after `,'; got '%c'"),
7569 *base_string);
7570 return 0;
7571 }
7572 }
7573 else if (*base_string != ')')
7574 {
7575 as_bad (_("expecting `,' or `)' "
7576 "after base register in `%s'"),
7577 operand_string);
7578 return 0;
7579 }
7580 }
7581 else if (*base_string == REGISTER_PREFIX)
7582 {
7583 as_bad (_("bad register name `%s'"), base_string);
7584 return 0;
7585 }
7586 }
7587
7588 /* If there's an expression beginning the operand, parse it,
7589 assuming displacement_string_start and
7590 displacement_string_end are meaningful. */
7591 if (displacement_string_start != displacement_string_end)
7592 {
7593 if (!i386_displacement (displacement_string_start,
7594 displacement_string_end))
7595 return 0;
7596 }
7597
7598 /* Special case for (%dx) while doing input/output op. */
7599 if (i.base_reg
7600 && operand_type_equal (&i.base_reg->reg_type,
7601 &reg16_inoutportreg)
7602 && i.index_reg == 0
7603 && i.log2_scale_factor == 0
7604 && i.seg[i.mem_operands] == 0
7605 && !operand_type_check (i.types[this_operand], disp))
7606 {
7607 i.types[this_operand] = inoutportreg;
7608 return 1;
7609 }
7610
7611 if (i386_index_check (operand_string) == 0)
7612 return 0;
7613 i.types[this_operand].bitfield.mem = 1;
7614 i.mem_operands++;
7615 }
7616 else
7617 {
7618 /* It's not a memory operand; argh! */
7619 as_bad (_("invalid char %s beginning operand %d `%s'"),
7620 output_invalid (*op_string),
7621 this_operand + 1,
7622 op_string);
7623 return 0;
7624 }
7625 return 1; /* Normal return. */
7626 }
7627 \f
7628 /* md_estimate_size_before_relax()
7629
7630 Called just before relax() for rs_machine_dependent frags. The x86
7631 assembler uses these frags to handle variable size jump
7632 instructions.
7633
7634 Any symbol that is now undefined will not become defined.
7635 Return the correct fr_subtype in the frag.
7636 Return the initial "guess for variable size of frag" to caller.
7637 The guess is actually the growth beyond the fixed part. Whatever
7638 we do to grow the fixed or variable part contributes to our
7639 returned value. */
7640
7641 int
7642 md_estimate_size_before_relax (fragS *fragP, segT segment)
7643 {
7644 /* We've already got fragP->fr_subtype right; all we have to do is
7645 check for un-relaxable symbols. On an ELF system, we can't relax
7646 an externally visible symbol, because it may be overridden by a
7647 shared library. */
7648 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7649 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7650 || (IS_ELF
7651 && (S_IS_EXTERNAL (fragP->fr_symbol)
7652 || S_IS_WEAK (fragP->fr_symbol)
7653 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7654 & BSF_GNU_INDIRECT_FUNCTION))))
7655 #endif
7656 #if defined (OBJ_COFF) && defined (TE_PE)
7657 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7658 && S_IS_WEAK (fragP->fr_symbol))
7659 #endif
7660 )
7661 {
7662 /* Symbol is undefined in this segment, or we need to keep a
7663 reloc so that weak symbols can be overridden. */
7664 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7665 enum bfd_reloc_code_real reloc_type;
7666 unsigned char *opcode;
7667 int old_fr_fix;
7668
7669 if (fragP->fr_var != NO_RELOC)
7670 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7671 else if (size == 2)
7672 reloc_type = BFD_RELOC_16_PCREL;
7673 else
7674 reloc_type = BFD_RELOC_32_PCREL;
7675
7676 old_fr_fix = fragP->fr_fix;
7677 opcode = (unsigned char *) fragP->fr_opcode;
7678
7679 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7680 {
7681 case UNCOND_JUMP:
7682 /* Make jmp (0xeb) a (d)word displacement jump. */
7683 opcode[0] = 0xe9;
7684 fragP->fr_fix += size;
7685 fix_new (fragP, old_fr_fix, size,
7686 fragP->fr_symbol,
7687 fragP->fr_offset, 1,
7688 reloc_type);
7689 break;
7690
7691 case COND_JUMP86:
7692 if (size == 2
7693 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7694 {
7695 /* Negate the condition, and branch past an
7696 unconditional jump. */
7697 opcode[0] ^= 1;
7698 opcode[1] = 3;
7699 /* Insert an unconditional jump. */
7700 opcode[2] = 0xe9;
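	      /* Hedged byte-level example: `je target' (74 cc) becomes
		 75 03 (jne .+3) followed by e9 and a 2-byte displacement,
		 i.e. the jmp is skipped unless the original condition
		 holds.  */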
7701 /* We added two extra opcode bytes, and have a two byte
7702 offset. */
7703 fragP->fr_fix += 2 + 2;
7704 fix_new (fragP, old_fr_fix + 2, 2,
7705 fragP->fr_symbol,
7706 fragP->fr_offset, 1,
7707 reloc_type);
7708 break;
7709 }
7710 /* Fall through. */
7711
7712 case COND_JUMP:
7713 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7714 {
7715 fixS *fixP;
7716
7717 fragP->fr_fix += 1;
7718 fixP = fix_new (fragP, old_fr_fix, 1,
7719 fragP->fr_symbol,
7720 fragP->fr_offset, 1,
7721 BFD_RELOC_8_PCREL);
7722 fixP->fx_signed = 1;
7723 break;
7724 }
7725
7726 /* This changes the byte-displacement jump 0x7N
7727 to the (d)word-displacement jump 0x0f,0x8N. */
7728 opcode[1] = opcode[0] + 0x10;
7729 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7730 /* We've added an opcode byte. */
7731 fragP->fr_fix += 1 + size;
7732 fix_new (fragP, old_fr_fix + 1, size,
7733 fragP->fr_symbol,
7734 fragP->fr_offset, 1,
7735 reloc_type);
7736 break;
7737
7738 default:
7739 BAD_CASE (fragP->fr_subtype);
7740 break;
7741 }
7742 frag_wane (fragP);
7743 return fragP->fr_fix - old_fr_fix;
7744 }
7745
7746 /* Guess size depending on current relax state. Initially the relax
7747 state will correspond to a short jump and we return 1, because
7748 the variable part of the frag (the branch offset) is one byte
7749 long. However, we can relax a section more than once and in that
7750 case we must either set fr_subtype back to the unrelaxed state,
7751 or return the value for the appropriate branch. */
7752 return md_relax_table[fragP->fr_subtype].rlx_length;
7753 }
7754
7755 /* Called after relax() is finished.
7756
7757 In: Address of frag.
7758 fr_type == rs_machine_dependent.
7759 fr_subtype is what the address relaxed to.
7760
7761 Out: Any fixSs and constants are set up.
7762 Caller will turn frag into a ".space 0". */
7763
7764 void
7765 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7766 fragS *fragP)
7767 {
7768 unsigned char *opcode;
7769 unsigned char *where_to_put_displacement = NULL;
7770 offsetT target_address;
7771 offsetT opcode_address;
7772 unsigned int extension = 0;
7773 offsetT displacement_from_opcode_start;
7774
7775 opcode = (unsigned char *) fragP->fr_opcode;
7776
7777 /* Address we want to reach in file space. */
7778 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7779
7780 /* Address opcode resides at in file space. */
7781 opcode_address = fragP->fr_address + fragP->fr_fix;
7782
7783 /* Displacement from opcode start to fill into instruction. */
7784 displacement_from_opcode_start = target_address - opcode_address;
7785
7786 if ((fragP->fr_subtype & BIG) == 0)
7787 {
7788 /* Don't have to change opcode. */
7789 extension = 1; /* 1 opcode + 1 displacement */
7790 where_to_put_displacement = &opcode[1];
7791 }
7792 else
7793 {
7794 if (no_cond_jump_promotion
7795 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7796 as_warn_where (fragP->fr_file, fragP->fr_line,
7797 _("long jump required"));
7798
7799 switch (fragP->fr_subtype)
7800 {
7801 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7802 extension = 4; /* 1 opcode + 4 displacement */
7803 opcode[0] = 0xe9;
7804 where_to_put_displacement = &opcode[1];
7805 break;
7806
7807 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7808 extension = 2; /* 1 opcode + 2 displacement */
7809 opcode[0] = 0xe9;
7810 where_to_put_displacement = &opcode[1];
7811 break;
7812
7813 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7814 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7815 extension = 5; /* 2 opcode + 4 displacement */
7816 opcode[1] = opcode[0] + 0x10;
7817 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7818 where_to_put_displacement = &opcode[2];
7819 break;
7820
7821 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7822 extension = 3; /* 2 opcode + 2 displacement */
7823 opcode[1] = opcode[0] + 0x10;
7824 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7825 where_to_put_displacement = &opcode[2];
7826 break;
7827
7828 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7829 extension = 4;
7830 opcode[0] ^= 1;
7831 opcode[1] = 3;
7832 opcode[2] = 0xe9;
7833 where_to_put_displacement = &opcode[3];
7834 break;
7835
7836 default:
7837 BAD_CASE (fragP->fr_subtype);
7838 break;
7839 }
7840 }
7841
7842	  /* If the size is less than four we are sure that the operand fits,
7843	     but if it's 4, then it could be that the displacement is larger
7844	     than +/- 2GB.  */
7845 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7846 && object_64bit
7847 && ((addressT) (displacement_from_opcode_start - extension
7848 + ((addressT) 1 << 31))
7849 > (((addressT) 2 << 31) - 1)))
7850 {
7851 as_bad_where (fragP->fr_file, fragP->fr_line,
7852 _("jump target out of range"));
7853 /* Make us emit 0. */
7854 displacement_from_opcode_start = extension;
7855 }
7856 /* Now put displacement after opcode. */
7857 md_number_to_chars ((char *) where_to_put_displacement,
7858 (valueT) (displacement_from_opcode_start - extension),
7859 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7860 fragP->fr_fix += extension;
7861 }
7862 \f
7863 /* Apply a fixup (fixP) to segment data, once it has been determined
7864 by our caller that we have all the info we need to fix it up.
7865
7866 Parameter valP is the pointer to the value of the bits.
7867
7868 On the 386, immediates, displacements, and data pointers are all in
7869 the same (little-endian) format, so we don't need to care about which
7870 we are handling. */
7871
7872 void
7873 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7874 {
7875 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7876 valueT value = *valP;
7877
7878 #if !defined (TE_Mach)
7879 if (fixP->fx_pcrel)
7880 {
7881 switch (fixP->fx_r_type)
7882 {
7883 default:
7884 break;
7885
7886 case BFD_RELOC_64:
7887 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7888 break;
7889 case BFD_RELOC_32:
7890 case BFD_RELOC_X86_64_32S:
7891 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7892 break;
7893 case BFD_RELOC_16:
7894 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7895 break;
7896 case BFD_RELOC_8:
7897 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7898 break;
7899 }
7900 }
7901
7902 if (fixP->fx_addsy != NULL
7903 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7904 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7905 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7906 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7907 && !use_rela_relocations)
7908 {
7909 /* This is a hack. There should be a better way to handle this.
7910 This covers for the fact that bfd_install_relocation will
7911 subtract the current location (for partial_inplace, PC relative
7912 relocations); see more below. */
7913 #ifndef OBJ_AOUT
7914 if (IS_ELF
7915 #ifdef TE_PE
7916 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7917 #endif
7918 )
7919 value += fixP->fx_where + fixP->fx_frag->fr_address;
7920 #endif
7921 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7922 if (IS_ELF)
7923 {
7924 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7925
7926 if ((sym_seg == seg
7927 || (symbol_section_p (fixP->fx_addsy)
7928 && sym_seg != absolute_section))
7929 && !generic_force_reloc (fixP))
7930 {
7931 /* Yes, we add the values in twice. This is because
7932 bfd_install_relocation subtracts them out again. I think
7933 bfd_install_relocation is broken, but I don't dare change
7934 it. FIXME. */
7935 value += fixP->fx_where + fixP->fx_frag->fr_address;
7936 }
7937 }
7938 #endif
7939 #if defined (OBJ_COFF) && defined (TE_PE)
7940 /* For some reason, the PE format does not store a
7941 section address offset for a PC relative symbol. */
7942 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7943 || S_IS_WEAK (fixP->fx_addsy))
7944 value += md_pcrel_from (fixP);
7945 #endif
7946 }
7947 #if defined (OBJ_COFF) && defined (TE_PE)
7948 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7949 {
7950 value -= S_GET_VALUE (fixP->fx_addsy);
7951 }
7952 #endif
7953
7954 /* Fix a few things - the dynamic linker expects certain values here,
7955 and we must not disappoint it. */
7956 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7957 if (IS_ELF && fixP->fx_addsy)
7958 switch (fixP->fx_r_type)
7959 {
7960 case BFD_RELOC_386_PLT32:
7961 case BFD_RELOC_X86_64_PLT32:
7962 /* Make the jump instruction point to the address of the operand. At
7963 runtime we merely add the offset to the actual PLT entry. */
7964 value = -4;
7965 break;
7966
7967 case BFD_RELOC_386_TLS_GD:
7968 case BFD_RELOC_386_TLS_LDM:
7969 case BFD_RELOC_386_TLS_IE_32:
7970 case BFD_RELOC_386_TLS_IE:
7971 case BFD_RELOC_386_TLS_GOTIE:
7972 case BFD_RELOC_386_TLS_GOTDESC:
7973 case BFD_RELOC_X86_64_TLSGD:
7974 case BFD_RELOC_X86_64_TLSLD:
7975 case BFD_RELOC_X86_64_GOTTPOFF:
7976 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7977 value = 0; /* Fully resolved at runtime. No addend. */
7978 /* Fallthrough */
7979 case BFD_RELOC_386_TLS_LE:
7980 case BFD_RELOC_386_TLS_LDO_32:
7981 case BFD_RELOC_386_TLS_LE_32:
7982 case BFD_RELOC_X86_64_DTPOFF32:
7983 case BFD_RELOC_X86_64_DTPOFF64:
7984 case BFD_RELOC_X86_64_TPOFF32:
7985 case BFD_RELOC_X86_64_TPOFF64:
7986 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7987 break;
7988
7989 case BFD_RELOC_386_TLS_DESC_CALL:
7990 case BFD_RELOC_X86_64_TLSDESC_CALL:
7991 value = 0; /* Fully resolved at runtime. No addend. */
7992 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7993 fixP->fx_done = 0;
7994 return;
7995
7996 case BFD_RELOC_386_GOT32:
7997 case BFD_RELOC_X86_64_GOT32:
7998 value = 0; /* Fully resolved at runtime. No addend. */
7999 break;
8000
8001 case BFD_RELOC_VTABLE_INHERIT:
8002 case BFD_RELOC_VTABLE_ENTRY:
8003 fixP->fx_done = 0;
8004 return;
8005
8006 default:
8007 break;
8008 }
8009 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8010 *valP = value;
8011 #endif /* !defined (TE_Mach) */
8012
8013 /* Are we finished with this relocation now? */
8014 if (fixP->fx_addsy == NULL)
8015 fixP->fx_done = 1;
8016 #if defined (OBJ_COFF) && defined (TE_PE)
8017 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8018 {
8019 fixP->fx_done = 0;
8020 /* Remember value for tc_gen_reloc. */
8021 fixP->fx_addnumber = value;
8022 /* Clear out the frag for now. */
8023 value = 0;
8024 }
8025 #endif
8026 else if (use_rela_relocations)
8027 {
8028 fixP->fx_no_overflow = 1;
8029 /* Remember value for tc_gen_reloc. */
8030 fixP->fx_addnumber = value;
8031 value = 0;
8032 }
8033
8034 md_number_to_chars (p, value, fixP->fx_size);
8035 }
8036 \f
8037 char *
8038 md_atof (int type, char *litP, int *sizeP)
8039 {
8040 /* This outputs the LITTLENUMs in REVERSE order,
8041 in accord with the little-endian 386. */
8042 return ieee_md_atof (type, litP, sizeP, FALSE);
8043 }
8044 \f
8045 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8046
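/* Return a printable representation of the invalid character C for use
   in diagnostics: the character itself if it is printable, otherwise
   its hex value.  */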
8047 static char *
8048 output_invalid (int c)
8049 {
8050 if (ISPRINT (c))
8051 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8052 "'%c'", c);
8053 else
8054 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8055 "(0x%x)", (unsigned char) c);
8056 return output_invalid_buf;
8057 }
8058
8059 /* REG_STRING starts *before* REGISTER_PREFIX. */
8060
8061 static const reg_entry *
8062 parse_real_register (char *reg_string, char **end_op)
8063 {
8064 char *s = reg_string;
8065 char *p;
8066 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8067 const reg_entry *r;
8068
8069 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8070 if (*s == REGISTER_PREFIX)
8071 ++s;
8072
8073 if (is_space_char (*s))
8074 ++s;
8075
8076 p = reg_name_given;
8077 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8078 {
8079 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8080 return (const reg_entry *) NULL;
8081 s++;
8082 }
8083
8084 /* For naked regs, make sure that we are not dealing with an identifier.
8085 This prevents confusing an identifier like `eax_var' with register
8086 `eax'. */
8087 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8088 return (const reg_entry *) NULL;
8089
8090 *end_op = s;
8091
8092 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8093
8094 /* Handle floating point regs, allowing spaces in the (i) part. */
8095 if (r == i386_regtab /* %st is first entry of table */)
8096 {
8097 if (is_space_char (*s))
8098 ++s;
8099 if (*s == '(')
8100 {
8101 ++s;
8102 if (is_space_char (*s))
8103 ++s;
8104 if (*s >= '0' && *s <= '7')
8105 {
8106 int fpr = *s - '0';
8107 ++s;
8108 if (is_space_char (*s))
8109 ++s;
8110 if (*s == ')')
8111 {
8112 *end_op = s + 1;
8113 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8114 know (r);
8115 return r + fpr;
8116 }
8117 }
8118 /* We have "%st(" then garbage. */
8119 return (const reg_entry *) NULL;
8120 }
8121 }
8122
8123 if (r == NULL || allow_pseudo_reg)
8124 return r;
8125
8126 if (operand_type_all_zero (&r->reg_type))
8127 return (const reg_entry *) NULL;
8128
8129 if ((r->reg_type.bitfield.reg32
8130 || r->reg_type.bitfield.sreg3
8131 || r->reg_type.bitfield.control
8132 || r->reg_type.bitfield.debug
8133 || r->reg_type.bitfield.test)
8134 && !cpu_arch_flags.bitfield.cpui386)
8135 return (const reg_entry *) NULL;
8136
8137 if (r->reg_type.bitfield.floatreg
8138 && !cpu_arch_flags.bitfield.cpu8087
8139 && !cpu_arch_flags.bitfield.cpu287
8140 && !cpu_arch_flags.bitfield.cpu387)
8141 return (const reg_entry *) NULL;
8142
8143 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8144 return (const reg_entry *) NULL;
8145
8146 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8147 return (const reg_entry *) NULL;
8148
8149 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8150 return (const reg_entry *) NULL;
8151
8152 /* Don't allow fake index registers unless allow_index_reg is non-zero. */
8153 if (!allow_index_reg
8154 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8155 return (const reg_entry *) NULL;
8156
8157 if (((r->reg_flags & (RegRex64 | RegRex))
8158 || r->reg_type.bitfield.reg64)
8159 && (!cpu_arch_flags.bitfield.cpulm
8160 || !operand_type_equal (&r->reg_type, &control))
8161 && flag_code != CODE_64BIT)
8162 return (const reg_entry *) NULL;
8163
8164 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8165 return (const reg_entry *) NULL;
8166
8167 return r;
8168 }
8169
8170 /* REG_STRING starts *before* REGISTER_PREFIX. */
8171
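/* Like parse_real_register, but also accept a symbol that has been
   equated to a register (its value expression lives in reg_section).  */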
8172 static const reg_entry *
8173 parse_register (char *reg_string, char **end_op)
8174 {
8175 const reg_entry *r;
8176
8177 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8178 r = parse_real_register (reg_string, end_op);
8179 else
8180 r = NULL;
8181 if (!r)
8182 {
8183 char *save = input_line_pointer;
8184 char c;
8185 symbolS *symbolP;
8186
8187 input_line_pointer = reg_string;
8188 c = get_symbol_end ();
8189 symbolP = symbol_find (reg_string);
8190 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8191 {
8192 const expressionS *e = symbol_get_value_expression (symbolP);
8193
8194 know (e->X_op == O_register);
8195 know (e->X_add_number >= 0
8196 && (valueT) e->X_add_number < i386_regtab_size);
8197 r = i386_regtab + e->X_add_number;
8198 *end_op = input_line_pointer;
8199 }
8200 *input_line_pointer = c;
8201 input_line_pointer = save;
8202 }
8203 return r;
8204 }
8205
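/* md_parse_name hook: try to parse NAME as a register.  On success,
   fill in E as an O_register expression and return 1; otherwise fall
   back to the Intel-specific name parser (Intel syntax only), or
   return 0.  */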
8206 int
8207 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8208 {
8209 const reg_entry *r;
8210 char *end = input_line_pointer;
8211
8212 *end = *nextcharP;
8213 r = parse_register (name, &input_line_pointer);
8214 if (r && end <= input_line_pointer)
8215 {
8216 *nextcharP = *input_line_pointer;
8217 *input_line_pointer = 0;
8218 e->X_op = O_register;
8219 e->X_add_number = r - i386_regtab;
8220 return 1;
8221 }
8222 input_line_pointer = end;
8223 *end = 0;
8224 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8225 }
8226
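/* Handle operand syntax the generic expression code does not know
   about: registers introduced by REGISTER_PREFIX, and bracketed memory
   expressions in Intel syntax.  */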
8227 void
8228 md_operand (expressionS *e)
8229 {
8230 char *end;
8231 const reg_entry *r;
8232
8233 switch (*input_line_pointer)
8234 {
8235 case REGISTER_PREFIX:
8236 r = parse_real_register (input_line_pointer, &end);
8237 if (r)
8238 {
8239 e->X_op = O_register;
8240 e->X_add_number = r - i386_regtab;
8241 input_line_pointer = end;
8242 }
8243 break;
8244
8245 case '[':
8246 gas_assert (intel_syntax);
8247 end = input_line_pointer++;
8248 expression (e);
8249 if (*input_line_pointer == ']')
8250 {
8251 ++input_line_pointer;
8252 e->X_op_symbol = make_expr_symbol (e);
8253 e->X_add_symbol = NULL;
8254 e->X_add_number = 0;
8255 e->X_op = O_index;
8256 }
8257 else
8258 {
8259 e->X_op = O_absent;
8260 input_line_pointer = end;
8261 }
8262 break;
8263 }
8264 }
8265
8266 \f
8267 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8268 const char *md_shortopts = "kVQ:sqn";
8269 #else
8270 const char *md_shortopts = "qn";
8271 #endif
8272
8273 #define OPTION_32 (OPTION_MD_BASE + 0)
8274 #define OPTION_64 (OPTION_MD_BASE + 1)
8275 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8276 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8277 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8278 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8279 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8280 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8281 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8282 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8283 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8284 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8285 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8286 #define OPTION_X32 (OPTION_MD_BASE + 13)
8287
8288 struct option md_longopts[] =
8289 {
8290 {"32", no_argument, NULL, OPTION_32},
8291 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8292 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8293 {"64", no_argument, NULL, OPTION_64},
8294 #endif
8295 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8296 {"x32", no_argument, NULL, OPTION_X32},
8297 #endif
8298 {"divide", no_argument, NULL, OPTION_DIVIDE},
8299 {"march", required_argument, NULL, OPTION_MARCH},
8300 {"mtune", required_argument, NULL, OPTION_MTUNE},
8301 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8302 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8303 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8304 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8305 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8306 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8307 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8308 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8309 {NULL, no_argument, NULL, 0}
8310 };
8311 size_t md_longopts_size = sizeof (md_longopts);
8312
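/* Handle a machine dependent command line option.  Return non-zero if
   the option was recognized, zero otherwise.  */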
8313 int
8314 md_parse_option (int c, char *arg)
8315 {
8316 unsigned int j;
8317 char *arch, *next;
8318
8319 switch (c)
8320 {
8321 case 'n':
8322 optimize_align_code = 0;
8323 break;
8324
8325 case 'q':
8326 quiet_warnings = 1;
8327 break;
8328
8329 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8330 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8331 should be emitted or not. FIXME: Not implemented. */
8332 case 'Q':
8333 break;
8334
8335 /* -V: SVR4 argument to print version ID. */
8336 case 'V':
8337 print_version_id ();
8338 break;
8339
8340 /* -k: Ignore for FreeBSD compatibility. */
8341 case 'k':
8342 break;
8343
8344 case 's':
8345 /* -s: On i386 Solaris, this tells the native assembler to use
8346 .stab instead of .stab.excl. We always use .stab anyhow. */
8347 break;
8348 #endif
8349 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8350 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8351 case OPTION_64:
8352 {
8353 const char **list, **l;
8354
8355 list = bfd_target_list ();
8356 for (l = list; *l != NULL; l++)
8357 if (CONST_STRNEQ (*l, "elf64-x86-64")
8358 || strcmp (*l, "coff-x86-64") == 0
8359 || strcmp (*l, "pe-x86-64") == 0
8360 || strcmp (*l, "pei-x86-64") == 0
8361 || strcmp (*l, "mach-o-x86-64") == 0)
8362 {
8363 default_arch = "x86_64";
8364 break;
8365 }
8366 if (*l == NULL)
8367 as_fatal (_("no compiled-in support for x86_64"));
8368 free (list);
8369 }
8370 break;
8371 #endif
8372
8373 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8374 case OPTION_X32:
8375 if (IS_ELF)
8376 {
8377 const char **list, **l;
8378
8379 list = bfd_target_list ();
8380 for (l = list; *l != NULL; l++)
8381 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8382 {
8383 default_arch = "x86_64:32";
8384 break;
8385 }
8386 if (*l == NULL)
8387 as_fatal (_("no compiled-in support for 32bit x86_64"));
8388 free (list);
8389 }
8390 else
8391 as_fatal (_("32bit x86_64 is only supported for ELF"));
8392 break;
8393 #endif
8394
8395 case OPTION_32:
8396 default_arch = "i386";
8397 break;
8398
8399 case OPTION_DIVIDE:
8400 #ifdef SVR4_COMMENT_CHARS
8401 {
8402 char *n, *t;
8403 const char *s;
8404
8405 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8406 t = n;
8407 for (s = i386_comment_chars; *s != '\0'; s++)
8408 if (*s != '/')
8409 *t++ = *s;
8410 *t = '\0';
8411 i386_comment_chars = n;
8412 }
8413 #endif
8414 break;
8415
8416 case OPTION_MARCH:
8417 arch = xstrdup (arg);
8418 do
8419 {
8420 if (*arch == '.')
8421 as_fatal (_("invalid -march= option: `%s'"), arg);
8422 next = strchr (arch, '+');
8423 if (next)
8424 *next++ = '\0';
8425 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8426 {
8427 if (strcmp (arch, cpu_arch [j].name) == 0)
8428 {
8429 /* Processor. */
8430 if (! cpu_arch[j].flags.bitfield.cpui386)
8431 continue;
8432
8433 cpu_arch_name = cpu_arch[j].name;
8434 cpu_sub_arch_name = NULL;
8435 cpu_arch_flags = cpu_arch[j].flags;
8436 cpu_arch_isa = cpu_arch[j].type;
8437 cpu_arch_isa_flags = cpu_arch[j].flags;
8438 if (!cpu_arch_tune_set)
8439 {
8440 cpu_arch_tune = cpu_arch_isa;
8441 cpu_arch_tune_flags = cpu_arch_isa_flags;
8442 }
8443 break;
8444 }
8445 else if (*cpu_arch [j].name == '.'
8446 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8447 {
8448 /* ISA extension. */
8449 i386_cpu_flags flags;
8450
8451 if (!cpu_arch[j].negated)
8452 flags = cpu_flags_or (cpu_arch_flags,
8453 cpu_arch[j].flags);
8454 else
8455 flags = cpu_flags_and_not (cpu_arch_flags,
8456 cpu_arch[j].flags);
8457 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8458 {
8459 if (cpu_sub_arch_name)
8460 {
8461 char *name = cpu_sub_arch_name;
8462 cpu_sub_arch_name = concat (name,
8463 cpu_arch[j].name,
8464 (const char *) NULL);
8465 free (name);
8466 }
8467 else
8468 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8469 cpu_arch_flags = flags;
8470 cpu_arch_isa_flags = flags;
8471 }
8472 break;
8473 }
8474 }
8475
8476 if (j >= ARRAY_SIZE (cpu_arch))
8477 as_fatal (_("invalid -march= option: `%s'"), arg);
8478
8479 arch = next;
8480 }
8481 while (next != NULL);
8482 break;
8483
8484 case OPTION_MTUNE:
8485 if (*arg == '.')
8486 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8487 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8488 {
8489 if (strcmp (arg, cpu_arch [j].name) == 0)
8490 {
8491 cpu_arch_tune_set = 1;
8492 cpu_arch_tune = cpu_arch [j].type;
8493 cpu_arch_tune_flags = cpu_arch[j].flags;
8494 break;
8495 }
8496 }
8497 if (j >= ARRAY_SIZE (cpu_arch))
8498 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8499 break;
8500
8501 case OPTION_MMNEMONIC:
8502 if (strcasecmp (arg, "att") == 0)
8503 intel_mnemonic = 0;
8504 else if (strcasecmp (arg, "intel") == 0)
8505 intel_mnemonic = 1;
8506 else
8507 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8508 break;
8509
8510 case OPTION_MSYNTAX:
8511 if (strcasecmp (arg, "att") == 0)
8512 intel_syntax = 0;
8513 else if (strcasecmp (arg, "intel") == 0)
8514 intel_syntax = 1;
8515 else
8516 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8517 break;
8518
8519 case OPTION_MINDEX_REG:
8520 allow_index_reg = 1;
8521 break;
8522
8523 case OPTION_MNAKED_REG:
8524 allow_naked_reg = 1;
8525 break;
8526
8527 case OPTION_MOLD_GCC:
8528 old_gcc = 1;
8529 break;
8530
8531 case OPTION_MSSE2AVX:
8532 sse2avx = 1;
8533 break;
8534
8535 case OPTION_MSSE_CHECK:
8536 if (strcasecmp (arg, "error") == 0)
8537 sse_check = sse_check_error;
8538 else if (strcasecmp (arg, "warning") == 0)
8539 sse_check = sse_check_warning;
8540 else if (strcasecmp (arg, "none") == 0)
8541 sse_check = sse_check_none;
8542 else
8543 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8544 break;
8545
8546 case OPTION_MAVXSCALAR:
8547 if (strcasecmp (arg, "128") == 0)
8548 avxscalar = vex128;
8549 else if (strcasecmp (arg, "256") == 0)
8550 avxscalar = vex256;
8551 else
8552 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8553 break;
8554
8555 default:
8556 return 0;
8557 }
8558 return 1;
8559 }
8560
8561 #define MESSAGE_TEMPLATE \
8562 " "
8563
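/* Print the known processor names (EXT zero) or ISA extension names
   (EXT non-zero) to STREAM, wrapping long lines.  With CHECK non-zero,
   omit processors that cannot be selected with -march=.  */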
8564 static void
8565 show_arch (FILE *stream, int ext, int check)
8566 {
8567 static char message[] = MESSAGE_TEMPLATE;
8568 char *start = message + 27;
8569 char *p;
8570 int size = sizeof (MESSAGE_TEMPLATE);
8571 int left;
8572 const char *name;
8573 int len;
8574 unsigned int j;
8575
8576 p = start;
8577 left = size - (start - message);
8578 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8579 {
8580 /* Should it be skipped? */
8581 if (cpu_arch [j].skip)
8582 continue;
8583
8584 name = cpu_arch [j].name;
8585 len = cpu_arch [j].len;
8586 if (*name == '.')
8587 {
8588 /* It is an extension. Skip if we aren't asked to show it. */
8589 if (ext)
8590 {
8591 name++;
8592 len--;
8593 }
8594 else
8595 continue;
8596 }
8597 else if (ext)
8598 {
8599 /* It is a processor. Skip if we only show extensions. */
8600 continue;
8601 }
8602 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8603 {
8604 /* It is an impossible processor - skip. */
8605 continue;
8606 }
8607
8608 /* Reserve 2 spaces for ", " or ",\0" */
8609 left -= len + 2;
8610
8611 /* Check if there is any room. */
8612 if (left >= 0)
8613 {
8614 if (p != start)
8615 {
8616 *p++ = ',';
8617 *p++ = ' ';
8618 }
8619 p = mempcpy (p, name, len);
8620 }
8621 else
8622 {
8623 /* Output the current message now and start a new one. */
8624 *p++ = ',';
8625 *p = '\0';
8626 fprintf (stream, "%s\n", message);
8627 p = start;
8628 left = size - (start - message) - len - 2;
8629
8630 gas_assert (left >= 0);
8631
8632 p = mempcpy (p, name, len);
8633 }
8634 }
8635
8636 *p = '\0';
8637 fprintf (stream, "%s\n", message);
8638 }
8639
8640 void
8641 md_show_usage (FILE *stream)
8642 {
8643 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8644 fprintf (stream, _("\
8645 -Q ignored\n\
8646 -V print assembler version number\n\
8647 -k ignored\n"));
8648 #endif
8649 fprintf (stream, _("\
8650 -n Do not optimize code alignment\n\
8651 -q quieten some warnings\n"));
8652 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8653 fprintf (stream, _("\
8654 -s ignored\n"));
8655 #endif
8656 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8657 || defined (TE_PE) || defined (TE_PEP))
8658 fprintf (stream, _("\
8659 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8660 #endif
8661 #ifdef SVR4_COMMENT_CHARS
8662 fprintf (stream, _("\
8663 --divide do not treat `/' as a comment character\n"));
8664 #else
8665 fprintf (stream, _("\
8666 --divide ignored\n"));
8667 #endif
8668 fprintf (stream, _("\
8669 -march=CPU[,+EXTENSION...]\n\
8670 generate code for CPU and EXTENSION, CPU is one of:\n"));
8671 show_arch (stream, 0, 1);
8672 fprintf (stream, _("\
8673 EXTENSION is combination of:\n"));
8674 show_arch (stream, 1, 0);
8675 fprintf (stream, _("\
8676 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8677 show_arch (stream, 0, 0);
8678 fprintf (stream, _("\
8679 -msse2avx encode SSE instructions with VEX prefix\n"));
8680 fprintf (stream, _("\
8681 -msse-check=[none|error|warning]\n\
8682 check SSE instructions\n"));
8683 fprintf (stream, _("\
8684 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8685 length\n"));
8686 fprintf (stream, _("\
8687 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8688 fprintf (stream, _("\
8689 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8690 fprintf (stream, _("\
8691 -mindex-reg support pseudo index registers\n"));
8692 fprintf (stream, _("\
8693 -mnaked-reg don't require `%%' prefix for registers\n"));
8694 fprintf (stream, _("\
8695 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8696 }
8697
8698 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8699 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8700 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8701
8702 /* Pick the target format to use. */
8703
8704 const char *
8705 i386_target_format (void)
8706 {
8707 if (!strncmp (default_arch, "x86_64", 6))
8708 {
8709 update_code_flag (CODE_64BIT, 1);
8710 if (default_arch[6] == '\0')
8711 x86_elf_abi = X86_64_ABI;
8712 else
8713 x86_elf_abi = X86_64_X32_ABI;
8714 }
8715 else if (!strcmp (default_arch, "i386"))
8716 update_code_flag (CODE_32BIT, 1);
8717 else
8718 as_fatal (_("unknown architecture"));
8719
8720 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8721 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8722 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8723 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8724
8725 switch (OUTPUT_FLAVOR)
8726 {
8727 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8728 case bfd_target_aout_flavour:
8729 return AOUT_TARGET_FORMAT;
8730 #endif
8731 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8732 # if defined (TE_PE) || defined (TE_PEP)
8733 case bfd_target_coff_flavour:
8734 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8735 # elif defined (TE_GO32)
8736 case bfd_target_coff_flavour:
8737 return "coff-go32";
8738 # else
8739 case bfd_target_coff_flavour:
8740 return "coff-i386";
8741 # endif
8742 #endif
8743 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8744 case bfd_target_elf_flavour:
8745 {
8746 const char *format;
8747
8748 switch (x86_elf_abi)
8749 {
8750 default:
8751 format = ELF_TARGET_FORMAT;
8752 break;
8753 case X86_64_ABI:
8754 use_rela_relocations = 1;
8755 object_64bit = 1;
8756 format = ELF_TARGET_FORMAT64;
8757 break;
8758 case X86_64_X32_ABI:
8759 use_rela_relocations = 1;
8760 object_64bit = 1;
8761 disallow_64bit_reloc = 1;
8762 format = ELF_TARGET_FORMAT32;
8763 break;
8764 }
8765 if (cpu_arch_isa == PROCESSOR_L1OM)
8766 {
8767 if (x86_elf_abi != X86_64_ABI)
8768 as_fatal (_("Intel L1OM is 64bit only"));
8769 return ELF_TARGET_L1OM_FORMAT;
8770 }
8771 if (cpu_arch_isa == PROCESSOR_K1OM)
8772 {
8773 if (x86_elf_abi != X86_64_ABI)
8774 as_fatal (_("Intel K1OM is 64bit only"));
8775 return ELF_TARGET_K1OM_FORMAT;
8776 }
8777 else
8778 return format;
8779 }
8780 #endif
8781 #if defined (OBJ_MACH_O)
8782 case bfd_target_mach_o_flavour:
8783 if (flag_code == CODE_64BIT)
8784 {
8785 use_rela_relocations = 1;
8786 object_64bit = 1;
8787 return "mach-o-x86-64";
8788 }
8789 else
8790 return "mach-o-i386";
8791 #endif
8792 default:
8793 abort ();
8794 return NULL;
8795 }
8796 }
8797
8798 #endif /* OBJ_MAYBE_ more than one */
8799
8800 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
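/* Emit an ELF .note section containing an NT_ARCH note that records the
   current architecture name (cpu_arch_name).  */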
8801 void
8802 i386_elf_emit_arch_note (void)
8803 {
8804 if (IS_ELF && cpu_arch_name != NULL)
8805 {
8806 char *p;
8807 asection *seg = now_seg;
8808 subsegT subseg = now_subseg;
8809 Elf_Internal_Note i_note;
8810 Elf_External_Note e_note;
8811 asection *note_secp;
8812 int len;
8813
8814 /* Create the .note section. */
8815 note_secp = subseg_new (".note", 0);
8816 bfd_set_section_flags (stdoutput,
8817 note_secp,
8818 SEC_HAS_CONTENTS | SEC_READONLY);
8819
8820 /* Process the arch string. */
8821 len = strlen (cpu_arch_name);
8822
8823 i_note.namesz = len + 1;
8824 i_note.descsz = 0;
8825 i_note.type = NT_ARCH;
8826 p = frag_more (sizeof (e_note.namesz));
8827 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8828 p = frag_more (sizeof (e_note.descsz));
8829 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8830 p = frag_more (sizeof (e_note.type));
8831 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8832 p = frag_more (len + 1);
8833 strcpy (p, cpu_arch_name);
8834
8835 frag_align (2, 0, 0);
8836
8837 subseg_set (seg, subseg);
8838 }
8839 }
8840 #endif
8841 \f
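/* Intercept references to the magic _GLOBAL_OFFSET_TABLE_ symbol,
   creating and caching GOT_symbol on first use.  */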
8842 symbolS *
8843 md_undefined_symbol (char *name)
8844 {
8845 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8846 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8847 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8848 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8849 {
8850 if (!GOT_symbol)
8851 {
8852 if (symbol_find (name))
8853 as_bad (_("GOT already in symbol table"));
8854 GOT_symbol = symbol_new (name, undefined_section,
8855 (valueT) 0, &zero_address_frag);
8856 }
8857 return GOT_symbol;
8858 }
8859 return 0;
8860 }
8861
8862 /* Round up a section size to the appropriate boundary. */
8863
8864 valueT
8865 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8866 {
8867 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8868 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8869 {
8870 /* For a.out, force the section size to be aligned. If we don't do
8871 this, BFD will align it for us, but it will not write out the
8872 final bytes of the section. This may be a bug in BFD, but it is
8873 easier to fix it here since that is how the other a.out targets
8874 work. */
8875 int align;
8876
8877 align = bfd_get_section_alignment (stdoutput, segment);
8878 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8879 }
8880 #endif
8881
8882 return size;
8883 }
8884
8885 /* On the i386, PC-relative offsets are relative to the start of the
8886 next instruction. That is, the address of the offset, plus its
8887 size, since the offset is always the last part of the insn. */
8888
8889 long
8890 md_pcrel_from (fixS *fixP)
8891 {
8892 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8893 }
8894
8895 #ifndef I386COFF
8896
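/* Handle the .bss pseudo-op: switch output to the requested subsection
   of the bss section.  */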
8897 static void
8898 s_bss (int ignore ATTRIBUTE_UNUSED)
8899 {
8900 int temp;
8901
8902 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8903 if (IS_ELF)
8904 obj_elf_section_change_hook ();
8905 #endif
8906 temp = get_absolute_expression ();
8907 subseg_set (bss_section, (subsegT) temp);
8908 demand_empty_rest_of_line ();
8909 }
8910
8911 #endif
8912
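/* Turn a fixup of the form `symbol - _GLOBAL_OFFSET_TABLE_' into the
   appropriate GOT-relative relocation and discard the subtracted
   symbol.  */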
8913 void
8914 i386_validate_fix (fixS *fixp)
8915 {
8916 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8917 {
8918 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8919 {
8920 if (!object_64bit)
8921 abort ();
8922 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8923 }
8924 else
8925 {
8926 if (!object_64bit)
8927 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8928 else
8929 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8930 }
8931 fixp->fx_subsy = 0;
8932 }
8933 }
8934
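/* Translate the internal fixup FIXP into a BFD arelent: choose the
   relocation code, compute the addend, and look up the howto.  */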
8935 arelent *
8936 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
8937 {
8938 arelent *rel;
8939 bfd_reloc_code_real_type code;
8940
8941 switch (fixp->fx_r_type)
8942 {
8943 case BFD_RELOC_X86_64_PLT32:
8944 case BFD_RELOC_X86_64_GOT32:
8945 case BFD_RELOC_X86_64_GOTPCREL:
8946 case BFD_RELOC_386_PLT32:
8947 case BFD_RELOC_386_GOT32:
8948 case BFD_RELOC_386_GOTOFF:
8949 case BFD_RELOC_386_GOTPC:
8950 case BFD_RELOC_386_TLS_GD:
8951 case BFD_RELOC_386_TLS_LDM:
8952 case BFD_RELOC_386_TLS_LDO_32:
8953 case BFD_RELOC_386_TLS_IE_32:
8954 case BFD_RELOC_386_TLS_IE:
8955 case BFD_RELOC_386_TLS_GOTIE:
8956 case BFD_RELOC_386_TLS_LE_32:
8957 case BFD_RELOC_386_TLS_LE:
8958 case BFD_RELOC_386_TLS_GOTDESC:
8959 case BFD_RELOC_386_TLS_DESC_CALL:
8960 case BFD_RELOC_X86_64_TLSGD:
8961 case BFD_RELOC_X86_64_TLSLD:
8962 case BFD_RELOC_X86_64_DTPOFF32:
8963 case BFD_RELOC_X86_64_DTPOFF64:
8964 case BFD_RELOC_X86_64_GOTTPOFF:
8965 case BFD_RELOC_X86_64_TPOFF32:
8966 case BFD_RELOC_X86_64_TPOFF64:
8967 case BFD_RELOC_X86_64_GOTOFF64:
8968 case BFD_RELOC_X86_64_GOTPC32:
8969 case BFD_RELOC_X86_64_GOT64:
8970 case BFD_RELOC_X86_64_GOTPCREL64:
8971 case BFD_RELOC_X86_64_GOTPC64:
8972 case BFD_RELOC_X86_64_GOTPLT64:
8973 case BFD_RELOC_X86_64_PLTOFF64:
8974 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8975 case BFD_RELOC_X86_64_TLSDESC_CALL:
8976 case BFD_RELOC_RVA:
8977 case BFD_RELOC_VTABLE_ENTRY:
8978 case BFD_RELOC_VTABLE_INHERIT:
8979 #ifdef TE_PE
8980 case BFD_RELOC_32_SECREL:
8981 #endif
8982 code = fixp->fx_r_type;
8983 break;
8984 case BFD_RELOC_X86_64_32S:
8985 if (!fixp->fx_pcrel)
8986 {
8987 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8988 code = fixp->fx_r_type;
8989 break;
8990 }
8991 default:
8992 if (fixp->fx_pcrel)
8993 {
8994 switch (fixp->fx_size)
8995 {
8996 default:
8997 as_bad_where (fixp->fx_file, fixp->fx_line,
8998 _("can not do %d byte pc-relative relocation"),
8999 fixp->fx_size);
9000 code = BFD_RELOC_32_PCREL;
9001 break;
9002 case 1: code = BFD_RELOC_8_PCREL; break;
9003 case 2: code = BFD_RELOC_16_PCREL; break;
9004 case 4: code = BFD_RELOC_32_PCREL; break;
9005 #ifdef BFD64
9006 case 8: code = BFD_RELOC_64_PCREL; break;
9007 #endif
9008 }
9009 }
9010 else
9011 {
9012 switch (fixp->fx_size)
9013 {
9014 default:
9015 as_bad_where (fixp->fx_file, fixp->fx_line,
9016 _("can not do %d byte relocation"),
9017 fixp->fx_size);
9018 code = BFD_RELOC_32;
9019 break;
9020 case 1: code = BFD_RELOC_8; break;
9021 case 2: code = BFD_RELOC_16; break;
9022 case 4: code = BFD_RELOC_32; break;
9023 #ifdef BFD64
9024 case 8: code = BFD_RELOC_64; break;
9025 #endif
9026 }
9027 }
9028 break;
9029 }
9030
9031 if ((code == BFD_RELOC_32
9032 || code == BFD_RELOC_32_PCREL
9033 || code == BFD_RELOC_X86_64_32S)
9034 && GOT_symbol
9035 && fixp->fx_addsy == GOT_symbol)
9036 {
9037 if (!object_64bit)
9038 code = BFD_RELOC_386_GOTPC;
9039 else
9040 code = BFD_RELOC_X86_64_GOTPC32;
9041 }
9042 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9043 && GOT_symbol
9044 && fixp->fx_addsy == GOT_symbol)
9045 {
9046 code = BFD_RELOC_X86_64_GOTPC64;
9047 }
9048
9049 rel = (arelent *) xmalloc (sizeof (arelent));
9050 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9051 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9052
9053 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9054
9055 if (!use_rela_relocations)
9056 {
9057 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9058 vtable entry to be used in the relocation's section offset. */
9059 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9060 rel->address = fixp->fx_offset;
9061 #if defined (OBJ_COFF) && defined (TE_PE)
9062 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9063 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9064 else
9065 #endif
9066 rel->addend = 0;
9067 }
9068 /* Use rela relocations in 64bit mode. */
9069 else
9070 {
9071 if (disallow_64bit_reloc)
9072 switch (code)
9073 {
9074 case BFD_RELOC_X86_64_DTPOFF64:
9075 case BFD_RELOC_X86_64_TPOFF64:
9076 case BFD_RELOC_64_PCREL:
9077 case BFD_RELOC_X86_64_GOTOFF64:
9078 case BFD_RELOC_X86_64_GOT64:
9079 case BFD_RELOC_X86_64_GOTPCREL64:
9080 case BFD_RELOC_X86_64_GOTPC64:
9081 case BFD_RELOC_X86_64_GOTPLT64:
9082 case BFD_RELOC_X86_64_PLTOFF64:
9083 as_bad_where (fixp->fx_file, fixp->fx_line,
9084 _("cannot represent relocation type %s in x32 mode"),
9085 bfd_get_reloc_code_name (code));
9086 break;
9087 default:
9088 break;
9089 }
9090
9091 if (!fixp->fx_pcrel)
9092 rel->addend = fixp->fx_offset;
9093 else
9094 switch (code)
9095 {
9096 case BFD_RELOC_X86_64_PLT32:
9097 case BFD_RELOC_X86_64_GOT32:
9098 case BFD_RELOC_X86_64_GOTPCREL:
9099 case BFD_RELOC_X86_64_TLSGD:
9100 case BFD_RELOC_X86_64_TLSLD:
9101 case BFD_RELOC_X86_64_GOTTPOFF:
9102 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9103 case BFD_RELOC_X86_64_TLSDESC_CALL:
9104 rel->addend = fixp->fx_offset - fixp->fx_size;
9105 break;
9106 default:
9107 rel->addend = (section->vma
9108 - fixp->fx_size
9109 + fixp->fx_addnumber
9110 + md_pcrel_from (fixp));
9111 break;
9112 }
9113 }
9114
9115 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9116 if (rel->howto == NULL)
9117 {
9118 as_bad_where (fixp->fx_file, fixp->fx_line,
9119 _("cannot represent relocation type %s"),
9120 bfd_get_reloc_code_name (code));
9121 /* Set howto to a garbage value so that we can keep going. */
9122 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9123 gas_assert (rel->howto != NULL);
9124 }
9125
9126 return rel;
9127 }
9128
9129 #include "tc-i386-intel.c"
9130
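/* Parse a register name in a CFI directive and replace EXP with the
   register's DWARF2 number for the current mode.  */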
9131 void
9132 tc_x86_parse_to_dw2regnum (expressionS *exp)
9133 {
9134 int saved_naked_reg;
9135 char saved_register_dot;
9136
9137 saved_naked_reg = allow_naked_reg;
9138 allow_naked_reg = 1;
9139 saved_register_dot = register_chars['.'];
9140 register_chars['.'] = '.';
9141 allow_pseudo_reg = 1;
9142 expression_and_evaluate (exp);
9143 allow_pseudo_reg = 0;
9144 register_chars['.'] = saved_register_dot;
9145 allow_naked_reg = saved_naked_reg;
9146
9147 if (exp->X_op == O_register && exp->X_add_number >= 0)
9148 {
9149 if ((addressT) exp->X_add_number < i386_regtab_size)
9150 {
9151 exp->X_op = O_constant;
9152 exp->X_add_number = i386_regtab[exp->X_add_number]
9153 .dw2_regnum[flag_code >> 1];
9154 }
9155 else
9156 exp->X_op = O_illegal;
9157 }
9158 }
9159
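/* Emit the initial CFI instructions: the CFA is the stack pointer plus
   one word, and the return address is saved one word below the CFA.  */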
9160 void
9161 tc_x86_frame_initial_instructions (void)
9162 {
9163 static unsigned int sp_regno[2];
9164
9165 if (!sp_regno[flag_code >> 1])
9166 {
9167 char *saved_input = input_line_pointer;
9168 char sp[][4] = {"esp", "rsp"};
9169 expressionS exp;
9170
9171 input_line_pointer = sp[flag_code >> 1];
9172 tc_x86_parse_to_dw2regnum (&exp);
9173 gas_assert (exp.X_op == O_constant);
9174 sp_regno[flag_code >> 1] = exp.X_add_number;
9175 input_line_pointer = saved_input;
9176 }
9177
9178 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9179 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9180 }
9181
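/* Return the address size to use in DWARF2 debug information: 4 for the
   x32 ABI, otherwise the target's natural address size.  */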
9182 int
9183 x86_dwarf2_addr_size (void)
9184 {
9185 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9186 if (x86_elf_abi == X86_64_X32_ABI)
9187 return 4;
9188 #endif
9189 return bfd_arch_bits_per_address (stdoutput) / 8;
9190 }
9191
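/* Map the section type name "unwind" to SHT_X86_64_UNWIND when
   assembling 64bit code.  */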
9192 int
9193 i386_elf_section_type (const char *str, size_t len)
9194 {
9195 if (flag_code == CODE_64BIT
9196 && len == sizeof ("unwind") - 1
9197 && strncmp (str, "unwind", 6) == 0)
9198 return SHT_X86_64_UNWIND;
9199
9200 return -1;
9201 }
9202
9203 #ifdef TE_SOLARIS
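/* On Solaris, give .eh_frame the SHT_X86_64_UNWIND section type when
   assembling 64bit code.  */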
9204 void
9205 i386_solaris_fix_up_eh_frame (segT sec)
9206 {
9207 if (flag_code == CODE_64BIT)
9208 elf_section_type (sec) = SHT_X86_64_UNWIND;
9209 }
9210 #endif
9211
9212 #ifdef TE_PE
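/* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL for
   PE DWARF2 debug information.  */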
9213 void
9214 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9215 {
9216 expressionS exp;
9217
9218 exp.X_op = O_secrel;
9219 exp.X_add_symbol = symbol;
9220 exp.X_add_number = 0;
9221 emit_expr (&exp, size);
9222 }
9223 #endif
9224
9225 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9226 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9227
9228 bfd_vma
9229 x86_64_section_letter (int letter, char **ptr_msg)
9230 {
9231 if (flag_code == CODE_64BIT)
9232 {
9233 if (letter == 'l')
9234 return SHF_X86_64_LARGE;
9235
9236 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9237 }
9238 else
9239 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9240 return -1;
9241 }
9242
9243 bfd_vma
9244 x86_64_section_word (char *str, size_t len)
9245 {
9246 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9247 return SHF_X86_64_LARGE;
9248
9249 return -1;
9250 }
9251
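/* Implement the .largecomm pseudo-op.  In 64bit mode this behaves like
   .comm but allocates the symbol in the large common section (with
   .lbss used for local symbols); otherwise it degrades to a plain .comm
   with a warning.  */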
9252 static void
9253 handle_large_common (int small ATTRIBUTE_UNUSED)
9254 {
9255 if (flag_code != CODE_64BIT)
9256 {
9257 s_comm_internal (0, elf_common_parse);
9258 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9259 }
9260 else
9261 {
9262 static segT lbss_section;
9263 asection *saved_com_section_ptr = elf_com_section_ptr;
9264 asection *saved_bss_section = bss_section;
9265
9266 if (lbss_section == NULL)
9267 {
9268 flagword applicable;
9269 segT seg = now_seg;
9270 subsegT subseg = now_subseg;
9271
9272 /* The .lbss section is for local .largecomm symbols. */
9273 lbss_section = subseg_new (".lbss", 0);
9274 applicable = bfd_applicable_section_flags (stdoutput);
9275 bfd_set_section_flags (stdoutput, lbss_section,
9276 applicable & SEC_ALLOC);
9277 seg_info (lbss_section)->bss = 1;
9278
9279 subseg_set (seg, subseg);
9280 }
9281
9282 elf_com_section_ptr = &_bfd_elf_large_com_section;
9283 bss_section = lbss_section;
9284
9285 s_comm_internal (0, elf_common_parse);
9286
9287 elf_com_section_ptr = saved_com_section_ptr;
9288 bss_section = saved_bss_section;
9289 }
9290 }
9291 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */