Correct x86 assembler manual
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2014 Free Software Foundation, Inc.
3
4 This file is part of GAS, the GNU Assembler.
5
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19 02110-1301, USA. */
20
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
27
28 #include "as.h"
29 #include "safe-ctype.h"
30 #include "subsegs.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
35
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
38 #endif
39
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
42 #endif
43
44 #ifndef DEFAULT_ARCH
45 #define DEFAULT_ARCH "i386"
46 #endif
47
48 #ifndef INLINE
49 #if __GNUC__ >= 2
50 #define INLINE __inline__
51 #else
52 #define INLINE
53 #endif
54 #endif
55
56 /* Prefixes will be emitted in the order defined below.
   WAIT_PREFIX must be the first prefix since FWAIT really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
61 #define WAIT_PREFIX 0
62 #define SEG_PREFIX 1
63 #define ADDR_PREFIX 2
64 #define DATA_PREFIX 3
65 #define REP_PREFIX 4
66 #define HLE_PREFIX REP_PREFIX
67 #define BND_PREFIX REP_PREFIX
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 #define ZMMWORD_MNEM_SUFFIX 'z'
/* Intel Syntax.  Use a non-ascii letter since it never appears
88 in instructions. */
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
90
91 #define END_OF_INSN '\0'
92
93 /*
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
98 END.
99 */
100 typedef struct
101 {
102 const insn_template *start;
103 const insn_template *end;
104 }
105 templates;
106
/* 386 operand encoding bytes: see 386 book for details of this.  */
typedef struct
{
  unsigned int regmem;	/* codes register or memory operand */
  unsigned int reg;	/* codes register operand (or extended opcode) */
  unsigned int mode;	/* how to interpret regmem & reg */
}
modrm_byte;

/* x86-64 extension prefix.  */
typedef int rex_byte;

/* 386 opcode byte to code indirect addressing.  */
typedef struct
{
  unsigned base;	/* base register field of the SIB byte */
  unsigned index;	/* index register field of the SIB byte */
  unsigned scale;	/* scale field of the SIB byte */
}
sib_byte;
127
/* x86 arch names, types and features.  Describes one entry of the
   cpu_arch[] table below; entries whose NAME begins with '.' are ISA
   extension flags rather than CPU names.  */
typedef struct
{
  const char *name;		/* arch name */
  unsigned int len;		/* arch string length */
  enum processor_type type;	/* arch type */
  i386_cpu_flags flags;		/* cpu feature flags */
  unsigned int skip;		/* show_arch should skip this. */
  unsigned int negated;		/* turn off indicated flags. */
}
arch_entry;
139
140 static void update_code_flag (int, int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_check (int);
147 static void set_cpu_arch (int);
148 #ifdef TE_PE
149 static void pe_directive_secrel (int);
150 #endif
151 static void signed_cons (int);
152 static char *output_invalid (int c);
153 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
154 const char *);
155 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
156 const char *);
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS *);
160 static int i386_intel_parse_name (const char *, expressionS *);
161 static const reg_entry *parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (int, int);
166 static void optimize_imm (void);
167 static void optimize_disp (void);
168 static const insn_template *match_template (void);
169 static int check_string (void);
170 static int process_suffix (void);
171 static int check_byte_reg (void);
172 static int check_long_reg (void);
173 static int check_qword_reg (void);
174 static int check_word_reg (void);
175 static int finalize_imm (void);
176 static int process_operands (void);
177 static const seg_entry *build_modrm_byte (void);
178 static void output_insn (void);
179 static void output_imm (fragS *, offsetT);
180 static void output_disp (fragS *, offsetT);
181 #ifndef I386COFF
182 static void s_bss (int);
183 #endif
184 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185 static void handle_large_common (int small ATTRIBUTE_UNUSED);
186 #endif
187
188 static const char *default_arch = DEFAULT_ARCH;
189
/* This struct describes rounding control and SAE in the instruction.  */
struct RC_Operation
{
  enum rc_type
  {
    rne = 0,	/* round to nearest even ("rn-sae") */
    rd,		/* round down ("rd-sae") */
    ru,		/* round up ("ru-sae") */
    rz,		/* round toward zero ("rz-sae") */
    saeonly	/* suppress-all-exceptions only ("sae") */
  } type;
  /* Index of the operand the rounding/SAE specifier was attached to.  */
  int operand;
};

/* Rounding/SAE state of the instruction currently being assembled.  */
static struct RC_Operation rc_op;
205
/* The struct describes masking, applied to OPERAND in the instruction.
   MASK is a pointer to the corresponding mask register.  ZEROING tells
   whether merging or zeroing mask is used.  */
struct Mask_Operation
{
  const reg_entry *mask;	/* the mask register given in the insn */
  unsigned int zeroing;		/* nonzero => zeroing, zero => merging */
  /* The operand where this operation is associated.  */
  int operand;
};

/* Masking state of the instruction currently being assembled.  */
static struct Mask_Operation mask_op;
218
/* The struct describes broadcasting, applied to OPERAND.  TYPE encodes
   the broadcast factor.  */
struct Broadcast_Operation
{
  /* Type of broadcast: no broadcast, {1to8}, or {1to16}.  */
  int type;

  /* Index of broadcasted operand.  */
  int operand;
};

/* Broadcast state of the instruction currently being assembled.  */
static struct Broadcast_Operation broadcast_op;
231
/* VEX prefix.  */
typedef struct
{
  /* VEX prefix is either 2 byte or 3 byte.  EVEX is 4 byte.  */
  unsigned char bytes[4];
  /* Number of prefix bytes used in BYTES (2, 3, or 4).  */
  unsigned int length;
  /* Destination or source register specifier.  */
  const reg_entry *register_specifier;
} vex_prefix;
241
/* 'md_assemble ()' gathers together information and puts it into a
   i386_insn.  */

/* One parsed operand: a displacement expression, an immediate
   expression, or a register — which member is valid is determined by
   the corresponding entry in i386_insn.types[].  */
union i386_op
{
  expressionS *disps;
  expressionS *imms;
  const reg_entry *regs;
};
251
/* Reasons recorded in i386_insn.error when template matching or operand
   checking fails; used to choose which error message to report.  */
enum i386_error
{
  operand_size_mismatch,
  operand_type_mismatch,
  register_type_mismatch,
  number_of_operands_mismatch,
  invalid_instruction_suffix,
  bad_imm4,
  old_gcc_only,
  unsupported_with_intel_mnemonic,
  unsupported_syntax,
  unsupported,
  invalid_vsib_address,
  invalid_vector_register_set,
  unsupported_vector_index_register,
  unsupported_broadcast,
  broadcast_not_on_src_operand,
  broadcast_needed,
  unsupported_masking,
  mask_not_on_destination,
  no_default_mask,
  unsupported_rc_sae,
  rc_sae_operand_not_last_imm,
  invalid_register_operand,
  try_vector_disp8
};
278
/* The fully parsed and partially encoded state of one instruction,
   built up by md_assemble () and its helpers.  */
struct _i386_insn
{
  /* TM holds the template for the insn we're currently assembling.  */
  insn_template tm;

  /* SUFFIX holds the instruction size suffix for byte, word, dword
     or qword, if given.  */
  char suffix;

  /* OPERANDS gives the number of given operands.  */
  unsigned int operands;

  /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
     of given register, displacement, memory operands and immediate
     operands.  */
  unsigned int reg_operands, disp_operands, mem_operands, imm_operands;

  /* TYPES [i] is the type (see above #defines) which tells us how to
     use OP[i] for the corresponding operand.  */
  i386_operand_type types[MAX_OPERANDS];

  /* Displacement expression, immediate expression, or register for each
     operand.  */
  union i386_op op[MAX_OPERANDS];

  /* Flags for operands.  */
  unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1

  /* Relocation type for operand.  */
  enum bfd_reloc_code_real reloc[MAX_OPERANDS];

  /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
     the base index byte below.  */
  const reg_entry *base_reg;
  const reg_entry *index_reg;
  unsigned int log2_scale_factor;

  /* SEG gives the seg_entries of this insn.  They are zero unless
     explicit segment overrides are given.  */
  const seg_entry *seg[2];

  /* PREFIX holds all the given prefix opcodes (usually null).
     PREFIXES is the number of prefix opcodes.  */
  unsigned int prefixes;
  unsigned char prefix[MAX_PREFIXES];

  /* RM and SIB are the modrm byte and the sib byte where the
     addressing modes of this insn are encoded.  */
  modrm_byte rm;
  rex_byte rex;
  rex_byte vrex;
  sib_byte sib;
  vex_prefix vex;

  /* Masking attributes.  */
  struct Mask_Operation *mask;

  /* Rounding control and SAE attributes.  */
  struct RC_Operation *rounding;

  /* Broadcasting attributes.  */
  struct Broadcast_Operation *broadcast;

  /* Compressed disp8*N attribute.  */
  unsigned int memshift;

  /* Swap operand in encoding.  */
  unsigned int swap_operand;

  /* Prefer 8bit or 32bit displacement in encoding.  */
  enum
    {
      disp_encoding_default = 0,
      disp_encoding_8bit,
      disp_encoding_32bit
    } disp_encoding;

  /* REP prefix.  */
  const char *rep_prefix;

  /* HLE prefix.  */
  const char *hle_prefix;

  /* Have BND prefix.  */
  const char *bnd_prefix;

  /* Need VREX to support upper 16 registers.  */
  int need_vrex;

  /* Error message.  */
  enum i386_error error;
};

typedef struct _i386_insn i386_insn;
374
/* Link RC type with corresponding string, that'll be looked for in
   asm.  */
struct RC_name
{
  enum rc_type type;	/* rounding/SAE kind (see RC_Operation) */
  const char *name;	/* spelling accepted in source */
  unsigned int len;	/* strlen (name) */
};

/* Recognized rounding-control / SAE specifier spellings.  */
static const struct RC_name RC_NamesTable[] =
{
  { rne, STRING_COMMA_LEN ("rn-sae") },
  { rd, STRING_COMMA_LEN ("rd-sae") },
  { ru, STRING_COMMA_LEN ("ru-sae") },
  { rz, STRING_COMMA_LEN ("rz-sae") },
  { saeonly, STRING_COMMA_LEN ("sae") },
};
392
393 /* List of chars besides those in app.c:symbol_chars that can start an
394 operand. Used to prevent the scrubber eating vital white-space. */
395 const char extra_symbol_chars[] = "*%-([{"
396 #ifdef LEX_AT
397 "@"
398 #endif
399 #ifdef LEX_QM
400 "?"
401 #endif
402 ;
403
404 #if (defined (TE_I386AIX) \
405 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
406 && !defined (TE_GNU) \
407 && !defined (TE_LINUX) \
408 && !defined (TE_NACL) \
409 && !defined (TE_NETWARE) \
410 && !defined (TE_FreeBSD) \
411 && !defined (TE_DragonFly) \
412 && !defined (TE_NetBSD)))
413 /* This array holds the chars that always start a comment. If the
414 pre-processor is disabled, these aren't very useful. The option
415 --divide will remove '/' from this list. */
416 const char *i386_comment_chars = "#/";
417 #define SVR4_COMMENT_CHARS 1
418 #define PREFIX_SEPARATOR '\\'
419
420 #else
421 const char *i386_comment_chars = "#";
422 #define PREFIX_SEPARATOR '/'
423 #endif
424
425 /* This array holds the chars that only start a comment at the beginning of
426 a line. If the line seems to have the form '# 123 filename'
427 .line and .file directives will appear in the pre-processed output.
428 Note that input_file.c hand checks for '#' at the beginning of the
429 first line of the input file. This is because the compiler outputs
430 #NO_APP at the beginning of its output.
431 Also note that comments started like this one will always work if
432 '/' isn't otherwise defined. */
433 const char line_comment_chars[] = "#/";
434
435 const char line_separator_chars[] = ";";
436
437 /* Chars that can be used to separate mant from exp in floating point
438 nums. */
439 const char EXP_CHARS[] = "eE";
440
441 /* Chars that mean this number is a floating point constant
442 As in 0f12.456
443 or 0d1.2345e12. */
444 const char FLT_CHARS[] = "fFdDxX";
445
446 /* Tables for lexical analysis. */
447 static char mnemonic_chars[256];
448 static char register_chars[256];
449 static char operand_chars[256];
450 static char identifier_chars[256];
451 static char digit_chars[256];
452
453 /* Lexical macros. */
454 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
455 #define is_operand_char(x) (operand_chars[(unsigned char) x])
456 #define is_register_char(x) (register_chars[(unsigned char) x])
457 #define is_space_char(x) ((x) == ' ')
458 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
459 #define is_digit_char(x) (digit_chars[(unsigned char) x])
460
461 /* All non-digit non-letter characters that may occur in an operand. */
462 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
463
464 /* md_assemble() always leaves the strings it's passed unaltered. To
465 effect this we maintain a stack of saved characters that we've smashed
466 with '\0's (indicating end of strings for various sub-fields of the
467 assembler instruction). */
468 static char save_stack[32];
469 static char *save_stack_p;
470 #define END_STRING_AND_SAVE(s) \
471 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
472 #define RESTORE_END_STRING(s) \
473 do { *(s) = *--save_stack_p; } while (0)
474
475 /* The instruction we're assembling. */
476 static i386_insn i;
477
478 /* Possible templates for current insn. */
479 static const templates *current_templates;
480
481 /* Per instruction expressionS buffers: max displacements & immediates. */
482 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
483 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
484
485 /* Current operand we are working on. */
486 static int this_operand = -1;
487
488 /* We support four different modes. FLAG_CODE variable is used to distinguish
489 these. */
490
491 enum flag_code {
492 CODE_32BIT,
493 CODE_16BIT,
494 CODE_64BIT };
495
496 static enum flag_code flag_code;
497 static unsigned int object_64bit;
498 static unsigned int disallow_64bit_reloc;
499 static int use_rela_relocations = 0;
500
501 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
502 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
503 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
504
505 /* The ELF ABI to use. */
506 enum x86_elf_abi
507 {
508 I386_ABI,
509 X86_64_ABI,
510 X86_64_X32_ABI
511 };
512
513 static enum x86_elf_abi x86_elf_abi = I386_ABI;
514 #endif
515
516 #if defined (TE_PE) || defined (TE_PEP)
517 /* Use big object file format. */
518 static int use_big_obj = 0;
519 #endif
520
521 /* 1 for intel syntax,
522 0 if att syntax. */
523 static int intel_syntax = 0;
524
525 /* 1 for intel mnemonic,
526 0 if att mnemonic. */
527 static int intel_mnemonic = !SYSV386_COMPAT;
528
529 /* 1 if support old (<= 2.8.1) versions of gcc. */
530 static int old_gcc = OLDGCC_COMPAT;
531
532 /* 1 if pseudo registers are permitted. */
533 static int allow_pseudo_reg = 0;
534
535 /* 1 if register prefix % not required. */
536 static int allow_naked_reg = 0;
537
/* 1 if the assembler should add BND prefix for all control-transferring
539 instructions supporting it, even if this prefix wasn't specified
540 explicitly. */
541 static int add_bnd_prefix = 0;
542
543 /* 1 if pseudo index register, eiz/riz, is allowed . */
544 static int allow_index_reg = 0;
545
546 /* 1 if the assembler should ignore LOCK prefix, even if it was
547 specified explicitly. */
548 static int omit_lock_prefix = 0;
549
550 static enum check_kind
551 {
552 check_none = 0,
553 check_warning,
554 check_error
555 }
556 sse_check, operand_check = check_warning;
557
558 /* Register prefix used for error message. */
559 static const char *register_prefix = "%";
560
561 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
562 leave, push, and pop instructions so that gcc has the same stack
563 frame as in 32 bit mode. */
564 static char stackop_size = '\0';
565
566 /* Non-zero to optimize code alignment. */
567 int optimize_align_code = 1;
568
569 /* Non-zero to quieten some warnings. */
570 static int quiet_warnings = 0;
571
572 /* CPU name. */
573 static const char *cpu_arch_name = NULL;
574 static char *cpu_sub_arch_name = NULL;
575
576 /* CPU feature flags. */
577 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
578
579 /* If we have selected a cpu we are generating instructions for. */
580 static int cpu_arch_tune_set = 0;
581
582 /* Cpu we are generating instructions for. */
583 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
584
585 /* CPU feature flags of cpu we are generating instructions for. */
586 static i386_cpu_flags cpu_arch_tune_flags;
587
588 /* CPU instruction set architecture used. */
589 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
590
591 /* CPU feature flags of instruction set architecture used. */
592 i386_cpu_flags cpu_arch_isa_flags;
593
594 /* If set, conditional jumps are not automatically promoted to handle
595 larger than a byte offset. */
596 static unsigned int no_cond_jump_promotion = 0;
597
598 /* Encode SSE instructions with VEX prefix. */
599 static unsigned int sse2avx;
600
601 /* Encode scalar AVX instructions with specific vector length. */
602 static enum
603 {
604 vex128 = 0,
605 vex256
606 } avxscalar;
607
608 /* Encode scalar EVEX LIG instructions with specific vector length. */
609 static enum
610 {
611 evexl128 = 0,
612 evexl256,
613 evexl512
614 } evexlig;
615
616 /* Encode EVEX WIG instructions with specific evex.w. */
617 static enum
618 {
619 evexw0 = 0,
620 evexw1
621 } evexwig;
622
623 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
624 static enum rc_type evexrcig = rne;
625
626 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
627 static symbolS *GOT_symbol;
628
629 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
630 unsigned int x86_dwarf2_return_column;
631
632 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
633 int x86_cie_data_alignment;
634
635 /* Interface to relax_segment.
636 There are 3 major relax states for 386 jump insns because the
637 different types of jumps add different sizes to frags when we're
638 figuring out what sort of jump to choose to reach a given label. */
639
640 /* Types. */
641 #define UNCOND_JUMP 0
642 #define COND_JUMP 1
643 #define COND_JUMP86 2
644
645 /* Sizes. */
646 #define CODE16 1
647 #define SMALL 0
648 #define SMALL16 (SMALL | CODE16)
649 #define BIG 2
650 #define BIG16 (BIG | CODE16)
651
652 #ifndef INLINE
653 #ifdef __GNUC__
654 #define INLINE __inline__
655 #else
656 #define INLINE
657 #endif
658 #endif
659
660 #define ENCODE_RELAX_STATE(type, size) \
661 ((relax_substateT) (((type) << 2) | (size)))
662 #define TYPE_FROM_RELAX_STATE(s) \
663 ((s) >> 2)
664 #define DISP_SIZE_FROM_RELAX_STATE(s) \
665 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
666
667 /* This table is used by relax_frag to promote short jumps to long
668 ones where necessary. SMALL (short) jumps may be promoted to BIG
669 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
670 don't allow a short jump in a 32 bit code segment to be promoted to
671 a 16 bit offset jump because it's slower (requires data size
672 prefix), and doesn't work, unless the destination is in the bottom
673 64k of the code segment (The top 16 bits of eip are zeroed). */
674
const relax_typeS md_relax_table[] =
{
  /* The fields are:
     1) most positive reach of this state,
     2) most negative reach of this state,
     3) how many bytes this mode will have in the variable part of the frag
     4) which index into the table to try if we can't fit into this one.  */

  /* UNCOND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
  /* dword jmp adds 4 bytes to frag:
     0 extra opcode bytes, 4 displacement bytes.  */
  {0, 0, 4, 0},
  /* word jmp adds 2 bytes to frag:
     0 extra opcode bytes, 2 displacement bytes.  */
  {0, 0, 2, 0},

  /* COND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
  /* dword conditionals add 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 3 bytes to frag:
     1 extra opcode byte, 2 displacement bytes.  */
  {0, 0, 3, 0},

  /* COND_JUMP86 states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
  /* dword conditionals add 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 4 bytes to frag:
     1 displacement byte and a 3 byte long branch insn.  */
  {0, 0, 4, 0}
};
713
714 static const arch_entry cpu_arch[] =
715 {
716 /* Do not replace the first two entries - i386_target_format()
717 relies on them being there in this order. */
718 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
719 CPU_GENERIC32_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
721 CPU_GENERIC64_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
723 CPU_NONE_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
725 CPU_I186_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
727 CPU_I286_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
729 CPU_I386_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
731 CPU_I486_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
733 CPU_I586_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
735 CPU_I686_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
737 CPU_I586_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
739 CPU_PENTIUMPRO_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
741 CPU_P2_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
743 CPU_P3_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
745 CPU_P4_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
747 CPU_CORE_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
749 CPU_NOCONA_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
751 CPU_CORE_FLAGS, 1, 0 },
752 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
753 CPU_CORE_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
755 CPU_CORE2_FLAGS, 1, 0 },
756 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
757 CPU_CORE2_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
759 CPU_COREI7_FLAGS, 0, 0 },
760 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
761 CPU_L1OM_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
763 CPU_K1OM_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
765 CPU_K6_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
767 CPU_K6_2_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
769 CPU_ATHLON_FLAGS, 0, 0 },
770 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
771 CPU_K8_FLAGS, 1, 0 },
772 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
773 CPU_K8_FLAGS, 0, 0 },
774 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
775 CPU_K8_FLAGS, 0, 0 },
776 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
777 CPU_AMDFAM10_FLAGS, 0, 0 },
778 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
779 CPU_BDVER1_FLAGS, 0, 0 },
780 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
781 CPU_BDVER2_FLAGS, 0, 0 },
782 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
783 CPU_BDVER3_FLAGS, 0, 0 },
784 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
785 CPU_BDVER4_FLAGS, 0, 0 },
786 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
787 CPU_BTVER1_FLAGS, 0, 0 },
788 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
789 CPU_BTVER2_FLAGS, 0, 0 },
790 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
791 CPU_8087_FLAGS, 0, 0 },
792 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
793 CPU_287_FLAGS, 0, 0 },
794 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
795 CPU_387_FLAGS, 0, 0 },
796 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
797 CPU_ANY87_FLAGS, 0, 1 },
798 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
799 CPU_MMX_FLAGS, 0, 0 },
800 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
801 CPU_3DNOWA_FLAGS, 0, 1 },
802 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
803 CPU_SSE_FLAGS, 0, 0 },
804 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
805 CPU_SSE2_FLAGS, 0, 0 },
806 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
807 CPU_SSE3_FLAGS, 0, 0 },
808 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
809 CPU_SSSE3_FLAGS, 0, 0 },
810 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
811 CPU_SSE4_1_FLAGS, 0, 0 },
812 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
813 CPU_SSE4_2_FLAGS, 0, 0 },
814 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
815 CPU_SSE4_2_FLAGS, 0, 0 },
816 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
817 CPU_ANY_SSE_FLAGS, 0, 1 },
818 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
819 CPU_AVX_FLAGS, 0, 0 },
820 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
821 CPU_AVX2_FLAGS, 0, 0 },
822 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
823 CPU_AVX512F_FLAGS, 0, 0 },
824 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
825 CPU_AVX512CD_FLAGS, 0, 0 },
826 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
827 CPU_AVX512ER_FLAGS, 0, 0 },
828 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
829 CPU_AVX512PF_FLAGS, 0, 0 },
830 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
831 CPU_AVX512DQ_FLAGS, 0, 0 },
832 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
833 CPU_AVX512BW_FLAGS, 0, 0 },
834 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
835 CPU_AVX512VL_FLAGS, 0, 0 },
836 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
837 CPU_ANY_AVX_FLAGS, 0, 1 },
838 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
839 CPU_VMX_FLAGS, 0, 0 },
840 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
841 CPU_VMFUNC_FLAGS, 0, 0 },
842 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
843 CPU_SMX_FLAGS, 0, 0 },
844 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
845 CPU_XSAVE_FLAGS, 0, 0 },
846 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
847 CPU_XSAVEOPT_FLAGS, 0, 0 },
848 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
849 CPU_XSAVEC_FLAGS, 0, 0 },
850 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
851 CPU_XSAVES_FLAGS, 0, 0 },
852 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
853 CPU_AES_FLAGS, 0, 0 },
854 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
855 CPU_PCLMUL_FLAGS, 0, 0 },
856 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
857 CPU_PCLMUL_FLAGS, 1, 0 },
858 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
859 CPU_FSGSBASE_FLAGS, 0, 0 },
860 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
861 CPU_RDRND_FLAGS, 0, 0 },
862 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
863 CPU_F16C_FLAGS, 0, 0 },
864 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
865 CPU_BMI2_FLAGS, 0, 0 },
866 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
867 CPU_FMA_FLAGS, 0, 0 },
868 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
869 CPU_FMA4_FLAGS, 0, 0 },
870 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
871 CPU_XOP_FLAGS, 0, 0 },
872 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
873 CPU_LWP_FLAGS, 0, 0 },
874 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
875 CPU_MOVBE_FLAGS, 0, 0 },
876 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
877 CPU_CX16_FLAGS, 0, 0 },
878 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
879 CPU_EPT_FLAGS, 0, 0 },
880 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
881 CPU_LZCNT_FLAGS, 0, 0 },
882 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
883 CPU_HLE_FLAGS, 0, 0 },
884 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
885 CPU_RTM_FLAGS, 0, 0 },
886 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
887 CPU_INVPCID_FLAGS, 0, 0 },
888 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
889 CPU_CLFLUSH_FLAGS, 0, 0 },
890 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
891 CPU_NOP_FLAGS, 0, 0 },
892 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
893 CPU_SYSCALL_FLAGS, 0, 0 },
894 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
895 CPU_RDTSCP_FLAGS, 0, 0 },
896 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
897 CPU_3DNOW_FLAGS, 0, 0 },
898 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
899 CPU_3DNOWA_FLAGS, 0, 0 },
900 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
901 CPU_PADLOCK_FLAGS, 0, 0 },
902 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
903 CPU_SVME_FLAGS, 1, 0 },
904 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
905 CPU_SVME_FLAGS, 0, 0 },
906 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
907 CPU_SSE4A_FLAGS, 0, 0 },
908 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
909 CPU_ABM_FLAGS, 0, 0 },
910 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
911 CPU_BMI_FLAGS, 0, 0 },
912 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
913 CPU_TBM_FLAGS, 0, 0 },
914 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
915 CPU_ADX_FLAGS, 0, 0 },
916 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
917 CPU_RDSEED_FLAGS, 0, 0 },
918 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
919 CPU_PRFCHW_FLAGS, 0, 0 },
920 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
921 CPU_SMAP_FLAGS, 0, 0 },
922 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
923 CPU_MPX_FLAGS, 0, 0 },
924 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
925 CPU_SHA_FLAGS, 0, 0 },
926 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
927 CPU_CLFLUSHOPT_FLAGS, 0, 0 },
928 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
929 CPU_PREFETCHWT1_FLAGS, 0, 0 },
930 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
931 CPU_SE1_FLAGS, 0, 0 },
932 };
933
934 #ifdef I386COFF
935 /* Like s_lcomm_internal in gas/read.c but the alignment string
936 is allowed to be optional. */
937
938 static symbolS *
939 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
940 {
941 addressT align = 0;
942
943 SKIP_WHITESPACE ();
944
945 if (needs_align
946 && *input_line_pointer == ',')
947 {
948 align = parse_align (needs_align - 1);
949
950 if (align == (addressT) -1)
951 return NULL;
952 }
953 else
954 {
955 if (size >= 8)
956 align = 3;
957 else if (size >= 4)
958 align = 2;
959 else if (size >= 2)
960 align = 1;
961 else
962 align = 0;
963 }
964
965 bss_alloc (symbolP, size, align);
966 return symbolP;
967 }
968
/* Handler for the ".lcomm" directive on I386COFF targets.
   NEEDS_ALIGN comes from the pseudo-op table; it is doubled to build
   the flag value s_comm_internal expects.  */
static void
pe_lcomm (int needs_align)
{
  s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
974 #endif
975
/* GAS pseudo-op table for x86: maps directive names to their handler
   function plus an integer argument passed to that handler.  */
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},	/* single precision */
  {"dfloat", float_cons, 'd'},	/* double precision */
  {"tfloat", float_cons, 'x'},	/* extended (80-bit) precision */
  {"value", cons, 2},		/* 2-byte value */
  {"slong", signed_cons, 4},
  {"noopt", s_ignore, 0},	/* accepted but ignored */
  {"optim", s_ignore, 0},	/* accepted but ignored */
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
  {"code64", set_code_flag, CODE_64BIT},
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", (void (*) (int)) dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}			/* table terminator */
};
1020
/* For interface with expression ().  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup.  */
static struct hash_control *op_hash;

/* Hash table for register lookup.  */
static struct hash_control *reg_hash;
1029 \f
/* Fill the variable part of frag FRAGP with COUNT bytes of no-op
   padding.  Which byte patterns are used depends on the code size
   (16/32/64 bit) and on the processor selected via -march=/-mtune=
   (recorded in the frag's tc_frag_data).  */
void
i386_align_code (fragS *fragP, int count)
{
  /* Various efficient no-op patterns for aligning code labels.
     Note: Don't try to assemble the instructions in the comments.
     0L and 0w are not legal.  */
  static const char f32_1[] =
    {0x90};					/* nop			*/
  static const char f32_2[] =
    {0x66,0x90};				/* xchg %ax,%ax		*/
  static const char f32_3[] =
    {0x8d,0x76,0x00};				/* leal 0(%esi),%esi	*/
  static const char f32_4[] =
    {0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
  static const char f32_5[] =
    {0x90,					/* nop			*/
     0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
  static const char f32_6[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00};		/* leal 0L(%esi),%esi	*/
  static const char f32_7[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_8[] =
    {0x90,					/* nop			*/
     0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_9[] =
    {0x89,0xf6,					/* movl %esi,%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_10[] =
    {0x8d,0x76,0x00,				/* leal 0(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_11[] =
    {0x8d,0x74,0x26,0x00,			/* leal 0(%esi,1),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_12[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
     0x8d,0xbf,0x00,0x00,0x00,0x00};		/* leal 0L(%edi),%edi	*/
  static const char f32_13[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_14[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,	/* leal 0L(%esi,1),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f16_3[] =
    {0x8d,0x74,0x00};				/* lea 0(%si),%si	*/
  static const char f16_4[] =
    {0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
  static const char f16_5[] =
    {0x90,					/* nop			*/
     0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
  static const char f16_6[] =
    {0x89,0xf6,					/* mov %si,%si		*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const char f16_7[] =
    {0x8d,0x74,0x00,				/* lea 0(%si),%si	*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const char f16_8[] =
    {0x8d,0xb4,0x00,0x00,			/* lea 0w(%si),%si	*/
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
  static const char jump_31[] =
    {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,	/* jmp .+31; lotsa nops	*/
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
  /* Pattern tables, indexed by padding length minus one.  */
  static const char *const f32_patt[] = {
    f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
    f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
  };
  static const char *const f16_patt[] = {
    f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
  };
  /* nopl (%[re]ax) */
  static const char alt_3[] =
    {0x0f,0x1f,0x00};
  /* nopl 0(%[re]ax) */
  static const char alt_4[] =
    {0x0f,0x1f,0x40,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1) */
  static const char alt_5[] =
    {0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_6[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopl 0L(%[re]ax) */
  static const char alt_7[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_8[] =
    {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw 0L(%[re]ax,%[re]ax,1) */
  static const char alt_9[] =
    {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_10[] =
    {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_11[] =
    {0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_12[] =
    {0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_13[] =
    {0x66,
     0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_14[] =
    {0x66,
     0x66,
     0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* data16
     data16
     data16
     data16
     data16
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_15[] =
    {0x66,
     0x66,
     0x66,
     0x66,
     0x66,
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1)
     nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_short_11[] =
    {0x0f,0x1f,0x44,0x00,0x00,
     0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1)
     nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_short_12[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00,
     0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1)
     nopl 0L(%[re]ax) */
  static const char alt_short_13[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00,
     0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax)
     nopl 0L(%[re]ax) */
  static const char alt_short_14[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
     0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax)
     nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_short_15[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
     0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  static const char *const alt_short_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
    alt_short_14, alt_short_15
  };
  static const char *const alt_long_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
    alt_long_14, alt_long_15
  };

  /* Only align for at least a positive non-zero boundary. */
  if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
    return;

  /* We need to decide which NOP sequence to use for 32bit and
     64bit. When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
	PROCESSOR_GENERIC32, f32_patt will be used.
     2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
	PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
	PROCESSOR_GENERIC64, alt_long_patt will be used.
     3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
	PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
	will be used.

     When -mtune= isn't used, alt_long_patt will be used if
     cpu_arch_isa_flags has CpuNop.  Otherwise, f32_patt will
     be used.

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags.  */

  if (flag_code == CODE_16BIT)
    {
      if (count > 8)
	{
	  memcpy (fragP->fr_literal + fragP->fr_fix,
		  jump_31, count);
	  /* Adjust jump offset.  */
	  fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	}
      else
	memcpy (fragP->fr_literal + fragP->fr_fix,
		f16_patt[count - 1], count);
    }
  else
    {
      const char *const *patt = NULL;

      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	{
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
	  switch (cpu_arch_tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_long_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	      patt = alt_long_patt;
	      break;
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_BT:
	      patt = alt_short_patt;
	      break;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_GENERIC32:
	      patt = f32_patt;
	      break;
	    }
	}
      else
	{
	  switch (fragP->tc_frag_data.tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN.  */
	      abort ();
	      break;

	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_BT:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
		 with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_short_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_long_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_GENERIC64:
	      patt = alt_long_patt;
	      break;
	    }
	}

      if (patt == f32_patt)
	{
	  /* If the padding is less than 15 bytes, we use the normal
	     ones.  Otherwise, we use a jump instruction and adjust
	     its offset.   */
	  int limit;

	  /* For 64bit, the limit is 3 bytes.  */
	  if (flag_code == CODE_64BIT
	      && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
	    limit = 3;
	  else
	    limit = 15;
	  if (count < limit)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt[count - 1], count);
	  else
	    {
	      memcpy (fragP->fr_literal + fragP->fr_fix,
		      jump_31, count);
	      /* Adjust jump offset.  */
	      fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	    }
	}
      else
	{
	  /* Maximum length of an instruction is 15 byte.  If the
	     padding is greater than 15 bytes and we don't use jump,
	     we have to break it into smaller pieces.  */
	  int padding = count;
	  while (padding > 15)
	    {
	      padding -= 15;
	      memcpy (fragP->fr_literal + fragP->fr_fix + padding,
		      patt [14], 15);
	    }

	  if (padding)
	    memcpy (fragP->fr_literal + fragP->fr_fix,
		    patt [padding - 1], padding);
	}
    }
  fragP->fr_var = count;
}
1375
1376 static INLINE int
1377 operand_type_all_zero (const union i386_operand_type *x)
1378 {
1379 switch (ARRAY_SIZE(x->array))
1380 {
1381 case 3:
1382 if (x->array[2])
1383 return 0;
1384 case 2:
1385 if (x->array[1])
1386 return 0;
1387 case 1:
1388 return !x->array[0];
1389 default:
1390 abort ();
1391 }
1392 }
1393
1394 static INLINE void
1395 operand_type_set (union i386_operand_type *x, unsigned int v)
1396 {
1397 switch (ARRAY_SIZE(x->array))
1398 {
1399 case 3:
1400 x->array[2] = v;
1401 case 2:
1402 x->array[1] = v;
1403 case 1:
1404 x->array[0] = v;
1405 break;
1406 default:
1407 abort ();
1408 }
1409 }
1410
1411 static INLINE int
1412 operand_type_equal (const union i386_operand_type *x,
1413 const union i386_operand_type *y)
1414 {
1415 switch (ARRAY_SIZE(x->array))
1416 {
1417 case 3:
1418 if (x->array[2] != y->array[2])
1419 return 0;
1420 case 2:
1421 if (x->array[1] != y->array[1])
1422 return 0;
1423 case 1:
1424 return x->array[0] == y->array[0];
1425 break;
1426 default:
1427 abort ();
1428 }
1429 }
1430
1431 static INLINE int
1432 cpu_flags_all_zero (const union i386_cpu_flags *x)
1433 {
1434 switch (ARRAY_SIZE(x->array))
1435 {
1436 case 3:
1437 if (x->array[2])
1438 return 0;
1439 case 2:
1440 if (x->array[1])
1441 return 0;
1442 case 1:
1443 return !x->array[0];
1444 default:
1445 abort ();
1446 }
1447 }
1448
1449 static INLINE void
1450 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1451 {
1452 switch (ARRAY_SIZE(x->array))
1453 {
1454 case 3:
1455 x->array[2] = v;
1456 case 2:
1457 x->array[1] = v;
1458 case 1:
1459 x->array[0] = v;
1460 break;
1461 default:
1462 abort ();
1463 }
1464 }
1465
1466 static INLINE int
1467 cpu_flags_equal (const union i386_cpu_flags *x,
1468 const union i386_cpu_flags *y)
1469 {
1470 switch (ARRAY_SIZE(x->array))
1471 {
1472 case 3:
1473 if (x->array[2] != y->array[2])
1474 return 0;
1475 case 2:
1476 if (x->array[1] != y->array[1])
1477 return 0;
1478 case 1:
1479 return x->array[0] == y->array[0];
1480 break;
1481 default:
1482 abort ();
1483 }
1484 }
1485
1486 static INLINE int
1487 cpu_flags_check_cpu64 (i386_cpu_flags f)
1488 {
1489 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1490 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1491 }
1492
1493 static INLINE i386_cpu_flags
1494 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1495 {
1496 switch (ARRAY_SIZE (x.array))
1497 {
1498 case 3:
1499 x.array [2] &= y.array [2];
1500 case 2:
1501 x.array [1] &= y.array [1];
1502 case 1:
1503 x.array [0] &= y.array [0];
1504 break;
1505 default:
1506 abort ();
1507 }
1508 return x;
1509 }
1510
1511 static INLINE i386_cpu_flags
1512 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1513 {
1514 switch (ARRAY_SIZE (x.array))
1515 {
1516 case 3:
1517 x.array [2] |= y.array [2];
1518 case 2:
1519 x.array [1] |= y.array [1];
1520 case 1:
1521 x.array [0] |= y.array [0];
1522 break;
1523 default:
1524 abort ();
1525 }
1526 return x;
1527 }
1528
1529 static INLINE i386_cpu_flags
1530 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1531 {
1532 switch (ARRAY_SIZE (x.array))
1533 {
1534 case 3:
1535 x.array [2] &= ~y.array [2];
1536 case 2:
1537 x.array [1] &= ~y.array [1];
1538 case 1:
1539 x.array [0] &= ~y.array [0];
1540 break;
1541 default:
1542 abort ();
1543 }
1544 return x;
1545 }
1546
/* Bits returned by cpu_flags_match to describe how well an insn
   template's CPU requirements match the selected architecture.  */
#define CPU_FLAGS_ARCH_MATCH 0x1
#define CPU_FLAGS_64BIT_MATCH 0x2
#define CPU_FLAGS_AES_MATCH 0x4
#define CPU_FLAGS_PCLMUL_MATCH 0x8
#define CPU_FLAGS_AVX_MATCH 0x10

/* Everything except the 64-bit compatibility bit.  */
#define CPU_FLAGS_32BIT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
   | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
/* All match bits set.  */
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1558
/* Return CPU flags match bits (CPU_FLAGS_*_MATCH) describing how well
   template T's CPU requirements are satisfied by the current
   cpu_arch_flags.  */

static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  /* 64-bit compatibility is checked separately from the feature bits.  */
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      /* CPU now holds the required features that the arch provides.  */
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx || sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1613
1614 static INLINE i386_operand_type
1615 operand_type_and (i386_operand_type x, i386_operand_type y)
1616 {
1617 switch (ARRAY_SIZE (x.array))
1618 {
1619 case 3:
1620 x.array [2] &= y.array [2];
1621 case 2:
1622 x.array [1] &= y.array [1];
1623 case 1:
1624 x.array [0] &= y.array [0];
1625 break;
1626 default:
1627 abort ();
1628 }
1629 return x;
1630 }
1631
1632 static INLINE i386_operand_type
1633 operand_type_or (i386_operand_type x, i386_operand_type y)
1634 {
1635 switch (ARRAY_SIZE (x.array))
1636 {
1637 case 3:
1638 x.array [2] |= y.array [2];
1639 case 2:
1640 x.array [1] |= y.array [1];
1641 case 1:
1642 x.array [0] |= y.array [0];
1643 break;
1644 default:
1645 abort ();
1646 }
1647 return x;
1648 }
1649
1650 static INLINE i386_operand_type
1651 operand_type_xor (i386_operand_type x, i386_operand_type y)
1652 {
1653 switch (ARRAY_SIZE (x.array))
1654 {
1655 case 3:
1656 x.array [2] ^= y.array [2];
1657 case 2:
1658 x.array [1] ^= y.array [1];
1659 case 1:
1660 x.array [0] ^= y.array [0];
1661 break;
1662 default:
1663 abort ();
1664 }
1665 return x;
1666 }
1667
/* Frequently used operand-type masks, built from the generated
   OPERAND_TYPE_* initializers in opcodes/i386-init.h.  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1695
/* Broad operand classes tested by operand_type_check.  */
enum operand_type
{
  reg,		/* any of the reg8/reg16/reg32/reg64 bits */
  imm,		/* any immediate size bit */
  disp,		/* any displacement size bit */
  anymem	/* any displacement bit or BaseIndex */
};
1703
1704 static INLINE int
1705 operand_type_check (i386_operand_type t, enum operand_type c)
1706 {
1707 switch (c)
1708 {
1709 case reg:
1710 return (t.bitfield.reg8
1711 || t.bitfield.reg16
1712 || t.bitfield.reg32
1713 || t.bitfield.reg64);
1714
1715 case imm:
1716 return (t.bitfield.imm8
1717 || t.bitfield.imm8s
1718 || t.bitfield.imm16
1719 || t.bitfield.imm32
1720 || t.bitfield.imm32s
1721 || t.bitfield.imm64);
1722
1723 case disp:
1724 return (t.bitfield.disp8
1725 || t.bitfield.disp16
1726 || t.bitfield.disp32
1727 || t.bitfield.disp32s
1728 || t.bitfield.disp64);
1729
1730 case anymem:
1731 return (t.bitfield.disp8
1732 || t.bitfield.disp16
1733 || t.bitfield.disp32
1734 || t.bitfield.disp32s
1735 || t.bitfield.disp64
1736 || t.bitfield.baseindex);
1737
1738 default:
1739 abort ();
1740 }
1741
1742 return 0;
1743 }
1744
1745 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1746 operand J for instruction template T. */
1747
1748 static INLINE int
1749 match_reg_size (const insn_template *t, unsigned int j)
1750 {
1751 return !((i.types[j].bitfield.byte
1752 && !t->operand_types[j].bitfield.byte)
1753 || (i.types[j].bitfield.word
1754 && !t->operand_types[j].bitfield.word)
1755 || (i.types[j].bitfield.dword
1756 && !t->operand_types[j].bitfield.dword)
1757 || (i.types[j].bitfield.qword
1758 && !t->operand_types[j].bitfield.qword));
1759 }
1760
1761 /* Return 1 if there is no conflict in any size on operand J for
1762 instruction template T. */
1763
1764 static INLINE int
1765 match_mem_size (const insn_template *t, unsigned int j)
1766 {
1767 return (match_reg_size (t, j)
1768 && !((i.types[j].bitfield.unspecified
1769 && !t->operand_types[j].bitfield.unspecified)
1770 || (i.types[j].bitfield.fword
1771 && !t->operand_types[j].bitfield.fword)
1772 || (i.types[j].bitfield.tbyte
1773 && !t->operand_types[j].bitfield.tbyte)
1774 || (i.types[j].bitfield.xmmword
1775 && !t->operand_types[j].bitfield.xmmword)
1776 || (i.types[j].bitfield.ymmword
1777 && !t->operand_types[j].bitfield.ymmword)
1778 || (i.types[j].bitfield.zmmword
1779 && !t->operand_types[j].bitfield.zmmword)));
1780 }
1781
/* Return 1 if there is no size conflict on any operands for
   instruction template T.  */

static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  /* A direct-order mismatch on a non-D template is final.  */
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  The D/FloatD modifier means the two operands may
     be swapped; re-check each operand against the other slot.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1843
/* Return 1 if the overlap of the template's and the given operand
   type (OVERLAP) is an acceptable match for the GIVEN operand; on
   failure record operand_type_mismatch in i.error and return 0.  */

static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Size and addressing-form bits are checked elsewhere; clear them
     so only the "kind" bits decide whether anything overlaps.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  temp.bitfield.zmmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  /* BaseIndex and JumpAbsolute must survive the intersection intact.  */
  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1872
/* If given types g0 and g1 are registers they must be of the same type
   unless the expected operand type register overlap is null.
   Note that Acc in a template matches every size of reg.  */

static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Non-register operands never conflict here.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Registers of identical size always match.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* An accumulator match (m0/m1) widens the template type to all
     register sizes before checking for an overlap.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* Mixed sizes are OK only when the template types share no size.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1923
1924 static INLINE unsigned int
1925 register_number (const reg_entry *r)
1926 {
1927 unsigned int nr = r->reg_num;
1928
1929 if (r->reg_flags & RegRex)
1930 nr += 8;
1931
1932 return nr;
1933 }
1934
1935 static INLINE unsigned int
1936 mode_from_disp_size (i386_operand_type t)
1937 {
1938 if (t.bitfield.disp8 || t.bitfield.vec_disp8)
1939 return 1;
1940 else if (t.bitfield.disp16
1941 || t.bitfield.disp32
1942 || t.bitfield.disp32s)
1943 return 2;
1944 else
1945 return 0;
1946 }
1947
1948 static INLINE int
1949 fits_in_signed_byte (addressT num)
1950 {
1951 return num + 0x80 <= 0xff;
1952 }
1953
1954 static INLINE int
1955 fits_in_unsigned_byte (addressT num)
1956 {
1957 return num <= 0xff;
1958 }
1959
1960 static INLINE int
1961 fits_in_unsigned_word (addressT num)
1962 {
1963 return num <= 0xffff;
1964 }
1965
1966 static INLINE int
1967 fits_in_signed_word (addressT num)
1968 {
1969 return num + 0x8000 <= 0xffff;
1970 }
1971
/* Return nonzero if NUM fits in a signed 32-bit value.  Without
   BFD64, addressT is at most 32 bits wide, so everything fits.  */
static INLINE int
fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
  return 1;
#else
  return num + 0x80000000 <= 0xffffffff;
#endif
}				/* fits_in_signed_long() */
1981
/* Return nonzero if NUM fits in an unsigned 32-bit value.  Without
   BFD64, addressT is at most 32 bits wide, so everything fits.  */
static INLINE int
fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
{
#ifndef BFD64
  return 1;
#else
  return num <= 0xffffffff;
#endif
}				/* fits_in_unsigned_long() */
1991
1992 static INLINE int
1993 fits_in_vec_disp8 (offsetT num)
1994 {
1995 int shift = i.memshift;
1996 unsigned int mask;
1997
1998 if (shift == -1)
1999 abort ();
2000
2001 mask = (1 << shift) - 1;
2002
2003 /* Return 0 if NUM isn't properly aligned. */
2004 if ((num & mask))
2005 return 0;
2006
2007 /* Check if NUM will fit in 8bit after shift. */
2008 return fits_in_signed_byte (num >> shift);
2009 }
2010
2011 static INLINE int
2012 fits_in_imm4 (offsetT num)
2013 {
2014 return (num & 0xf) == num;
2015 }
2016
/* Return the operand type with every immediate-size bit set that NUM
   can be encoded as.  Imm64 is always set since any value fits there;
   progressively smaller encodings are added as they apply.  */
static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
2070
/* Truncate VAL to SIZE bytes (1, 2, 4 or 8), warning if significant
   bits are lost, and return the truncated value.  */
static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    /* "2 << 31" instead of "1 << 32" so the shift count is always
       valid even when addressT is only 32 bits wide.  */
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless all discarded bits agree with the sign bit, i.e. the
     value genuinely does not fit in SIZE bytes.  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
2105
/* Classification of a prefix byte returned by add_prefix.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* a prefix of the same class was already set */
  PREFIX_LOCK,		/* lock prefix added */
  PREFIX_REP,		/* rep/repne prefix added */
  PREFIX_OTHER		/* any other prefix added */
};
2113
/* Record PREFIX in the prefix slot of the current instruction.

   Returns
   a. PREFIX_EXIST if attempting to add a prefix where one from the
   same class already exists.
   b. PREFIX_LOCK if lock prefix is added.
   c. PREFIX_REP if rep/repne prefix is added.
   d. PREFIX_OTHER if other prefix is added.
 */

static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      /* REX prefixes merge bitwise, but duplicating REX.W or mixing
	 R/X/B extension bits is an error.  */
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      /* Classify the prefix into its slot in i.prefix[].  */
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
2191
2192 static void
2193 update_code_flag (int value, int check)
2194 {
2195 PRINTF_LIKE ((*as_error));
2196
2197 flag_code = (enum flag_code) value;
2198 if (flag_code == CODE_64BIT)
2199 {
2200 cpu_arch_flags.bitfield.cpu64 = 1;
2201 cpu_arch_flags.bitfield.cpuno64 = 0;
2202 }
2203 else
2204 {
2205 cpu_arch_flags.bitfield.cpu64 = 0;
2206 cpu_arch_flags.bitfield.cpuno64 = 1;
2207 }
2208 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2209 {
2210 if (check)
2211 as_error = as_fatal;
2212 else
2213 as_error = as_bad;
2214 (*as_error) (_("64bit mode not supported on `%s'."),
2215 cpu_arch_name ? cpu_arch_name : default_arch);
2216 }
2217 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2218 {
2219 if (check)
2220 as_error = as_fatal;
2221 else
2222 as_error = as_bad;
2223 (*as_error) (_("32bit mode not supported on `%s'."),
2224 cpu_arch_name ? cpu_arch_name : default_arch);
2225 }
2226 stackop_size = '\0';
2227 }
2228
/* Directive handler (.code16/.code32/.code64): change the code size
   without treating an unsupported mode as fatal.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2234
/* Handler for .code16gcc: 16-bit mode in which stack operations
   default to 32 bits (LONG_MNEM_SUFFIX).  */
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  /* Only CODE_16BIT is meaningful here.  */
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
}
2245
2246 static void
2247 set_intel_syntax (int syntax_flag)
2248 {
2249 /* Find out if register prefixing is specified. */
2250 int ask_naked_reg = 0;
2251
2252 SKIP_WHITESPACE ();
2253 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2254 {
2255 char *string = input_line_pointer;
2256 int e = get_symbol_end ();
2257
2258 if (strcmp (string, "prefix") == 0)
2259 ask_naked_reg = 1;
2260 else if (strcmp (string, "noprefix") == 0)
2261 ask_naked_reg = -1;
2262 else
2263 as_bad (_("bad argument to syntax directive."));
2264 *input_line_pointer = e;
2265 }
2266 demand_empty_rest_of_line ();
2267
2268 intel_syntax = syntax_flag;
2269
2270 if (ask_naked_reg == 0)
2271 allow_naked_reg = (intel_syntax
2272 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2273 else
2274 allow_naked_reg = (ask_naked_reg < 0);
2275
2276 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2277
2278 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2279 identifier_chars['$'] = intel_syntax ? '$' : 0;
2280 register_prefix = allow_naked_reg ? "" : "%";
2281 }
2282
/* Handler for .intel_mnemonic / .att_mnemonic: select which mnemonic
   set is accepted.  */
static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2288
/* Handler for .allow_index_reg / .disallow_index_reg.  */
static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2294
2295 static void
2296 set_check (int what)
2297 {
2298 enum check_kind *kind;
2299 const char *str;
2300
2301 if (what)
2302 {
2303 kind = &operand_check;
2304 str = "operand";
2305 }
2306 else
2307 {
2308 kind = &sse_check;
2309 str = "sse";
2310 }
2311
2312 SKIP_WHITESPACE ();
2313
2314 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2315 {
2316 char *string = input_line_pointer;
2317 int e = get_symbol_end ();
2318
2319 if (strcmp (string, "none") == 0)
2320 *kind = check_none;
2321 else if (strcmp (string, "warning") == 0)
2322 *kind = check_warning;
2323 else if (strcmp (string, "error") == 0)
2324 *kind = check_error;
2325 else
2326 as_bad (_("bad argument to %s_check directive."), str);
2327 *input_line_pointer = e;
2328 }
2329 else
2330 as_bad (_("missing argument for %s_check directive"), str);
2331
2332 demand_empty_rest_of_line ();
2333 }
2334
2335 static void
2336 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2337 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2338 {
2339 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2340 static const char *arch;
2341
2342 /* Intel LIOM is only supported on ELF. */
2343 if (!IS_ELF)
2344 return;
2345
2346 if (!arch)
2347 {
2348 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2349 use default_arch. */
2350 arch = cpu_arch_name;
2351 if (!arch)
2352 arch = default_arch;
2353 }
2354
2355 /* If we are targeting Intel L1OM, we must enable it. */
2356 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2357 || new_flag.bitfield.cpul1om)
2358 return;
2359
2360 /* If we are targeting Intel K1OM, we must enable it. */
2361 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2362 || new_flag.bitfield.cpuk1om)
2363 return;
2364
2365 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2366 #endif
2367 }
2368
/* Handler for the .arch directive.  Accepts either a full architecture
   name (replacing the current feature set) or a ".ext"/".noext"
   sub-architecture extension (OR-ing in or masking out feature bits),
   optionally followed by ",jumps" or ",nojumps".  */
static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      if (*string != '.')
		{
		  /* Full architecture name: replace the feature set
		     wholesale, re-applying the current code size.  */
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  /* Fall through to the optional ",nojumps" part.  */
		  break;
		}

	      /* Sub-architecture extension: OR in its flags, or for a
		 negated (".no*") entry mask them out.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  /* Record the extension name, appending to any
		     previously accumulated sub-arch names.  */
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      /* Optional ",nojumps"/",jumps" modifier controlling automatic
	 promotion of conditional jumps.  */
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2465
2466 enum bfd_architecture
2467 i386_arch (void)
2468 {
2469 if (cpu_arch_isa == PROCESSOR_L1OM)
2470 {
2471 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2472 || flag_code != CODE_64BIT)
2473 as_fatal (_("Intel L1OM is 64bit ELF only"));
2474 return bfd_arch_l1om;
2475 }
2476 else if (cpu_arch_isa == PROCESSOR_K1OM)
2477 {
2478 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2479 || flag_code != CODE_64BIT)
2480 as_fatal (_("Intel K1OM is 64bit ELF only"));
2481 return bfd_arch_k1om;
2482 }
2483 else
2484 return bfd_arch_i386;
2485 }
2486
2487 unsigned long
2488 i386_mach (void)
2489 {
2490 if (!strncmp (default_arch, "x86_64", 6))
2491 {
2492 if (cpu_arch_isa == PROCESSOR_L1OM)
2493 {
2494 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2495 || default_arch[6] != '\0')
2496 as_fatal (_("Intel L1OM is 64bit ELF only"));
2497 return bfd_mach_l1om;
2498 }
2499 else if (cpu_arch_isa == PROCESSOR_K1OM)
2500 {
2501 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2502 || default_arch[6] != '\0')
2503 as_fatal (_("Intel K1OM is 64bit ELF only"));
2504 return bfd_mach_k1om;
2505 }
2506 else if (default_arch[6] == '\0')
2507 return bfd_mach_x86_64;
2508 else
2509 return bfd_mach_x64_32;
2510 }
2511 else if (!strcmp (default_arch, "i386"))
2512 return bfd_mach_i386_i386;
2513 else
2514 as_fatal (_("unknown architecture"));
2515 }
2516 \f
/* One-time assembler initialization: build the opcode and register
   hash tables, fill in the lexical character classification tables,
   and set the DWARF CFI parameters that depend on the code size.  */
void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table.  */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop.  */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    /* i386_optab is sorted so that all templates for one mnemonic are
       adjacent; each run of equal names becomes one hash entry.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew.  */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table.  */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and registers are matched case-insensitively
	       by folding upper case to lower here.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }
	else if (c == '{' || c == '}')
	  /* Braces are used for AVX-512 operand decorations.  */
	  operand_chars[c] = c;

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

  /* CFI parameters depend on the address size.  */
  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2648
/* Print occupancy statistics for the opcode and register hash tables
   to FILE.  */
void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2655 \f
2656 #ifdef DEBUG386
2657
2658 /* Debugging routines for md_assemble. */
2659 static void pte (insn_template *);
2660 static void pt (i386_operand_type);
2661 static void pe (expressionS *);
2662 static void ps (symbolS *);
2663
/* Debug dump of the parsed instruction X to stdout, prefixed with
   LINE.  Prints the matched template, addressing info, modrm/sib/rex
   bytes, and each operand's type plus its register/immediate/
   displacement value.  */
static void
pi (char *line, i386_insn *x)
{
  unsigned int j;

  fprintf (stdout, "%s: template ", line);
  pte (&x->tm);
  fprintf (stdout, "  address: base %s  index %s  scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, "  modrm:  mode %x  reg %x  reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, "  sib:  base %x  index %x  scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, "  rex: 64bit %x  extX %x  extY %x  extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
    {
      fprintf (stdout, "    #%d:  ", j + 1);
      pt (x->types[j]);
      fprintf (stdout, "\n");
      /* Register-class operands print the register name.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.regzmm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
	pe (x->op[j].imms);
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
    }
}
2709
/* Debug dump of instruction template T: operand count, opcode,
   extension opcode, D/W modifier bits, and per-operand types.  */
static void
pte (insn_template *t)
{
  unsigned int j;
  fprintf (stdout, " %d operands ", t->operands);
  fprintf (stdout, "opcode %x ", t->base_opcode);
  if (t->extension_opcode != None)
    fprintf (stdout, "ext %x ", t->extension_opcode);
  if (t->opcode_modifier.d)
    fprintf (stdout, "D");
  if (t->opcode_modifier.w)
    fprintf (stdout, "W");
  fprintf (stdout, "\n");
  for (j = 0; j < t->operands; j++)
    {
      fprintf (stdout, "    #%d type ", j + 1);
      pt (t->operand_types[j]);
      fprintf (stdout, "\n");
    }
}
2730
/* Debug dump of expression E: operation, constant part, and any
   attached symbols.  */
static void
pe (expressionS *e)
{
  fprintf (stdout, "    operation     %d\n", e->X_op);
  fprintf (stdout, "    add_number    %ld (%lx)\n",
	   (long) e->X_add_number, (long) e->X_add_number);
  if (e->X_add_symbol)
    {
      fprintf (stdout, "    add_symbol    ");
      ps (e->X_add_symbol);
      fprintf (stdout, "\n");
    }
  if (e->X_op_symbol)
    {
      fprintf (stdout, "    op_symbol    ");
      ps (e->X_op_symbol);
      fprintf (stdout, "\n");
    }
}
2750
/* Debug dump of symbol S: name, external flag, and owning segment.  */
static void
ps (symbolS *s)
{
  fprintf (stdout, "%s type %s%s",
	   S_GET_NAME (s),
	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
	   segment_name (S_GET_SEGMENT (s)));
}
2759
2760 static struct type_name
2761 {
2762 i386_operand_type mask;
2763 const char *name;
2764 }
2765 const type_names[] =
2766 {
2767 { OPERAND_TYPE_REG8, "r8" },
2768 { OPERAND_TYPE_REG16, "r16" },
2769 { OPERAND_TYPE_REG32, "r32" },
2770 { OPERAND_TYPE_REG64, "r64" },
2771 { OPERAND_TYPE_IMM8, "i8" },
2772 { OPERAND_TYPE_IMM8, "i8s" },
2773 { OPERAND_TYPE_IMM16, "i16" },
2774 { OPERAND_TYPE_IMM32, "i32" },
2775 { OPERAND_TYPE_IMM32S, "i32s" },
2776 { OPERAND_TYPE_IMM64, "i64" },
2777 { OPERAND_TYPE_IMM1, "i1" },
2778 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2779 { OPERAND_TYPE_DISP8, "d8" },
2780 { OPERAND_TYPE_DISP16, "d16" },
2781 { OPERAND_TYPE_DISP32, "d32" },
2782 { OPERAND_TYPE_DISP32S, "d32s" },
2783 { OPERAND_TYPE_DISP64, "d64" },
2784 { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
2785 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2786 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2787 { OPERAND_TYPE_CONTROL, "control reg" },
2788 { OPERAND_TYPE_TEST, "test reg" },
2789 { OPERAND_TYPE_DEBUG, "debug reg" },
2790 { OPERAND_TYPE_FLOATREG, "FReg" },
2791 { OPERAND_TYPE_FLOATACC, "FAcc" },
2792 { OPERAND_TYPE_SREG2, "SReg2" },
2793 { OPERAND_TYPE_SREG3, "SReg3" },
2794 { OPERAND_TYPE_ACC, "Acc" },
2795 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2796 { OPERAND_TYPE_REGMMX, "rMMX" },
2797 { OPERAND_TYPE_REGXMM, "rXMM" },
2798 { OPERAND_TYPE_REGYMM, "rYMM" },
2799 { OPERAND_TYPE_REGZMM, "rZMM" },
2800 { OPERAND_TYPE_REGMASK, "Mask reg" },
2801 { OPERAND_TYPE_ESSEG, "es" },
2802 };
2803
/* Debug dump of operand type T: print the name of every type bit set
   in T, as a comma-separated list.  */
static void
pt (i386_operand_type t)
{
  unsigned int j;
  i386_operand_type a;

  for (j = 0; j < ARRAY_SIZE (type_names); j++)
    {
      a = operand_type_and (t, type_names[j].mask);
      if (!operand_type_all_zero (&a))
	fprintf (stdout, "%s, ", type_names[j].name);
    }
  fflush (stdout);
}
2818
2819 #endif /* DEBUG386 */
2820 \f
/* Map a fixup described by SIZE (in bytes), PCREL, SIGN (>0 signed,
   0 unsigned, <0 don't care) and an optional operator-specified
   relocation OTHER onto the BFD relocation code to emit.  BND_PREFIX
   selects the *_BND flavour of 4-byte pc-relative relocations for
   64-bit objects.  Returns NO_RELOC after diagnosing an impossible
   combination.  */
static bfd_reloc_code_real_type
reloc (unsigned int size,
       int pcrel,
       int sign,
       int bnd_prefix,
       bfd_reloc_code_real_type other)
{
  if (other != NO_RELOC)
    {
      reloc_howto_type *rel;

      /* For 8-byte fields, widen 32-bit relocations to their 64-bit
	 counterparts (returning directly where no further checks are
	 needed).  */
      if (size == 8)
	switch (other)
	  {
	  case BFD_RELOC_X86_64_GOT32:
	    return BFD_RELOC_X86_64_GOT64;
	    break;
	  case BFD_RELOC_X86_64_GOTPLT64:
	    return BFD_RELOC_X86_64_GOTPLT64;
	    break;
	  case BFD_RELOC_X86_64_PLTOFF64:
	    return BFD_RELOC_X86_64_PLTOFF64;
	    break;
	  case BFD_RELOC_X86_64_GOTPC32:
	    other = BFD_RELOC_X86_64_GOTPC64;
	    break;
	  case BFD_RELOC_X86_64_GOTPCREL:
	    other = BFD_RELOC_X86_64_GOTPCREL64;
	    break;
	  case BFD_RELOC_X86_64_TPOFF32:
	    other = BFD_RELOC_X86_64_TPOFF64;
	    break;
	  case BFD_RELOC_X86_64_DTPOFF32:
	    other = BFD_RELOC_X86_64_DTPOFF64;
	    break;
	  default:
	    break;
	  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* @SIZE relocations: widen for 8-byte fields; pc-relative forms
	 do not exist.  */
      if (other == BFD_RELOC_SIZE32)
	{
	  if (size == 8)
	    other = BFD_RELOC_SIZE64;
	  if (pcrel)
	    {
	      as_bad (_("there are no pc-relative size relocations"));
	      return NO_RELOC;
	    }
	}
#endif

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;

      /* Validate the requested relocation against its BFD howto:
	 size, pc-relativeness and signedness must all agree.  */
      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  /* No operator-specified relocation: pick a generic one by size.  */
  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return (bnd_prefix && object_64bit
			? BFD_RELOC_X86_64_PC32_BND
			: BFD_RELOC_32_PCREL);
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}
2932
2933 /* Here we decide which fixups can be adjusted to make them relative to
2934 the beginning of the section instead of the symbol. Basically we need
2935 to make sure that the dynamic relocations are done correctly, so in
2936 some cases we force the original symbol to be used. */
2937
2938 int
2939 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2940 {
2941 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2942 if (!IS_ELF)
2943 return 1;
2944
2945 /* Don't adjust pc-relative references to merge sections in 64-bit
2946 mode. */
2947 if (use_rela_relocations
2948 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2949 && fixP->fx_pcrel)
2950 return 0;
2951
2952 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2953 and changed later by validate_fix. */
2954 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2955 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2956 return 0;
2957
2958 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2959 for size relocations. */
2960 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2961 || fixP->fx_r_type == BFD_RELOC_SIZE64
2962 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2963 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2964 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2965 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2966 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2967 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2968 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2969 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2970 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2971 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2972 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2973 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2974 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2975 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2976 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2977 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2978 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2979 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2980 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2981 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2982 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2983 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2984 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2985 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2986 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2987 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2988 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2989 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2990 return 0;
2991 #endif
2992 return 1;
2993 }
2994
/* Classify an x87 mnemonic for Intel-syntax operand sizing:
   0 = not an FPU math op (also fxsave/fxrstor), 1 = ordinary FP op,
   2 = integer FP op (fi*), 3 = control/state op (fldcw, fnstenv, ...).
   The value is meaningful only for opcodes with (memory) operands;
   opcodes without operands are handled loosely for speed.  */

static int
intel_float_operand (const char *mnemonic)
{
  char second;

  if (mnemonic[0] != 'f')
    return 0;			/* non-math */

  /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
     the fs segment override prefix not currently handled because no
     call path can make opcodes without operands get here.  */
  second = mnemonic[1];
  if (second == 'i')
    return 2;			/* integer op */

  if (second == 'l')
    {
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3;		/* fldcw/fldenv */
    }
  else if (second == 'n')
    {
      if (mnemonic[2] != 'o')	/* fnop stays a plain op */
	return 3;		/* non-waiting control op */
    }
  else if (second == 'r')
    {
      if (mnemonic[2] == 's')
	return 3;		/* frstor/frstpm */
    }
  else if (second == 's')
    {
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't')
	switch (mnemonic[3])
	  {
	  case 'c':		/* fstcw */
	  case 'd':		/* fstdw */
	  case 'e':		/* fstenv */
	  case 's':		/* fsts[gw] */
	    return 3;
	  }
    }
  else if (second == 'x')
    {
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0;		/* fxsave/fxrstor are not really math ops */
    }

  return 1;
}
3047
/* Build the VEX prefix (2- or 3-byte form) for the current insn,
   filling in i.vex from the template, REX bits and the VEX.vvvv
   register specifier.  T points at the matched template; T[1] may be
   used when operands are swapped to enable the 2-byte form.  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier.  */
  if (i.vex.register_specifier)
    {
      /* VEX.vvvv holds the register number in 1's complement.  */
      register_specifier =
	~register_number (i.vex.register_specifier) & 0xf;
      gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      /* Swap the first and last operands (types and values).  */
      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      /* After the swap only REX_R is needed, and modrm reg/regmem
	 exchange roles.  */
      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* The implied SIMD prefix (none/66/F3/F2) is taken from the second
     byte of the base opcode and becomes the VEX "pp" field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible.  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit; it is stored inverted.  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      /* Select the escape byte (0xc4 for VEX, 0x8f for XOP) and the
	 "mmmmm" opcode-map field.  */
      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
3190
/* Build the 4-byte EVEX prefix for the current insn, filling in i.vex
   from the template, REX/VREX bits, the register specifier, masking,
   broadcast and rounding-control information.  */

static void
build_evex_prefix (void)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int m, w;
  rex_byte vrex_used = 0;

  /* Check register specifier.  */
  if (i.vex.register_specifier)
    {
      gas_assert ((i.vrex & REX_X) == 0);

      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      /* The upper 16 registers are encoded in the fourth byte of the
	 EVEX prefix.  */
      if (!(i.vex.register_specifier->reg_flags & RegVRex))
	i.vex.bytes[3] = 0x8;
      /* vvvv is stored in 1's complement.  */
      register_specifier = ~register_specifier & 0xf;
    }
  else
    {
      register_specifier = 0xf;

      /* Encode upper 16 vector index register in the fourth byte of
	 the EVEX prefix.  */
      if (!(i.vrex & REX_X))
	i.vex.bytes[3] = 0x8;
      else
	vrex_used |= REX_X;
    }

  /* The implied SIMD prefix (none/66/F3/F2) comes from the second
     opcode byte and becomes the EVEX "pp" field.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* 4 byte EVEX prefix.  */
  i.vex.length = 4;
  i.vex.bytes[0] = 0x62;

  /* mmmm bits.  */
  switch (i.tm.opcode_modifier.vexopcode)
    {
    case VEX0F:
      m = 1;
      break;
    case VEX0F38:
      m = 2;
      break;
    case VEX0F3A:
      m = 3;
      break;
    default:
      abort ();
      break;
    }

  /* The high 3 bits of the second EVEX byte are 1's complement of RXB
     bits from REX.  */
  i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

  /* The fifth bit of the second EVEX byte is 1's complement of the
     REX_R bit in VREX.  */
  if (!(i.vrex & REX_R))
    i.vex.bytes[1] |= 0x10;
  else
    vrex_used |= REX_R;

  if ((i.reg_operands + i.imm_operands) == i.operands)
    {
      /* When all operands are registers, the REX_X bit in REX is not
	 used.  We reuse it to encode the upper 16 registers, which is
	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
	 as 1's complement.  */
      if ((i.vrex & REX_B))
	{
	  vrex_used |= REX_B;
	  i.vex.bytes[1] &= ~0x40;
	}
    }

  /* EVEX instructions shouldn't need the REX prefix.  */
  i.vrex &= ~vrex_used;
  gas_assert (i.vrex == 0);

  /* Check the REX.W bit.  */
  w = (i.rex & REX_W) ? 1 : 0;
  if (i.tm.opcode_modifier.vexw)
    {
      if (i.tm.opcode_modifier.vexw == VEXW1)
	w = 1;
    }
  /* If w is not set it means we are dealing with WIG instruction.  */
  else if (!w)
    {
      if (evexwig == evexw1)
	w = 1;
    }

  /* Encode the U bit.  */
  implied_prefix |= 0x4;

  /* The third byte of the EVEX prefix.  */
  i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);

  /* The fourth byte of the EVEX prefix.  */
  /* The zeroing-masking bit.  */
  if (i.mask && i.mask->zeroing)
    i.vex.bytes[3] |= 0x80;

  /* Don't always set the broadcast bit if there is no RC.  */
  if (!i.rounding)
    {
      /* Encode the vector length.  */
      unsigned int vec_length;

      switch (i.tm.opcode_modifier.evex)
	{
	case EVEXLIG: /* LL' is ignored */
	  vec_length = evexlig << 5;
	  break;
	case EVEX128:
	  vec_length = 0 << 5;
	  break;
	case EVEX256:
	  vec_length = 1 << 5;
	  break;
	case EVEX512:
	  vec_length = 2 << 5;
	  break;
	default:
	  abort ();
	  break;
	}
      i.vex.bytes[3] |= vec_length;
      /* Encode the broadcast bit.  */
      if (i.broadcast)
	i.vex.bytes[3] |= 0x10;
    }
  else
    {
      /* Embedded rounding control: reuse the LL' field; SAE-only uses
	 the default rounding from evexrcig.  */
      if (i.rounding->type != saeonly)
	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
      else
	i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
    }

  /* The opmask register selecting the write mask.  */
  if (i.mask && i.mask->mask)
    i.vex.bytes[3] |= i.mask->mask->reg_num;
}
3359
/* Convert the template's extension opcode into a fake 8-bit immediate
   operand (used by 3DNow!, some SSE/AVX encodings, and MONITOR/MWAIT
   style instructions whose fixed register operands are dropped).  */
static void
process_immext (void)
{
  expressionS *exp;

  if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
      && i.operands > 0)
    {
      /* MONITOR/MWAIT as well as SVME instructions have fixed operands
	 with an opcode suffix which is coded in the same place as an
	 8-bit immediate field would be.
	 Here we check those operands and remove them afterwards.  */
      unsigned int x;

      /* Each fixed operand must name the register implied by its
	 position.  */
      for (x = 0; x < i.operands; x++)
	if (register_number (i.op[x].regs) != x)
	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
		  register_prefix, i.op[x].regs->reg_name, x + 1,
		  i.tm.name);

      i.operands = 0;
    }

  /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
     which is coded in the same place as an 8-bit immediate field
     would be.  Here we fake an 8-bit immediate operand from the
     opcode suffix stored in tm.extension_opcode.

     AVX instructions also use this encoding, for some of
     3 argument instructions.  */

  gas_assert (i.imm_operands <= 1
	      && (i.operands <= 2
		  || ((i.tm.opcode_modifier.vex
		       || i.tm.opcode_modifier.evex)
		      && i.operands <= 4)));

  /* Append a constant imm8 operand holding the extension opcode, then
     clear the extension so it is not emitted twice.  */
  exp = &im_expressions[i.imm_operands++];
  i.op[i.operands].imms = exp;
  i.types[i.operands] = imm8;
  i.operands++;
  exp->X_op = O_constant;
  exp->X_add_number = i.tm.extension_opcode;
  i.tm.extension_opcode = None;
}
3405
3406
3407 static int
3408 check_hle (void)
3409 {
3410 switch (i.tm.opcode_modifier.hleprefixok)
3411 {
3412 default:
3413 abort ();
3414 case HLEPrefixNone:
3415 as_bad (_("invalid instruction `%s' after `%s'"),
3416 i.tm.name, i.hle_prefix);
3417 return 0;
3418 case HLEPrefixLock:
3419 if (i.prefix[LOCK_PREFIX])
3420 return 1;
3421 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3422 return 0;
3423 case HLEPrefixAny:
3424 return 1;
3425 case HLEPrefixRelease:
3426 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3427 {
3428 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3429 i.tm.name);
3430 return 0;
3431 }
3432 if (i.mem_operands == 0
3433 || !operand_type_check (i.types[i.operands - 1], anymem))
3434 {
3435 as_bad (_("memory destination needed for instruction `%s'"
3436 " after `xrelease'"), i.tm.name);
3437 return 0;
3438 }
3439 return 1;
3440 }
3441 }
3442
/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   The work proceeds in a fixed order over the global insn state `i':
   parse mnemonic and operands, canonicalize operand order and sizes,
   select a template, validate prefixes, build VEX/EVEX/REX prefixes,
   and finally emit the encoded instruction.  */

void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals.  */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic.  */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand.  */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode.  */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement.  */
  if (i.disp_operands
      && i.disp_encoding != disp_encoding_32bit
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types.  */

  if (!(t = match_template ()))
    return;

  /* Optionally warn or error when legacy SSE insns are used while
     -msse-check is in effect.  */
  if (sse_check != check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension.  */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check if REP prefix is OK.  */
  if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
    {
      as_bad (_("invalid instruction `%s' after `%s'"),
		i.tm.name, i.rep_prefix);
      return;
    }

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86).  */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check if HLE prefix is OK.  */
  if (i.hle_prefix && !check_hle ())
    return;

  /* Check BND prefix.  */
  if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
    as_bad (_("expecting valid branch instruction after `bnd'"));

  if (i.tm.cpu_flags.bitfield.cpumpx
      && flag_code == CODE_64BIT
      && i.prefix[ADDR_PREFIX])
    as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));

  /* Insert BND prefix.  */
  if (add_bnd_prefix
      && i.tm.opcode_modifier.bndprefixok
      && !i.prefix[BND_PREFIX])
    add_prefix (BND_PREFIX_OPCODE);

  /* Check string instruction segment overrides.  */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types.  */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix.  */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns.  */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less.  */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX.  */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode.  */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
    {
      /* VEX/EVEX encoded instructions are not available in 16-bit
	 mode.  */
      if (flag_code == CODE_16BIT)
	{
	  as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
		  i.tm.name);
	  return;
	}

      if (i.tm.opcode_modifier.vex)
	build_vex_prefix (t);
      else
	build_evex_prefix ();
    }

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM.  */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc.  */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones.  */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers.  */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up.  */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output.  */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn.  */
  output_insn ();
}
3721
/* Parse the instruction mnemonic (and any leading prefixes) starting
   at LINE, copying it into MNEMONIC and setting current_templates to
   the matching template group.  Returns a pointer past the mnemonic
   (start of the operands) or NULL after reporting an error.  */

static char *
parse_insn (char *line, char *mnemonic)
{
  char *l = line;
  char *token_start = l;
  char *mnem_p;
  int supported;
  const insn_template *t;
  char *dot_p = NULL;

  /* Loop so that any number of prefix mnemonics (e.g. "lock", "rep")
     can precede the actual instruction mnemonic.  */
  while (1)
    {
      mnem_p = mnemonic;
      /* Copy characters that are legal in a mnemonic, remembering the
	 last '.' for the .s/.d8/.d32 pseudo suffixes below.  */
      while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
	{
	  if (*mnem_p == '.')
	    dot_p = mnem_p;
	  mnem_p++;
	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
	    {
	      as_bad (_("no such instruction: `%s'"), token_start);
	      return NULL;
	    }
	  l++;
	}
      if (!is_space_char (*l)
	  && *l != END_OF_INSN
	  && (intel_syntax
	      || (*l != PREFIX_SEPARATOR
		  && *l != ',')))
	{
	  as_bad (_("invalid character %s in mnemonic"),
		  output_invalid (*l));
	  return NULL;
	}
      if (token_start == l)
	{
	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
	    as_bad (_("expecting prefix; got nothing"));
	  else
	    as_bad (_("expecting mnemonic; got nothing"));
	  return NULL;
	}

      /* Look up instruction (or prefix) via hash table.  */
      current_templates = (const templates *) hash_find (op_hash, mnemonic);

      /* A prefix only counts as such when something (another mnemonic)
	 follows it on the line.  */
      if (*l != END_OF_INSN
	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
	  && current_templates
	  && current_templates->start->opcode_modifier.isprefix)
	{
	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
	    {
	      as_bad ((flag_code != CODE_64BIT
		       ? _("`%s' is only supported in 64-bit mode")
		       : _("`%s' is not supported in 64-bit mode")),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* If we are in 16-bit mode, do not allow addr16 or data16.
	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
	  if ((current_templates->start->opcode_modifier.size16
	       || current_templates->start->opcode_modifier.size32)
	      && flag_code != CODE_64BIT
	      && (current_templates->start->opcode_modifier.size32
		  ^ (flag_code == CODE_16BIT)))
	    {
	      as_bad (_("redundant %s prefix"),
		      current_templates->start->name);
	      return NULL;
	    }
	  /* Add prefix, checking for repeated prefixes.  */
	  switch (add_prefix (current_templates->start->base_opcode))
	    {
	    case PREFIX_EXIST:
	      return NULL;
	    case PREFIX_REP:
	      /* 0xF2/0xF3 can mean HLE, BND or plain REP depending on
		 which CPU feature the prefix template belongs to.  */
	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
		i.hle_prefix = current_templates->start->name;
	      else if (current_templates->start->cpu_flags.bitfield.cpumpx)
		i.bnd_prefix = current_templates->start->name;
	      else
		i.rep_prefix = current_templates->start->name;
	      break;
	    default:
	      break;
	    }
	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
	  token_start = ++l;
	}
      else
	break;
    }

  if (!current_templates)
    {
      /* Check if we should swap operand or force 32bit displacement in
	 encoding.  The mnemonic may carry a ".s", ".d8" or ".d32"
	 pseudo suffix; strip it and retry the lookup.  */
      if (mnem_p - 2 == dot_p && dot_p[1] == 's')
	i.swap_operand = 1;
      else if (mnem_p - 3 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '8')
	i.disp_encoding = disp_encoding_8bit;
      else if (mnem_p - 4 == dot_p
	       && dot_p[1] == 'd'
	       && dot_p[2] == '3'
	       && dot_p[3] == '2')
	i.disp_encoding = disp_encoding_32bit;
      else
	goto check_suffix;
      mnem_p = dot_p;
      *dot_p = '\0';
      current_templates = (const templates *) hash_find (op_hash, mnemonic);
    }

  if (!current_templates)
    {
check_suffix:
      /* See if we can get a match by trimming off a suffix.  */
      switch (mnem_p[-1])
	{
	case WORD_MNEM_SUFFIX:
	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
	    i.suffix = SHORT_MNEM_SUFFIX;
	  else
	    /* NB: the following case labels jump INTO this else branch;
	       for 'b' and 'q' the suffix is always taken verbatim.  */
	case BYTE_MNEM_SUFFIX:
	case QWORD_MNEM_SUFFIX:
	  i.suffix = mnem_p[-1];
	  mnem_p[-1] = '\0';
	  current_templates = (const templates *) hash_find (op_hash,
                                                             mnemonic);
	  break;
	case SHORT_MNEM_SUFFIX:
	case LONG_MNEM_SUFFIX:
	  if (!intel_syntax)
	    {
	      i.suffix = mnem_p[-1];
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
                                                                 mnemonic);
	    }
	  break;

	  /* Intel Syntax.  */
	case 'd':
	  if (intel_syntax)
	    {
	      if (intel_float_operand (mnemonic) == 1)
		i.suffix = SHORT_MNEM_SUFFIX;
	      else
		i.suffix = LONG_MNEM_SUFFIX;
	      mnem_p[-1] = '\0';
	      current_templates = (const templates *) hash_find (op_hash,
                                                                 mnemonic);
	    }
	  break;
	}
      if (!current_templates)
	{
	  as_bad (_("no such instruction: `%s'"), token_start);
	  return NULL;
	}
    }

  if (current_templates->start->opcode_modifier.jump
      || current_templates->start->opcode_modifier.jumpbyte)
    {
      /* Check for a branch hint.  We allow ",pt" and ",pn" for
	 predict taken and predict not taken respectively.
	 I'm not sure that branch hints actually do anything on loop
	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
	 may work in the future and it doesn't hurt to accept them
	 now.  */
      if (l[0] == ',' && l[1] == 'p')
	{
	  if (l[2] == 't')
	    {
	      if (!add_prefix (DS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	  else if (l[2] == 'n')
	    {
	      if (!add_prefix (CS_PREFIX_OPCODE))
		return NULL;
	      l += 3;
	    }
	}
    }
  /* Any other comma loses.  */
  if (*l == ',')
    {
      as_bad (_("invalid character %s in mnemonic"),
	      output_invalid (*l));
      return NULL;
    }

  /* Check if instruction is supported on specified architecture.  */
  supported = 0;
  for (t = current_templates->start; t < current_templates->end; ++t)
    {
      supported |= cpu_flags_match (t);
      if (supported == CPU_FLAGS_PERFECT_MATCH)
	goto skip;
    }

  if (!(supported & CPU_FLAGS_64BIT_MATCH))
    {
      as_bad (flag_code == CODE_64BIT
	      ? _("`%s' is not supported in 64-bit mode")
	      : _("`%s' is only supported in 64-bit mode"),
	      current_templates->start->name);
      return NULL;
    }
  if (supported != CPU_FLAGS_PERFECT_MATCH)
    {
      as_bad (_("`%s' is not supported on `%s%s'"),
	      current_templates->start->name,
	      cpu_arch_name ? cpu_arch_name : default_arch,
	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
      return NULL;
    }

skip:
  if (!cpu_arch_flags.bitfield.cpui386
	   && (flag_code != CODE_16BIT))
    {
      as_warn (_("use .code16 to ensure correct addressing mode"));
    }

  return l;
}
3956
/* Parse the comma-separated operand list starting at L, recording each
   operand in the global `i' via i386_intel_operand/i386_att_operand.
   MNEMONIC is used only for Intel float-operand disambiguation.
   Returns a pointer past the operands, or NULL after an error.  */

static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan to the end of this operand: the next top-level ','
	 (commas inside parens/brackets belong to the operand).  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T groups with (), Intel with [].  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
4075
4076 static void
4077 swap_2_operands (int xchg1, int xchg2)
4078 {
4079 union i386_op temp_op;
4080 i386_operand_type temp_type;
4081 enum bfd_reloc_code_real temp_reloc;
4082
4083 temp_type = i.types[xchg2];
4084 i.types[xchg2] = i.types[xchg1];
4085 i.types[xchg1] = temp_type;
4086 temp_op = i.op[xchg2];
4087 i.op[xchg2] = i.op[xchg1];
4088 i.op[xchg1] = temp_op;
4089 temp_reloc = i.reloc[xchg2];
4090 i.reloc[xchg2] = i.reloc[xchg1];
4091 i.reloc[xchg1] = temp_reloc;
4092
4093 if (i.mask)
4094 {
4095 if (i.mask->operand == xchg1)
4096 i.mask->operand = xchg2;
4097 else if (i.mask->operand == xchg2)
4098 i.mask->operand = xchg1;
4099 }
4100 if (i.broadcast)
4101 {
4102 if (i.broadcast->operand == xchg1)
4103 i.broadcast->operand = xchg2;
4104 else if (i.broadcast->operand == xchg2)
4105 i.broadcast->operand = xchg1;
4106 }
4107 if (i.rounding)
4108 {
4109 if (i.rounding->operand == xchg1)
4110 i.rounding->operand = xchg2;
4111 else if (i.rounding->operand == xchg2)
4112 i.rounding->operand = xchg1;
4113 }
4114 }
4115
/* Reverse the order of the instruction's operands: swap the outermost
   pair and, for 4 or 5 operands, the second pair as well.  Operand
   counts other than 2-5 cannot occur here.  */

static void
swap_operands (void)
{
  switch (i.operands)
    {
    case 5:
    case 4:
      swap_2_operands (1, i.operands - 2);
      /* Fall through.  */
    case 3:
    case 2:
      swap_2_operands (0, i.operands - 1);
      break;
    default:
      abort ();
    }

  /* With two memory operands the segment overrides travel with their
     operands, so they must be swapped as well.  */
  if (i.mem_operands == 2)
    {
      const seg_entry *temp_seg;
      temp_seg = i.seg[0];
      i.seg[0] = i.seg[1];
      i.seg[1] = temp_seg;
    }
}
4140
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  Widens the allowed immediate types per the (known
   or guessed) operand-size suffix, sign-normalizes 16/32-bit constants
   so negative values match signed-immediate templates, then records
   the smallest type each constant fits in.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* No registers at all: derive the operand size from the mode and
       any data-size prefix.  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.
	       Mark every width >= the suffix width as acceptable.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    /* Likewise sign-extend a 32-bit constant (the XOR/subtract
	       pair sign-extends bit 31 without relying on the width of
	       offsetT).  */
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow the type when at least one template could
		 still match afterwards.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
4287
/* Try to use the smallest displacement type too.  For constant
   displacements this narrows the allowed disp sizes (dropping a
   zero displacement entirely when a base/index register is present);
   non-constant displacements lose Disp64, which is only supported on
   constants.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* A zero displacement with a base/index register can be
		   dropped altogether.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With an address-size prefix a 32-bit unsigned
		   displacement is also usable.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* TLS descriptor calls carry no real displacement; emit the
	       relocation now and drop all disp sizes.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
4363
4364 /* Check if operands are valid for the instruction. */
4365
4366 static int
4367 check_VecOperands (const insn_template *t)
4368 {
4369 unsigned int op;
4370
4371 /* Without VSIB byte, we can't have a vector register for index. */
4372 if (!t->opcode_modifier.vecsib
4373 && i.index_reg
4374 && (i.index_reg->reg_type.bitfield.regxmm
4375 || i.index_reg->reg_type.bitfield.regymm
4376 || i.index_reg->reg_type.bitfield.regzmm))
4377 {
4378 i.error = unsupported_vector_index_register;
4379 return 1;
4380 }
4381
4382 /* Check if default mask is allowed. */
4383 if (t->opcode_modifier.nodefmask
4384 && (!i.mask || i.mask->mask->reg_num == 0))
4385 {
4386 i.error = no_default_mask;
4387 return 1;
4388 }
4389
4390 /* For VSIB byte, we need a vector register for index, and all vector
4391 registers must be distinct. */
4392 if (t->opcode_modifier.vecsib)
4393 {
4394 if (!i.index_reg
4395 || !((t->opcode_modifier.vecsib == VecSIB128
4396 && i.index_reg->reg_type.bitfield.regxmm)
4397 || (t->opcode_modifier.vecsib == VecSIB256
4398 && i.index_reg->reg_type.bitfield.regymm)
4399 || (t->opcode_modifier.vecsib == VecSIB512
4400 && i.index_reg->reg_type.bitfield.regzmm)))
4401 {
4402 i.error = invalid_vsib_address;
4403 return 1;
4404 }
4405
4406 gas_assert (i.reg_operands == 2 || i.mask);
4407 if (i.reg_operands == 2 && !i.mask)
4408 {
4409 gas_assert (i.types[0].bitfield.regxmm
4410 || i.types[0].bitfield.regymm);
4411 gas_assert (i.types[2].bitfield.regxmm
4412 || i.types[2].bitfield.regymm);
4413 if (operand_check == check_none)
4414 return 0;
4415 if (register_number (i.op[0].regs)
4416 != register_number (i.index_reg)
4417 && register_number (i.op[2].regs)
4418 != register_number (i.index_reg)
4419 && register_number (i.op[0].regs)
4420 != register_number (i.op[2].regs))
4421 return 0;
4422 if (operand_check == check_error)
4423 {
4424 i.error = invalid_vector_register_set;
4425 return 1;
4426 }
4427 as_warn (_("mask, index, and destination registers should be distinct"));
4428 }
4429 else if (i.reg_operands == 1 && i.mask)
4430 {
4431 if ((i.types[1].bitfield.regymm
4432 || i.types[1].bitfield.regzmm)
4433 && (register_number (i.op[1].regs)
4434 == register_number (i.index_reg)))
4435 {
4436 if (operand_check == check_error)
4437 {
4438 i.error = invalid_vector_register_set;
4439 return 1;
4440 }
4441 if (operand_check != check_none)
4442 as_warn (_("index and destination registers should be distinct"));
4443 }
4444 }
4445 }
4446
4447 /* Check if broadcast is supported by the instruction and is applied
4448 to the memory operand. */
4449 if (i.broadcast)
4450 {
4451 int broadcasted_opnd_size;
4452
4453 /* Check if specified broadcast is supported in this instruction,
4454 and it's applied to memory operand of DWORD or QWORD type,
4455 depending on VecESize. */
4456 if (i.broadcast->type != t->opcode_modifier.broadcast
4457 || !i.types[i.broadcast->operand].bitfield.mem
4458 || (t->opcode_modifier.vecesize == 0
4459 && !i.types[i.broadcast->operand].bitfield.dword
4460 && !i.types[i.broadcast->operand].bitfield.unspecified)
4461 || (t->opcode_modifier.vecesize == 1
4462 && !i.types[i.broadcast->operand].bitfield.qword
4463 && !i.types[i.broadcast->operand].bitfield.unspecified))
4464 goto bad_broadcast;
4465
4466 broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
4467 if (i.broadcast->type == BROADCAST_1TO16)
4468 broadcasted_opnd_size <<= 4; /* Broadcast 1to16. */
4469 else if (i.broadcast->type == BROADCAST_1TO8)
4470 broadcasted_opnd_size <<= 3; /* Broadcast 1to8. */
4471 else if (i.broadcast->type == BROADCAST_1TO4)
4472 broadcasted_opnd_size <<= 2; /* Broadcast 1to4. */
4473 else if (i.broadcast->type == BROADCAST_1TO2)
4474 broadcasted_opnd_size <<= 1; /* Broadcast 1to2. */
4475 else
4476 goto bad_broadcast;
4477
4478 if ((broadcasted_opnd_size == 256
4479 && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
4480 || (broadcasted_opnd_size == 512
4481 && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
4482 {
4483 bad_broadcast:
4484 i.error = unsupported_broadcast;
4485 return 1;
4486 }
4487 }
4488 /* If broadcast is supported in this instruction, we need to check if
4489 operand of one-element size isn't specified without broadcast. */
4490 else if (t->opcode_modifier.broadcast && i.mem_operands)
4491 {
4492 /* Find memory operand. */
4493 for (op = 0; op < i.operands; op++)
4494 if (operand_type_check (i.types[op], anymem))
4495 break;
4496 gas_assert (op < i.operands);
4497 /* Check size of the memory operand. */
4498 if ((t->opcode_modifier.vecesize == 0
4499 && i.types[op].bitfield.dword)
4500 || (t->opcode_modifier.vecesize == 1
4501 && i.types[op].bitfield.qword))
4502 {
4503 i.error = broadcast_needed;
4504 return 1;
4505 }
4506 }
4507
4508 /* Check if requested masking is supported. */
4509 if (i.mask
4510 && (!t->opcode_modifier.masking
4511 || (i.mask->zeroing
4512 && t->opcode_modifier.masking == MERGING_MASKING)))
4513 {
4514 i.error = unsupported_masking;
4515 return 1;
4516 }
4517
4518 /* Check if masking is applied to dest operand. */
4519 if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
4520 {
4521 i.error = mask_not_on_destination;
4522 return 1;
4523 }
4524
4525 /* Check RC/SAE. */
4526 if (i.rounding)
4527 {
4528 if ((i.rounding->type != saeonly
4529 && !t->opcode_modifier.staticrounding)
4530 || (i.rounding->type == saeonly
4531 && (t->opcode_modifier.staticrounding
4532 || !t->opcode_modifier.sae)))
4533 {
4534 i.error = unsupported_rc_sae;
4535 return 1;
4536 }
4537 /* If the instruction has several immediate operands and one of
4538 them is rounding, the rounding operand should be the last
4539 immediate operand. */
4540 if (i.imm_operands > 1
4541 && i.rounding->operand != (int) (i.imm_operands - 1))
4542 {
4543 i.error = rc_sae_operand_not_last_imm;
4544 return 1;
4545 }
4546 }
4547
4548 /* Check vector Disp8 operand. */
4549 if (t->opcode_modifier.disp8memshift)
4550 {
4551 if (i.broadcast)
4552 i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
4553 else
4554 i.memshift = t->opcode_modifier.disp8memshift;
4555
4556 for (op = 0; op < i.operands; op++)
4557 if (operand_type_check (i.types[op], disp)
4558 && i.op[op].disps->X_op == O_constant)
4559 {
4560 offsetT value = i.op[op].disps->X_add_number;
4561 int vec_disp8_ok = fits_in_vec_disp8 (value);
4562 if (t->operand_types [op].bitfield.vec_disp8)
4563 {
4564 if (vec_disp8_ok)
4565 i.types[op].bitfield.vec_disp8 = 1;
4566 else
4567 {
4568 /* Vector insn can only have Vec_Disp8/Disp32 in
4569 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4570 mode. */
4571 i.types[op].bitfield.disp8 = 0;
4572 if (flag_code != CODE_16BIT)
4573 i.types[op].bitfield.disp16 = 0;
4574 }
4575 }
4576 else if (flag_code != CODE_16BIT)
4577 {
4578 /* One form of this instruction supports vector Disp8.
4579 Try vector Disp8 if we need to use Disp32. */
4580 if (vec_disp8_ok && !fits_in_signed_byte (value))
4581 {
4582 i.error = try_vector_disp8;
4583 return 1;
4584 }
4585 }
4586 }
4587 }
4588 else
4589 i.memshift = -1;
4590
4591 return 0;
4592 }
4593
4594 /* Check if operands are valid for the instruction. Update VEX
4595 operand types. */
4596
4597 static int
4598 VEX_check_operands (const insn_template *t)
4599 {
4600 /* VREX is only valid with EVEX prefix. */
4601 if (i.need_vrex && !t->opcode_modifier.evex)
4602 {
4603 i.error = invalid_register_operand;
4604 return 1;
4605 }
4606
4607 if (!t->opcode_modifier.vex)
4608 return 0;
4609
4610 /* Only check VEX_Imm4, which must be the first operand. */
4611 if (t->operand_types[0].bitfield.vec_imm4)
4612 {
4613 if (i.op[0].imms->X_op != O_constant
4614 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4615 {
4616 i.error = bad_imm4;
4617 return 1;
4618 }
4619
4620 /* Turn off Imm8 so that update_imm won't complain. */
4621 i.types[0] = vec_imm4;
4622 }
4623
4624 return 0;
4625 }
4626
/* Search current_templates for the template matching the parsed
   instruction in `i': check operand count, CPU requirements, suffix
   restrictions and operand types, trying the reversed operand order
   where the template's D/FloatD bits allow it.  On success copy the
   winning template into i.tm (applying any direction-bit flip and
   address-size-prefix Disp adjustment) and return it; on failure
   issue a diagnostic for the most specific error seen and return
   NULL.  */

static const insn_template *
match_template (void)
{
  /* Points to template once we've found it.  */
  const insn_template *t;
  i386_operand_type overlap0, overlap1, overlap2, overlap3;
  i386_operand_type overlap4;
  unsigned int found_reverse_match;
  i386_opcode_modifier suffix_check;
  i386_operand_type operand_types [MAX_OPERANDS];
  int addr_prefix_disp;
  unsigned int j;
  unsigned int found_cpu_match;
  unsigned int check_register;
  enum i386_error specific_error = 0;

#if MAX_OPERANDS != 5
# error "MAX_OPERANDS must be 5."
#endif

  found_reverse_match = 0;
  addr_prefix_disp = -1;

  /* Record which mnemonic suffix was given, so templates carrying the
     corresponding No_?suf bit can be rejected quickly below.  */
  memset (&suffix_check, 0, sizeof (suffix_check));
  if (i.suffix == BYTE_MNEM_SUFFIX)
    suffix_check.no_bsuf = 1;
  else if (i.suffix == WORD_MNEM_SUFFIX)
    suffix_check.no_wsuf = 1;
  else if (i.suffix == SHORT_MNEM_SUFFIX)
    suffix_check.no_ssuf = 1;
  else if (i.suffix == LONG_MNEM_SUFFIX)
    suffix_check.no_lsuf = 1;
  else if (i.suffix == QWORD_MNEM_SUFFIX)
    suffix_check.no_qsuf = 1;
  else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
    suffix_check.no_ldsuf = 1;

  /* Must have right number of operands.  */
  i.error = number_of_operands_mismatch;

  for (t = current_templates->start; t < current_templates->end; t++)
    {
      addr_prefix_disp = -1;

      if (i.operands != t->operands)
	continue;

      /* Check processor support.  */
      i.error = unsupported;
      found_cpu_match = (cpu_flags_match (t)
			 == CPU_FLAGS_PERFECT_MATCH);
      if (!found_cpu_match)
	continue;

      /* Check old gcc support.  */
      i.error = old_gcc_only;
      if (!old_gcc && t->opcode_modifier.oldgcc)
	continue;

      /* Check AT&T mnemonic.  */
      i.error = unsupported_with_intel_mnemonic;
      if (intel_mnemonic && t->opcode_modifier.attmnemonic)
	continue;

      /* Check AT&T/Intel syntax.  */
      i.error = unsupported_syntax;
      if ((intel_syntax && t->opcode_modifier.attsyntax)
	  || (!intel_syntax && t->opcode_modifier.intelsyntax))
	continue;

      /* Check the suffix, except for some instructions in intel mode.  */
      i.error = invalid_instruction_suffix;
      if ((!intel_syntax || !t->opcode_modifier.ignoresize)
	  && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
	      || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
	      || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
	      || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
	      || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
	      || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
	continue;

      if (!operand_size_match (t))
	continue;

      for (j = 0; j < MAX_OPERANDS; j++)
	operand_types[j] = t->operand_types[j];

      /* In general, don't allow 64-bit operands in 32-bit mode.
	 NB: operand_types[t->operands > 1] indexes slot 1 for
	 multi-operand templates and slot 0 otherwise.  */
      if (i.suffix == QWORD_MNEM_SUFFIX
	  && flag_code != CODE_64BIT
	  && (intel_syntax
	      ? (!t->opcode_modifier.ignoresize
		 && !intel_float_operand (t->name))
	      : intel_float_operand (t->name) != 2)
	  && ((!operand_types[0].bitfield.regmmx
	       && !operand_types[0].bitfield.regxmm
	       && !operand_types[0].bitfield.regymm
	       && !operand_types[0].bitfield.regzmm)
	      || (!operand_types[t->operands > 1].bitfield.regmmx
		  && operand_types[t->operands > 1].bitfield.regxmm
		  && operand_types[t->operands > 1].bitfield.regymm
		  && operand_types[t->operands > 1].bitfield.regzmm))
	  && (t->base_opcode != 0x0fc7
	      || t->extension_opcode != 1 /* cmpxchg8b */))
	continue;

      /* In general, don't allow 32-bit operands on pre-386.  */
      else if (i.suffix == LONG_MNEM_SUFFIX
	       && !cpu_arch_flags.bitfield.cpui386
	       && (intel_syntax
		   ? (!t->opcode_modifier.ignoresize
		      && !intel_float_operand (t->name))
		   : intel_float_operand (t->name) != 2)
	       && ((!operand_types[0].bitfield.regmmx
		    && !operand_types[0].bitfield.regxmm)
		   || (!operand_types[t->operands > 1].bitfield.regmmx
		       && operand_types[t->operands > 1].bitfield.regxmm)))
	continue;

      /* Do not verify operands when there are none.  */
      else
	{
	  if (!t->operands)
	    /* We've found a match; break out of loop.  */
	    break;
	}

      /* Address size prefix will turn Disp64/Disp32/Disp16 operand
	 into Disp32/Disp16/Disp32 operand.  */
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  /* There should be only one Disp operand.  */
	  switch (flag_code)
	    {
	    case CODE_16BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp16)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 1;
		      operand_types[j].bitfield.disp16 = 0;
		      break;
		    }
		}
	      break;
	    case CODE_32BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp32)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp32 = 0;
		      operand_types[j].bitfield.disp16 = 1;
		      break;
		    }
		}
	      break;
	    case CODE_64BIT:
	      for (j = 0; j < MAX_OPERANDS; j++)
		{
		  if (operand_types[j].bitfield.disp64)
		    {
		      addr_prefix_disp = j;
		      operand_types[j].bitfield.disp64 = 0;
		      operand_types[j].bitfield.disp32 = 1;
		      break;
		    }
		}
	      break;
	    }
	}

      /* We check register size if needed.  */
      check_register = t->opcode_modifier.checkregsize;
      overlap0 = operand_type_and (i.types[0], operand_types[0]);
      switch (t->operands)
	{
	case 1:
	  if (!operand_type_match (overlap0, i.types[0]))
	    continue;
	  break;
	case 2:
	  /* xchg %eax, %eax is a special case.  It is an alias for nop
	     only in 32bit mode and we can use opcode 0x90.  In 64bit
	     mode, we can't use 0x90 for xchg %eax, %eax since it should
	     zero-extend %eax to %rax.  */
	  if (flag_code == CODE_64BIT
	      && t->base_opcode == 0x90
	      && operand_type_equal (&i.types [0], &acc32)
	      && operand_type_equal (&i.types [1], &acc32))
	    continue;
	  if (i.swap_operand)
	    {
	      /* If we swap operand in encoding, we either match
		 the next one or reverse direction of operands.  */
	      if (t->opcode_modifier.s)
		continue;
	      else if (t->opcode_modifier.d)
		goto check_reverse;
	    }
	  /* Fall through.  */

	case 3:
	  /* If we swap operand in encoding, we match the next one.  */
	  if (i.swap_operand && t->opcode_modifier.s)
	    continue;
	  /* Fall through.  */
	case 4:
	case 5:
	  overlap1 = operand_type_and (i.types[1], operand_types[1]);
	  if (!operand_type_match (overlap0, i.types[0])
	      || !operand_type_match (overlap1, i.types[1])
	      || (check_register
		  && !operand_type_register_match (overlap0, i.types[0],
						   operand_types[0],
						   overlap1, i.types[1],
						   operand_types[1])))
	    {
	      /* Check if other direction is valid ...  */
	      if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
		continue;

check_reverse:
	      /* Try reversing direction of operands.  */
	      overlap0 = operand_type_and (i.types[0], operand_types[1]);
	      overlap1 = operand_type_and (i.types[1], operand_types[0]);
	      if (!operand_type_match (overlap0, i.types[0])
		  || !operand_type_match (overlap1, i.types[1])
		  || (check_register
		      && !operand_type_register_match (overlap0,
						       i.types[0],
						       operand_types[1],
						       overlap1,
						       i.types[1],
						       operand_types[0])))
		{
		  /* Does not match either direction.  */
		  continue;
		}
	      /* found_reverse_match holds which of D or FloatDR
		 we've found.  */
	      if (t->opcode_modifier.d)
		found_reverse_match = Opcode_D;
	      else if (t->opcode_modifier.floatd)
		found_reverse_match = Opcode_FloatD;
	      else
		found_reverse_match = 0;
	      if (t->opcode_modifier.floatr)
		found_reverse_match |= Opcode_FloatR;
	    }
	  else
	    {
	      /* Found a forward 2 operand match here.  Compute the
		 remaining overlaps; each case falls through to the
		 lower operand counts.  */
	      switch (t->operands)
		{
		case 5:
		  overlap4 = operand_type_and (i.types[4],
					       operand_types[4]);
		  /* Fall through.  */
		case 4:
		  overlap3 = operand_type_and (i.types[3],
					       operand_types[3]);
		  /* Fall through.  */
		case 3:
		  overlap2 = operand_type_and (i.types[2],
					       operand_types[2]);
		  break;
		}

	      switch (t->operands)
		{
		case 5:
		  if (!operand_type_match (overlap4, i.types[4])
		      || !operand_type_register_match (overlap3,
						       i.types[3],
						       operand_types[3],
						       overlap4,
						       i.types[4],
						       operand_types[4]))
		    continue;
		  /* Fall through.  */
		case 4:
		  if (!operand_type_match (overlap3, i.types[3])
		      || (check_register
			  && !operand_type_register_match (overlap2,
							   i.types[2],
							   operand_types[2],
							   overlap3,
							   i.types[3],
							   operand_types[3])))
		    continue;
		  /* Fall through.  */
		case 3:
		  /* Here we make use of the fact that there are no
		     reverse match 3 operand instructions, and all 3
		     operand instructions only need to be checked for
		     register consistency between operands 2 and 3.  */
		  if (!operand_type_match (overlap2, i.types[2])
		      || (check_register
			  && !operand_type_register_match (overlap1,
							   i.types[1],
							   operand_types[1],
							   overlap2,
							   i.types[2],
							   operand_types[2])))
		    continue;
		  break;
		}
	    }
	  /* Found either forward/reverse 2, 3 or 4 operand match here:
	     slip through to break.  */
	}
      if (!found_cpu_match)
	{
	  found_reverse_match = 0;
	  continue;
	}

      /* Check if vector and VEX operands are valid.  */
      if (check_VecOperands (t) || VEX_check_operands (t))
	{
	  /* Remember the vector-specific error so it takes precedence
	     over the generic i.error when reporting below.  */
	  specific_error = i.error;
	  continue;
	}

      /* We've found a match; break out of loop.  */
      break;
    }

  if (t == current_templates->end)
    {
      /* We found no match.  */
      const char *err_msg;
      switch (specific_error ? specific_error : i.error)
	{
	default:
	  abort ();
	case operand_size_mismatch:
	  err_msg = _("operand size mismatch");
	  break;
	case operand_type_mismatch:
	  err_msg = _("operand type mismatch");
	  break;
	case register_type_mismatch:
	  err_msg = _("register type mismatch");
	  break;
	case number_of_operands_mismatch:
	  err_msg = _("number of operands mismatch");
	  break;
	case invalid_instruction_suffix:
	  err_msg = _("invalid instruction suffix");
	  break;
	case bad_imm4:
	  err_msg = _("constant doesn't fit in 4 bits");
	  break;
	case old_gcc_only:
	  err_msg = _("only supported with old gcc");
	  break;
	case unsupported_with_intel_mnemonic:
	  err_msg = _("unsupported with Intel mnemonic");
	  break;
	case unsupported_syntax:
	  err_msg = _("unsupported syntax");
	  break;
	case unsupported:
	  as_bad (_("unsupported instruction `%s'"),
		  current_templates->start->name);
	  return NULL;
	case invalid_vsib_address:
	  err_msg = _("invalid VSIB address");
	  break;
	case invalid_vector_register_set:
	  err_msg = _("mask, index, and destination registers must be distinct");
	  break;
	case unsupported_vector_index_register:
	  err_msg = _("unsupported vector index register");
	  break;
	case unsupported_broadcast:
	  err_msg = _("unsupported broadcast");
	  break;
	case broadcast_not_on_src_operand:
	  err_msg = _("broadcast not on source memory operand");
	  break;
	case broadcast_needed:
	  err_msg = _("broadcast is needed for operand of such type");
	  break;
	case unsupported_masking:
	  err_msg = _("unsupported masking");
	  break;
	case mask_not_on_destination:
	  err_msg = _("mask not on destination operand");
	  break;
	case no_default_mask:
	  err_msg = _("default mask isn't allowed");
	  break;
	case unsupported_rc_sae:
	  err_msg = _("unsupported static rounding/sae");
	  break;
	case rc_sae_operand_not_last_imm:
	  if (intel_syntax)
	    err_msg = _("RC/SAE operand must precede immediate operands");
	  else
	    err_msg = _("RC/SAE operand must follow immediate operands");
	  break;
	case invalid_register_operand:
	  err_msg = _("invalid register operand");
	  break;
	}
      as_bad (_("%s for `%s'"), err_msg,
	      current_templates->start->name);
      return NULL;
    }

  if (!quiet_warnings)
    {
      if (!intel_syntax
	  && (i.types[0].bitfield.jumpabsolute
	      != operand_types[0].bitfield.jumpabsolute))
	{
	  as_warn (_("indirect %s without `*'"), t->name);
	}

      if (t->opcode_modifier.isprefix
	  && t->opcode_modifier.ignoresize)
	{
	  /* Warn them that a data or address size prefix doesn't
	     affect assembly of the next line of code.  */
	  as_warn (_("stand-alone `%s' prefix"), t->name);
	}
    }

  /* Copy the template we found.  */
  i.tm = *t;

  if (addr_prefix_disp != -1)
    i.tm.operand_types[addr_prefix_disp]
      = operand_types[addr_prefix_disp];

  if (found_reverse_match)
    {
      /* If we found a reverse match we must alter the opcode
	 direction bit.  found_reverse_match holds bits to change
	 (different for int & float insns).  */

      i.tm.base_opcode ^= found_reverse_match;

      i.tm.operand_types[0] = operand_types[1];
      i.tm.operand_types[1] = operand_types[0];
    }

  return t;
}
5074
5075 static int
5076 check_string (void)
5077 {
5078 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5079 if (i.tm.operand_types[mem_op].bitfield.esseg)
5080 {
5081 if (i.seg[0] != NULL && i.seg[0] != &es)
5082 {
5083 as_bad (_("`%s' operand %d must use `%ses' segment"),
5084 i.tm.name,
5085 mem_op + 1,
5086 register_prefix);
5087 return 0;
5088 }
5089 /* There's only ever one segment override allowed per instruction.
5090 This instruction possibly has a legal segment override on the
5091 second operand, so copy the segment to where non-string
5092 instructions store it, allowing common code. */
5093 i.seg[0] = i.seg[1];
5094 }
5095 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5096 {
5097 if (i.seg[1] != NULL && i.seg[1] != &es)
5098 {
5099 as_bad (_("`%s' operand %d must use `%ses' segment"),
5100 i.tm.name,
5101 mem_op + 2,
5102 register_prefix);
5103 return 0;
5104 }
5105 }
5106 return 1;
5107 }
5108
5109 static int
5110 process_suffix (void)
5111 {
5112 /* If matched instruction specifies an explicit instruction mnemonic
5113 suffix, use it. */
5114 if (i.tm.opcode_modifier.size16)
5115 i.suffix = WORD_MNEM_SUFFIX;
5116 else if (i.tm.opcode_modifier.size32)
5117 i.suffix = LONG_MNEM_SUFFIX;
5118 else if (i.tm.opcode_modifier.size64)
5119 i.suffix = QWORD_MNEM_SUFFIX;
5120 else if (i.reg_operands)
5121 {
5122 /* If there's no instruction mnemonic suffix we try to invent one
5123 based on register operands. */
5124 if (!i.suffix)
5125 {
5126 /* We take i.suffix from the last register operand specified,
5127 Destination register type is more significant than source
5128 register type. crc32 in SSE4.2 prefers source register
5129 type. */
5130 if (i.tm.base_opcode == 0xf20f38f1)
5131 {
5132 if (i.types[0].bitfield.reg16)
5133 i.suffix = WORD_MNEM_SUFFIX;
5134 else if (i.types[0].bitfield.reg32)
5135 i.suffix = LONG_MNEM_SUFFIX;
5136 else if (i.types[0].bitfield.reg64)
5137 i.suffix = QWORD_MNEM_SUFFIX;
5138 }
5139 else if (i.tm.base_opcode == 0xf20f38f0)
5140 {
5141 if (i.types[0].bitfield.reg8)
5142 i.suffix = BYTE_MNEM_SUFFIX;
5143 }
5144
5145 if (!i.suffix)
5146 {
5147 int op;
5148
5149 if (i.tm.base_opcode == 0xf20f38f1
5150 || i.tm.base_opcode == 0xf20f38f0)
5151 {
5152 /* We have to know the operand size for crc32. */
5153 as_bad (_("ambiguous memory operand size for `%s`"),
5154 i.tm.name);
5155 return 0;
5156 }
5157
5158 for (op = i.operands; --op >= 0;)
5159 if (!i.tm.operand_types[op].bitfield.inoutportreg)
5160 {
5161 if (i.types[op].bitfield.reg8)
5162 {
5163 i.suffix = BYTE_MNEM_SUFFIX;
5164 break;
5165 }
5166 else if (i.types[op].bitfield.reg16)
5167 {
5168 i.suffix = WORD_MNEM_SUFFIX;
5169 break;
5170 }
5171 else if (i.types[op].bitfield.reg32)
5172 {
5173 i.suffix = LONG_MNEM_SUFFIX;
5174 break;
5175 }
5176 else if (i.types[op].bitfield.reg64)
5177 {
5178 i.suffix = QWORD_MNEM_SUFFIX;
5179 break;
5180 }
5181 }
5182 }
5183 }
5184 else if (i.suffix == BYTE_MNEM_SUFFIX)
5185 {
5186 if (intel_syntax
5187 && i.tm.opcode_modifier.ignoresize
5188 && i.tm.opcode_modifier.no_bsuf)
5189 i.suffix = 0;
5190 else if (!check_byte_reg ())
5191 return 0;
5192 }
5193 else if (i.suffix == LONG_MNEM_SUFFIX)
5194 {
5195 if (intel_syntax
5196 && i.tm.opcode_modifier.ignoresize
5197 && i.tm.opcode_modifier.no_lsuf)
5198 i.suffix = 0;
5199 else if (!check_long_reg ())
5200 return 0;
5201 }
5202 else if (i.suffix == QWORD_MNEM_SUFFIX)
5203 {
5204 if (intel_syntax
5205 && i.tm.opcode_modifier.ignoresize
5206 && i.tm.opcode_modifier.no_qsuf)
5207 i.suffix = 0;
5208 else if (!check_qword_reg ())
5209 return 0;
5210 }
5211 else if (i.suffix == WORD_MNEM_SUFFIX)
5212 {
5213 if (intel_syntax
5214 && i.tm.opcode_modifier.ignoresize
5215 && i.tm.opcode_modifier.no_wsuf)
5216 i.suffix = 0;
5217 else if (!check_word_reg ())
5218 return 0;
5219 }
5220 else if (i.suffix == XMMWORD_MNEM_SUFFIX
5221 || i.suffix == YMMWORD_MNEM_SUFFIX
5222 || i.suffix == ZMMWORD_MNEM_SUFFIX)
5223 {
5224 /* Skip if the instruction has x/y/z suffix. match_template
5225 should check if it is a valid suffix. */
5226 }
5227 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
5228 /* Do nothing if the instruction is going to ignore the prefix. */
5229 ;
5230 else
5231 abort ();
5232 }
5233 else if (i.tm.opcode_modifier.defaultsize
5234 && !i.suffix
5235 /* exclude fldenv/frstor/fsave/fstenv */
5236 && i.tm.opcode_modifier.no_ssuf)
5237 {
5238 i.suffix = stackop_size;
5239 }
5240 else if (intel_syntax
5241 && !i.suffix
5242 && (i.tm.operand_types[0].bitfield.jumpabsolute
5243 || i.tm.opcode_modifier.jumpbyte
5244 || i.tm.opcode_modifier.jumpintersegment
5245 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
5246 && i.tm.extension_opcode <= 3)))
5247 {
5248 switch (flag_code)
5249 {
5250 case CODE_64BIT:
5251 if (!i.tm.opcode_modifier.no_qsuf)
5252 {
5253 i.suffix = QWORD_MNEM_SUFFIX;
5254 break;
5255 }
5256 case CODE_32BIT:
5257 if (!i.tm.opcode_modifier.no_lsuf)
5258 i.suffix = LONG_MNEM_SUFFIX;
5259 break;
5260 case CODE_16BIT:
5261 if (!i.tm.opcode_modifier.no_wsuf)
5262 i.suffix = WORD_MNEM_SUFFIX;
5263 break;
5264 }
5265 }
5266
5267 if (!i.suffix)
5268 {
5269 if (!intel_syntax)
5270 {
5271 if (i.tm.opcode_modifier.w)
5272 {
5273 as_bad (_("no instruction mnemonic suffix given and "
5274 "no register operands; can't size instruction"));
5275 return 0;
5276 }
5277 }
5278 else
5279 {
5280 unsigned int suffixes;
5281
5282 suffixes = !i.tm.opcode_modifier.no_bsuf;
5283 if (!i.tm.opcode_modifier.no_wsuf)
5284 suffixes |= 1 << 1;
5285 if (!i.tm.opcode_modifier.no_lsuf)
5286 suffixes |= 1 << 2;
5287 if (!i.tm.opcode_modifier.no_ldsuf)
5288 suffixes |= 1 << 3;
5289 if (!i.tm.opcode_modifier.no_ssuf)
5290 suffixes |= 1 << 4;
5291 if (!i.tm.opcode_modifier.no_qsuf)
5292 suffixes |= 1 << 5;
5293
5294 /* There are more than suffix matches. */
5295 if (i.tm.opcode_modifier.w
5296 || ((suffixes & (suffixes - 1))
5297 && !i.tm.opcode_modifier.defaultsize
5298 && !i.tm.opcode_modifier.ignoresize))
5299 {
5300 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
5301 return 0;
5302 }
5303 }
5304 }
5305
5306 /* Change the opcode based on the operand size given by i.suffix;
5307 We don't need to change things for byte insns. */
5308
5309 if (i.suffix
5310 && i.suffix != BYTE_MNEM_SUFFIX
5311 && i.suffix != XMMWORD_MNEM_SUFFIX
5312 && i.suffix != YMMWORD_MNEM_SUFFIX
5313 && i.suffix != ZMMWORD_MNEM_SUFFIX)
5314 {
5315 /* It's not a byte, select word/dword operation. */
5316 if (i.tm.opcode_modifier.w)
5317 {
5318 if (i.tm.opcode_modifier.shortform)
5319 i.tm.base_opcode |= 8;
5320 else
5321 i.tm.base_opcode |= 1;
5322 }
5323
5324 /* Now select between word & dword operations via the operand
5325 size prefix, except for instructions that will ignore this
5326 prefix anyway. */
5327 if (i.tm.opcode_modifier.addrprefixop0)
5328 {
5329 /* The address size override prefix changes the size of the
5330 first operand. */
5331 if ((flag_code == CODE_32BIT
5332 && i.op->regs[0].reg_type.bitfield.reg16)
5333 || (flag_code != CODE_32BIT
5334 && i.op->regs[0].reg_type.bitfield.reg32))
5335 if (!add_prefix (ADDR_PREFIX_OPCODE))
5336 return 0;
5337 }
5338 else if (i.suffix != QWORD_MNEM_SUFFIX
5339 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
5340 && !i.tm.opcode_modifier.ignoresize
5341 && !i.tm.opcode_modifier.floatmf
5342 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
5343 || (flag_code == CODE_64BIT
5344 && i.tm.opcode_modifier.jumpbyte)))
5345 {
5346 unsigned int prefix = DATA_PREFIX_OPCODE;
5347
5348 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
5349 prefix = ADDR_PREFIX_OPCODE;
5350
5351 if (!add_prefix (prefix))
5352 return 0;
5353 }
5354
5355 /* Set mode64 for an operand. */
5356 if (i.suffix == QWORD_MNEM_SUFFIX
5357 && flag_code == CODE_64BIT
5358 && !i.tm.opcode_modifier.norex64)
5359 {
5360 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5361 need rex64. cmpxchg8b is also a special case. */
5362 if (! (i.operands == 2
5363 && i.tm.base_opcode == 0x90
5364 && i.tm.extension_opcode == None
5365 && operand_type_equal (&i.types [0], &acc64)
5366 && operand_type_equal (&i.types [1], &acc64))
5367 && ! (i.operands == 1
5368 && i.tm.base_opcode == 0xfc7
5369 && i.tm.extension_opcode == 1
5370 && !operand_type_check (i.types [0], reg)
5371 && operand_type_check (i.types [0], anymem)))
5372 i.rex |= REX_W;
5373 }
5374
5375 /* Size floating point instruction. */
5376 if (i.suffix == LONG_MNEM_SUFFIX)
5377 if (i.tm.opcode_modifier.floatmf)
5378 i.tm.base_opcode ^= 4;
5379 }
5380
5381 return 1;
5382 }
5383
/* Verify that the register operands are compatible with a byte (`b')
   suffix, warning where a 16/32-bit register will be silently lowered
   to its 8-bit part.  Return 1 if OK, 0 after issuing an error.  */

static int
check_byte_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    {
      /* If this is an eight bit register, it's OK.  If it's the 16 or
	 32 bit version of an eight bit register, we will just use the
	 low portion, and that's OK too.  */
      if (i.types[op].bitfield.reg8)
	continue;

      /* I/O port address operands are OK too.  */
      if (i.tm.operand_types[op].bitfield.inoutportreg)
	continue;

      /* crc32 doesn't generate this warning.  */
      if (i.tm.base_opcode == 0xf20f38f0)
	continue;

      /* Registers 0..3 (ax/cx/dx/bx and their wider forms) have a
	 directly addressable low byte, so we can lower to it — but
	 only outside 64-bit mode.  */
      if ((i.types[op].bitfield.reg16
	   || i.types[op].bitfield.reg32
	   || i.types[op].bitfield.reg64)
	  && i.op[op].regs->reg_num < 4
	  /* Prohibit these changes in 64bit mode, since the lowering
	     would be more complicated.  */
	  && flag_code != CODE_64BIT)
	{
#if REGISTER_WARNINGS
	  if (!quiet_warnings)
	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		     register_prefix,
		     (i.op[op].regs + (i.types[op].bitfield.reg16
				       ? REGNAM_AL - REGNAM_AX
				       : REGNAM_AL - REGNAM_EAX))->reg_name,
		     register_prefix,
		     i.op[op].regs->reg_name,
		     i.suffix);
#endif
	  continue;
	}
      /* Any other register is bad.  */
      if (i.types[op].bitfield.reg16
	  || i.types[op].bitfield.reg32
	  || i.types[op].bitfield.reg64
	  || i.types[op].bitfield.regmmx
	  || i.types[op].bitfield.regxmm
	  || i.types[op].bitfield.regymm
	  || i.types[op].bitfield.regzmm
	  || i.types[op].bitfield.sreg2
	  || i.types[op].bitfield.sreg3
	  || i.types[op].bitfield.control
	  || i.types[op].bitfield.debug
	  || i.types[op].bitfield.test
	  || i.types[op].bitfield.floatreg
	  || i.types[op].bitfield.floatacc)
	{
	  as_bad (_("`%s%s' not allowed with `%s%c'"),
		  register_prefix,
		  i.op[op].regs->reg_name,
		  i.tm.name,
		  i.suffix);
	  return 0;
	}
    }
  return 1;
}
5452
/* Verify that the register operands are compatible with a long (`l')
   suffix.  Return 1 if OK, 0 after issuing an error.  */

static int
check_long_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the e prefix on a general reg is missing, i.e. a 16-bit
       register was given where a 32-bit one is expected.  */
    else if ((!quiet_warnings || flag_code == CODE_64BIT)
	     && i.types[op].bitfield.reg16
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* Prohibit these changes in the 64bit mode, since the
	   lowering is more complicated.  */
	if (flag_code == CODE_64BIT)
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
#if REGISTER_WARNINGS
	as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
		 register_prefix,
		 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
		 register_prefix, i.op[op].regs->reg_name, i.suffix);
#endif
      }
    /* Warn if the r prefix on a general reg is present, i.e. a 64-bit
       register was given where a 32-bit one is expected.  */
    else if (i.types[op].bitfield.reg64
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	if (intel_syntax
	    && i.tm.opcode_modifier.toqword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to QWORD.  We want REX byte.  */
	    i.suffix = QWORD_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
5517
/* Verify that the register operands are compatible with a quad (`q')
   suffix.  Return 1 if OK, 0 after issuing an error.  */

static int
check_qword_reg (void)
{
  int op;

  for (op = i.operands; --op >= 0; )
    /* Reject eight bit registers, except where the template requires
       them. (eg. movzb)  */
    if (i.types[op].bitfield.reg8
	&& (i.tm.operand_types[op].bitfield.reg16
	    || i.tm.operand_types[op].bitfield.reg32
	    || i.tm.operand_types[op].bitfield.acc))
      {
	as_bad (_("`%s%s' not allowed with `%s%c'"),
		register_prefix,
		i.op[op].regs->reg_name,
		i.tm.name,
		i.suffix);
	return 0;
      }
    /* Warn if the r prefix on a general reg is missing, i.e. a 16- or
       32-bit register was given where a 64-bit one is expected.  */
    else if ((i.types[op].bitfield.reg16
	      || i.types[op].bitfield.reg32)
	     && (i.tm.operand_types[op].bitfield.reg32
		 || i.tm.operand_types[op].bitfield.acc))
      {
	/* In Intel syntax a ToDword template lets us turn the
	   operation into a DWORD one instead of rejecting the
	   operand; anything else is an error.  */
	if (intel_syntax
	    && i.tm.opcode_modifier.todword
	    && !i.types[0].bitfield.regxmm)
	  {
	    /* Convert to DWORD.  We don't want REX byte.  */
	    i.suffix = LONG_MNEM_SUFFIX;
	  }
	else
	  {
	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
		    register_prefix, i.op[op].regs->reg_name,
		    i.suffix);
	    return 0;
	  }
      }
  return 1;
}
5563
5564 static int
5565 check_word_reg (void)
5566 {
5567 int op;
5568 for (op = i.operands; --op >= 0;)
5569 /* Reject eight bit registers, except where the template requires
5570 them. (eg. movzb) */
5571 if (i.types[op].bitfield.reg8
5572 && (i.tm.operand_types[op].bitfield.reg16
5573 || i.tm.operand_types[op].bitfield.reg32
5574 || i.tm.operand_types[op].bitfield.acc))
5575 {
5576 as_bad (_("`%s%s' not allowed with `%s%c'"),
5577 register_prefix,
5578 i.op[op].regs->reg_name,
5579 i.tm.name,
5580 i.suffix);
5581 return 0;
5582 }
5583 /* Warn if the e or r prefix on a general reg is present. */
5584 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5585 && (i.types[op].bitfield.reg32
5586 || i.types[op].bitfield.reg64)
5587 && (i.tm.operand_types[op].bitfield.reg16
5588 || i.tm.operand_types[op].bitfield.acc))
5589 {
5590 /* Prohibit these changes in the 64bit mode, since the
5591 lowering is more complicated. */
5592 if (flag_code == CODE_64BIT)
5593 {
5594 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5595 register_prefix, i.op[op].regs->reg_name,
5596 i.suffix);
5597 return 0;
5598 }
5599 #if REGISTER_WARNINGS
5600 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5601 register_prefix,
5602 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5603 register_prefix, i.op[op].regs->reg_name, i.suffix);
5604 #endif
5605 }
5606 return 1;
5607 }
5608
/* Narrow the recorded type of immediate operand J (i.types[j]) down to
   a single immediate width.  If the operand already matches exactly one
   of the canonical immediate types nothing is changed.  Otherwise the
   mnemonic suffix, or failing that the current code size combined with
   any data size prefix, selects the width.  Returns 1 on success, 0
   after a diagnostic when no unique width can be determined.  */
static int
update_imm (unsigned int j)
{
  i386_operand_type overlap = i.types[j];
  /* Only act when the operand is an immediate of ambiguous width,
     i.e. more than one immNN bit is set.  */
  if ((overlap.bitfield.imm8
       || overlap.bitfield.imm8s
       || overlap.bitfield.imm16
       || overlap.bitfield.imm32
       || overlap.bitfield.imm32s
       || overlap.bitfield.imm64)
      && !operand_type_equal (&overlap, &imm8)
      && !operand_type_equal (&overlap, &imm8s)
      && !operand_type_equal (&overlap, &imm16)
      && !operand_type_equal (&overlap, &imm32)
      && !operand_type_equal (&overlap, &imm32s)
      && !operand_type_equal (&overlap, &imm64))
    {
      if (i.suffix)
	{
	  /* An explicit suffix picks the width directly; keep only the
	     bits of the requested width that the overlap allows.  */
	  i386_operand_type temp;

	  operand_type_set (&temp, 0);
	  if (i.suffix == BYTE_MNEM_SUFFIX)
	    {
	      temp.bitfield.imm8 = overlap.bitfield.imm8;
	      temp.bitfield.imm8s = overlap.bitfield.imm8s;
	    }
	  else if (i.suffix == WORD_MNEM_SUFFIX)
	    temp.bitfield.imm16 = overlap.bitfield.imm16;
	  else if (i.suffix == QWORD_MNEM_SUFFIX)
	    {
	      /* A `q' suffix allows a sign-extended 32 bit form too.  */
	      temp.bitfield.imm64 = overlap.bitfield.imm64;
	      temp.bitfield.imm32s = overlap.bitfield.imm32s;
	    }
	  else
	    temp.bitfield.imm32 = overlap.bitfield.imm32;
	  overlap = temp;
	}
      else if (operand_type_equal (&overlap, &imm16_32_32s)
	       || operand_type_equal (&overlap, &imm16_32)
	       || operand_type_equal (&overlap, &imm16_32s))
	{
	  /* No suffix: the operand size defaults from the mode, and a
	     data size prefix flips it (16 bit mode with prefix => 32,
	     32/64 bit mode with prefix => 16).  */
	  if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
	    overlap = imm16;
	  else
	    overlap = imm32s;
	}
      /* If the above still did not resolve to exactly one width, the
	 size is genuinely ambiguous.  */
      if (!operand_type_equal (&overlap, &imm8)
	  && !operand_type_equal (&overlap, &imm8s)
	  && !operand_type_equal (&overlap, &imm16)
	  && !operand_type_equal (&overlap, &imm32)
	  && !operand_type_equal (&overlap, &imm32s)
	  && !operand_type_equal (&overlap, &imm64))
	{
	  as_bad (_("no instruction mnemonic suffix given; "
		    "can't determine immediate size"));
	  return 0;
	}
    }
  i.types[j] = overlap;

  return 1;
}
5672
5673 static int
5674 finalize_imm (void)
5675 {
5676 unsigned int j, n;
5677
5678 /* Update the first 2 immediate operands. */
5679 n = i.operands > 2 ? 2 : i.operands;
5680 if (n)
5681 {
5682 for (j = 0; j < n; j++)
5683 if (update_imm (j) == 0)
5684 return 0;
5685
5686 /* The 3rd operand can't be immediate operand. */
5687 gas_assert (operand_type_check (i.types[2], imm) == 0);
5688 }
5689
5690 return 1;
5691 }
5692
5693 static int
5694 bad_implicit_operand (int xmm)
5695 {
5696 const char *ireg = xmm ? "xmm0" : "ymm0";
5697
5698 if (intel_syntax)
5699 as_bad (_("the last operand of `%s' must be `%s%s'"),
5700 i.tm.name, register_prefix, ireg);
5701 else
5702 as_bad (_("the first operand of `%s' must be `%s%s'"),
5703 i.tm.name, register_prefix, ireg);
5704 return 0;
5705 }
5706
/* Post-match operand processing: materialize implicit operands
   (sse2avx templates, implicit xmm0/ymm0/zmm0, RegKludge), encode
   short-form register/segment opcodes, build the ModRM byte for modrm
   templates, and emit any needed segment override prefix.  Mutates the
   global instruction `i' (including i.tm's operand list, which
   build_modrm_byte relies on).  Returns 1 on success, 0 after a
   diagnostic.  */
static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      /* SSE instruction assembled with its AVX (VEX-encoded) form:
	 the destination register must be replicated into the extra
	 VEX source slot.  */
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (register_number (i.op[0].regs) != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  */
	  /* Shift all operands up by one to make room at slot 0.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* Two new operands: the inserted xmm0 and the duplicated
	     destination below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
duplicate:
	  /* Append a copy of the destination operand.  */
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0/zmm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)
		      || operand_type_equal (&i.types[0], &regzmm)));
      if (register_number (i.op[0].regs) != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit first operand; it is not encoded.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      /* Register is encoded directly in the opcode, no ModRM byte.  */
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  /* Segment register push/pop short form.  */
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      /* reg_num 1 is %cs; popping it is architecturally
		 invalid.  */
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      /* mov with moffs operand defaults to %ds.  */
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
5929
/* Build the ModRM byte - and, where the addressing mode requires them,
   the SIB byte, REX/VREX bits and VEX register specifier - from the
   operands collected in the global `i'.  Returns the default segment
   register implied by the chosen addressing mode (used by the caller
   to suppress redundant segment prefixes), or 0 if none was
   determined.  */
static const seg_entry *
build_modrm_byte (void)
{
  const seg_entry *default_seg = 0;
  unsigned int source, dest;
  int vex_3_sources;

  /* The first operand of instructions with VEX prefix and 3 sources
     must be VEX_Imm4.  */
  vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
  if (vex_3_sources)
    {
      unsigned int nds, reg_slot;
      expressionS *exp;

      if (i.tm.opcode_modifier.veximmext
	  && i.tm.opcode_modifier.immext)
	{
	  dest = i.operands - 2;
	  gas_assert (dest == 3);
	}
      else
	dest = i.operands - 1;
      nds = dest - 1;

      /* There are 2 kinds of instructions:
	 1. 5 operands: 4 register operands or 3 register operands
	 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
	 VexW0 or VexW1.  The destination must be either XMM, YMM or
	 ZMM register.
	 2. 4 operands: 4 register operands or 3 register operands
	 plus 1 memory operand, VexXDS, and VexImmExt  */
      gas_assert ((i.reg_operands == 4
		   || (i.reg_operands == 3 && i.mem_operands == 1))
		  && i.tm.opcode_modifier.vexvvvv == VEXXDS
		  && (i.tm.opcode_modifier.veximmext
		      || (i.imm_operands == 1
			  && i.types[0].bitfield.vec_imm4
			  && (i.tm.opcode_modifier.vexw == VEXW0
			      || i.tm.opcode_modifier.vexw == VEXW1)
			  && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
			      || operand_type_equal (&i.tm.operand_types[dest], &regymm)
			      || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));

      if (i.imm_operands == 0)
	{
	  /* When there is no immediate operand, generate an 8bit
	     immediate operand to encode the first operand.  */
	  exp = &im_expressions[i.imm_operands++];
	  i.op[i.operands].imms = exp;
	  i.types[i.operands] = imm8;
	  i.operands++;
	  /* If VexW1 is set, the first operand is the source and
	     the second operand is encoded in the immediate operand.  */
	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    {
	      source = 0;
	      reg_slot = 1;
	    }
	  else
	    {
	      source = 1;
	      reg_slot = 0;
	    }

	  /* FMA swaps REG and NDS.  */
	  if (i.tm.cpu_flags.bitfield.cpufma)
	    {
	      unsigned int tmp;
	      tmp = reg_slot;
	      reg_slot = nds;
	      nds = tmp;
	    }

	  gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
					  &regxmm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regymm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regzmm));
	  /* The register encoded in the immediate lives in bits 7:4
	     of the imm8 byte.  */
	  exp->X_op = O_constant;
	  exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
	}
      else
	{
	  unsigned int imm_slot;

	  if (i.tm.opcode_modifier.vexw == VEXW0)
	    {
	      /* If VexW0 is set, the third operand is the source and
		 the second operand is encoded in the immediate
		 operand.  */
	      source = 2;
	      reg_slot = 1;
	    }
	  else
	    {
	      /* VexW1 is set, the second operand is the source and
		 the third operand is encoded in the immediate
		 operand.  */
	      source = 1;
	      reg_slot = 2;
	    }

	  if (i.tm.opcode_modifier.immext)
	    {
	      /* When ImmExt is set, the immdiate byte is the last
		 operand.  */
	      imm_slot = i.operands - 1;
	      source--;
	      reg_slot--;
	    }
	  else
	    {
	      imm_slot = 0;

	      /* Turn on Imm8 so that output_imm will generate it.  */
	      i.types[imm_slot].bitfield.imm8 = 1;
	    }

	  gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
					  &regxmm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regymm)
		      || operand_type_equal (&i.tm.operand_types[reg_slot],
					     &regzmm));
	  /* OR the register number into bits 7:4 of the existing
	     immediate.  */
	  i.op[imm_slot].imms->X_add_number
	    |= register_number (i.op[reg_slot].regs) << 4;
	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
	}

      gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
		  || operand_type_equal (&i.tm.operand_types[nds],
					 &regymm)
		  || operand_type_equal (&i.tm.operand_types[nds],
					 &regzmm));
      i.vex.register_specifier = i.op[nds].regs;
    }
  else
    source = dest = 0;

  /* i.reg_operands MUST be the number of real register operands;
     implicit registers do not count.  If there are 3 register
     operands, it must be a instruction with VexNDS.  For a
     instruction with VexNDD, the destination register is encoded
     in VEX prefix.  If there are 4 register operands, it must be
     a instruction with VEX prefix and 3 sources.  */
  if (i.mem_operands == 0
      && ((i.reg_operands == 2
	   && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
	  || (i.reg_operands == 3
	      && i.tm.opcode_modifier.vexvvvv == VEXXDS)
	  || (i.reg_operands == 4 && vex_3_sources)))
    {
      /* Register-only forms: pick the SOURCE operand index based on
	 the total operand count, then derive DEST from it.  */
      switch (i.operands)
	{
	case 2:
	  source = 0;
	  break;
	case 3:
	  /* When there are 3 operands, one of them may be immediate,
	     which may be the first or the last operand.  Otherwise,
	     the first operand must be shift count register (cl) or it
	     is an instruction with VexNDS.  */
	  gas_assert (i.imm_operands == 1
		      || (i.imm_operands == 0
			  && (i.tm.opcode_modifier.vexvvvv == VEXXDS
			      || i.types[0].bitfield.shiftcount)));
	  if (operand_type_check (i.types[0], imm)
	      || i.types[0].bitfield.shiftcount)
	    source = 1;
	  else
	    source = 0;
	  break;
	case 4:
	  /* When there are 4 operands, the first two must be 8bit
	     immediate operands. The source operand will be the 3rd
	     one.

	     For instructions with VexNDS, if the first operand
	     an imm8, the source operand is the 2nd one.  If the last
	     operand is imm8, the source operand is the first one.  */
	  gas_assert ((i.imm_operands == 2
		       && i.types[0].bitfield.imm8
		       && i.types[1].bitfield.imm8)
		      || (i.tm.opcode_modifier.vexvvvv == VEXXDS
			  && i.imm_operands == 1
			  && (i.types[0].bitfield.imm8
			      || i.types[i.operands - 1].bitfield.imm8
			      || i.rounding)));
	  if (i.imm_operands == 2)
	    source = 2;
	  else
	    {
	      if (i.types[0].bitfield.imm8)
		source = 1;
	      else
		source = 0;
	    }
	  break;
	case 5:
	  if (i.tm.opcode_modifier.evex)
	    {
	      /* For EVEX instructions, when there are 5 operands, the
		 first one must be immediate operand.  If the second one
		 is immediate operand, the source operand is the 3th
		 one.  If the last one is immediate operand, the source
		 operand is the 2nd one.  */
	      gas_assert (i.imm_operands == 2
			  && i.tm.opcode_modifier.sae
			  && operand_type_check (i.types[0], imm));
	      if (operand_type_check (i.types[1], imm))
		source = 2;
	      else if (operand_type_check (i.types[4], imm))
		source = 1;
	      else
		abort ();
	    }
	  break;
	default:
	  abort ();
	}

      if (!vex_3_sources)
	{
	  dest = source + 1;

	  /* RC/SAE operand could be between DEST and SRC.  That happens
	     when one operand is GPR and the other one is XMM/YMM/ZMM
	     register.  */
	  if (i.rounding && i.rounding->operand == (int) dest)
	    dest++;

	  if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
	    {
	      /* For instructions with VexNDS, the register-only source
		 operand must be 32/64bit integer, XMM, YMM or ZMM
		 register.  It is encoded in VEX prefix.  We need to
		 clear RegMem bit before calling operand_type_equal.  */

	      i386_operand_type op;
	      unsigned int vvvv;

	      /* Check register-only source operand when two source
		 operands are swapped.  */
	      if (!i.tm.operand_types[source].bitfield.baseindex
		  && i.tm.operand_types[dest].bitfield.baseindex)
		{
		  vvvv = source;
		  source = dest;
		}
	      else
		vvvv = dest;

	      op = i.tm.operand_types[vvvv];
	      op.bitfield.regmem = 0;
	      /* NOTE(review): the `!op.bitfield.reg32 &&
		 op.bitfield.reg64' pairing below looks asymmetric
		 given the comment above says 32/64bit integer or
		 vector register - confirm against upstream
		 binutils.  */
	      if ((dest + 1) >= i.operands
		  || (!op.bitfield.reg32
		      && op.bitfield.reg64
		      && !operand_type_equal (&op, &regxmm)
		      && !operand_type_equal (&op, &regymm)
		      && !operand_type_equal (&op, &regzmm)
		      && !operand_type_equal (&op, &regmask)))
		abort ();
	      i.vex.register_specifier = i.op[vvvv].regs;
	      dest++;
	    }
	}

      i.rm.mode = 3;
      /* One of the register operands will be encoded in the i.tm.reg
	 field, the other in the combined i.tm.mode and i.tm.regmem
	 fields.  If no form of this instruction supports a memory
	 destination operand, then we assume the source operand may
	 sometimes be a memory operand and so we need to store the
	 destination in the i.rm.reg field.  */
      if (!i.tm.operand_types[dest].bitfield.regmem
	  && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
	{
	  i.rm.reg = i.op[dest].regs->reg_num;
	  i.rm.regmem = i.op[source].regs->reg_num;
	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;
	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_R;
	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_B;
	}
      else
	{
	  i.rm.reg = i.op[source].regs->reg_num;
	  i.rm.regmem = i.op[dest].regs->reg_num;
	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_B;
	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;
	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
	    i.vrex |= REX_R;
	}
      if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
	{
	  /* Outside 64bit mode REX bits can only have come from
	     control register operands (mov to/from %cr8 etc.), which
	     are encoded with a LOCK prefix instead.  */
	  if (!i.types[0].bitfield.control
	      && !i.types[1].bitfield.control)
	    abort ();
	  i.rex &= ~(REX_R | REX_B);
	  add_prefix (LOCK_PREFIX_OPCODE);
	}
    }
  else
    {				/* If it's not 2 reg operands...  */
      unsigned int mem;

      if (i.mem_operands)
	{
	  unsigned int fake_zero_displacement = 0;
	  unsigned int op;

	  /* Locate the (single) memory operand.  */
	  for (op = 0; op < i.operands; op++)
	    if (operand_type_check (i.types[op], anymem))
	      break;
	  gas_assert (op < i.operands);

	  if (i.tm.opcode_modifier.vecsib)
	    {
	      /* VSIB addressing: the index register is a vector
		 register and a SIB byte is mandatory.  */
	      if (i.index_reg->reg_num == RegEiz
		  || i.index_reg->reg_num == RegRiz)
		abort ();

	      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
	      if (!i.base_reg)
		{
		  i.sib.base = NO_BASE_REGISTER;
		  i.sib.scale = i.log2_scale_factor;
		  /* No Vec_Disp8 if there is no base.  */
		  i.types[op].bitfield.vec_disp8 = 0;
		  i.types[op].bitfield.disp8 = 0;
		  i.types[op].bitfield.disp16 = 0;
		  i.types[op].bitfield.disp64 = 0;
		  if (flag_code != CODE_64BIT)
		    {
		      /* Must be 32 bit */
		      i.types[op].bitfield.disp32 = 1;
		      i.types[op].bitfield.disp32s = 0;
		    }
		  else
		    {
		      i.types[op].bitfield.disp32 = 0;
		      i.types[op].bitfield.disp32s = 1;
		    }
		}
	      i.sib.index = i.index_reg->reg_num;
	      if ((i.index_reg->reg_flags & RegRex) != 0)
		i.rex |= REX_X;
	      if ((i.index_reg->reg_flags & RegVRex) != 0)
		i.vrex |= REX_X;
	    }

	  default_seg = &ds;

	  if (i.base_reg == 0)
	    {
	      i.rm.mode = 0;
	      if (!i.disp_operands)
		{
		  fake_zero_displacement = 1;
		  /* Instructions with VSIB byte need 32bit displacement
		     if there is no base register.  */
		  if (i.tm.opcode_modifier.vecsib)
		    i.types[op].bitfield.disp32 = 1;
		}
	      if (i.index_reg == 0)
		{
		  gas_assert (!i.tm.opcode_modifier.vecsib);
		  /* Operand is just <disp>  */
		  if (flag_code == CODE_64BIT)
		    {
		      /* 64bit mode overwrites the 32bit absolute
			 addressing by RIP relative addressing and
			 absolute addressing is encoded by one of the
			 redundant SIB forms.  */
		      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		      i.sib.base = NO_BASE_REGISTER;
		      i.sib.index = NO_INDEX_REGISTER;
		      i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
				     ? disp32s : disp32);
		    }
		  else if ((flag_code == CODE_16BIT)
			   ^ (i.prefix[ADDR_PREFIX] != 0))
		    {
		      i.rm.regmem = NO_BASE_REGISTER_16;
		      i.types[op] = disp16;
		    }
		  else
		    {
		      i.rm.regmem = NO_BASE_REGISTER;
		      i.types[op] = disp32;
		    }
		}
	      else if (!i.tm.opcode_modifier.vecsib)
		{
		  /* !i.base_reg && i.index_reg  */
		  if (i.index_reg->reg_num == RegEiz
		      || i.index_reg->reg_num == RegRiz)
		    i.sib.index = NO_INDEX_REGISTER;
		  else
		    i.sib.index = i.index_reg->reg_num;
		  i.sib.base = NO_BASE_REGISTER;
		  i.sib.scale = i.log2_scale_factor;
		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		  /* No Vec_Disp8 if there is no base.  */
		  i.types[op].bitfield.vec_disp8 = 0;
		  i.types[op].bitfield.disp8 = 0;
		  i.types[op].bitfield.disp16 = 0;
		  i.types[op].bitfield.disp64 = 0;
		  if (flag_code != CODE_64BIT)
		    {
		      /* Must be 32 bit */
		      i.types[op].bitfield.disp32 = 1;
		      i.types[op].bitfield.disp32s = 0;
		    }
		  else
		    {
		      i.types[op].bitfield.disp32 = 0;
		      i.types[op].bitfield.disp32s = 1;
		    }
		  if ((i.index_reg->reg_flags & RegRex) != 0)
		    i.rex |= REX_X;
		}
	    }
	  /* RIP addressing for 64bit mode.  */
	  else if (i.base_reg->reg_num == RegRip ||
		   i.base_reg->reg_num == RegEip)
	    {
	      gas_assert (!i.tm.opcode_modifier.vecsib);
	      i.rm.regmem = NO_BASE_REGISTER;
	      i.types[op].bitfield.disp8 = 0;
	      i.types[op].bitfield.disp16 = 0;
	      i.types[op].bitfield.disp32 = 0;
	      i.types[op].bitfield.disp32s = 1;
	      i.types[op].bitfield.disp64 = 0;
	      i.types[op].bitfield.vec_disp8 = 0;
	      i.flags[op] |= Operand_PCrel;
	      if (! i.disp_operands)
		fake_zero_displacement = 1;
	    }
	  else if (i.base_reg->reg_type.bitfield.reg16)
	    {
	      /* 16 bit addressing modes have fixed base/index
		 combinations encoded directly in ModRM.rm.  */
	      gas_assert (!i.tm.opcode_modifier.vecsib);
	      switch (i.base_reg->reg_num)
		{
		case 3: /* (%bx)  */
		  if (i.index_reg == 0)
		    i.rm.regmem = 7;
		  else /* (%bx,%si) -> 0, or (%bx,%di) -> 1  */
		    i.rm.regmem = i.index_reg->reg_num - 6;
		  break;
		case 5: /* (%bp)  */
		  default_seg = &ss;
		  if (i.index_reg == 0)
		    {
		      i.rm.regmem = 6;
		      if (operand_type_check (i.types[op], disp) == 0)
			{
			  /* fake (%bp) into 0(%bp)  */
			  if (i.tm.operand_types[op].bitfield.vec_disp8)
			    i.types[op].bitfield.vec_disp8 = 1;
			  else
			    i.types[op].bitfield.disp8 = 1;
			  fake_zero_displacement = 1;
			}
		    }
		  else /* (%bp,%si) -> 2, or (%bp,%di) -> 3  */
		    i.rm.regmem = i.index_reg->reg_num - 6 + 2;
		  break;
		default: /* (%si) -> 4 or (%di) -> 5  */
		  i.rm.regmem = i.base_reg->reg_num - 6 + 4;
		}
	      i.rm.mode = mode_from_disp_size (i.types[op]);
	    }
	  else /* i.base_reg and 32/64 bit mode  */
	    {
	      if (flag_code == CODE_64BIT
		  && operand_type_check (i.types[op], disp))
		{
		  /* In 64bit mode only disp8/disp32(s) survive;
		     preserve the 8 bit flavours and pick the 32 bit
		     one by address size prefix.  */
		  i386_operand_type temp;
		  operand_type_set (&temp, 0);
		  temp.bitfield.disp8 = i.types[op].bitfield.disp8;
		  temp.bitfield.vec_disp8
		    = i.types[op].bitfield.vec_disp8;
		  i.types[op] = temp;
		  if (i.prefix[ADDR_PREFIX] == 0)
		    i.types[op].bitfield.disp32s = 1;
		  else
		    i.types[op].bitfield.disp32 = 1;
		}

	      if (!i.tm.opcode_modifier.vecsib)
		i.rm.regmem = i.base_reg->reg_num;
	      if ((i.base_reg->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	      i.sib.base = i.base_reg->reg_num;
	      /* x86-64 ignores REX prefix bit here to avoid decoder
		 complications.  */
	      if (!(i.base_reg->reg_flags & RegRex)
		  && (i.base_reg->reg_num == EBP_REG_NUM
		      || i.base_reg->reg_num == ESP_REG_NUM))
		default_seg = &ss;
	      if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
		{
		  /* (%ebp)/(%rbp) without displacement must be encoded
		     with an explicit disp8 of zero.  */
		  fake_zero_displacement = 1;
		  if (i.tm.operand_types [op].bitfield.vec_disp8)
		    i.types[op].bitfield.vec_disp8 = 1;
		  else
		    i.types[op].bitfield.disp8 = 1;
		}
	      i.sib.scale = i.log2_scale_factor;
	      if (i.index_reg == 0)
		{
		  gas_assert (!i.tm.opcode_modifier.vecsib);
		  /* <disp>(%esp) becomes two byte modrm with no index
		     register.  We've already stored the code for esp
		     in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
		     Any base register besides %esp will not use the
		     extra modrm byte.  */
		  i.sib.index = NO_INDEX_REGISTER;
		}
	      else if (!i.tm.opcode_modifier.vecsib)
		{
		  if (i.index_reg->reg_num == RegEiz
		      || i.index_reg->reg_num == RegRiz)
		    i.sib.index = NO_INDEX_REGISTER;
		  else
		    i.sib.index = i.index_reg->reg_num;
		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
		  if ((i.index_reg->reg_flags & RegRex) != 0)
		    i.rex |= REX_X;
		}

	      if (i.disp_operands
		  && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		      || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
		i.rm.mode = 0;
	      else
		{
		  if (!fake_zero_displacement
		      && !i.disp_operands
		      && i.disp_encoding)
		    {
		      /* Honour an explicit {disp8}/{disp32} pseudo
			 prefix even with no displacement written.  */
		      fake_zero_displacement = 1;
		      if (i.disp_encoding == disp_encoding_8bit)
			i.types[op].bitfield.disp8 = 1;
		      else
			i.types[op].bitfield.disp32 = 1;
		    }
		  i.rm.mode = mode_from_disp_size (i.types[op]);
		}
	    }

	  if (fake_zero_displacement)
	    {
	      /* Fakes a zero displacement assuming that i.types[op]
		 holds the correct displacement size.  */
	      expressionS *exp;

	      gas_assert (i.op[op].disps == 0);
	      exp = &disp_expressions[i.disp_operands++];
	      i.op[op].disps = exp;
	      exp->X_op = O_constant;
	      exp->X_add_number = 0;
	      exp->X_add_symbol = (symbolS *) 0;
	      exp->X_op_symbol = (symbolS *) 0;
	    }

	  mem = op;
	}
      else
	mem = ~0;

      if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
	{
	  if (operand_type_check (i.types[0], imm))
	    i.vex.register_specifier = NULL;
	  else
	    {
	      /* VEX.vvvv encodes one of the sources when the first
		 operand is not an immediate.  */
	      if (i.tm.opcode_modifier.vexw == VEXW0)
		i.vex.register_specifier = i.op[0].regs;
	      else
		i.vex.register_specifier = i.op[1].regs;
	    }

	  /* Destination is a XMM register encoded in the ModRM.reg
	     and VEX.R bit.  */
	  i.rm.reg = i.op[2].regs->reg_num;
	  if ((i.op[2].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_R;

	  /* ModRM.rm and VEX.B encodes the other source.  */
	  if (!i.mem_operands)
	    {
	      i.rm.mode = 3;

	      if (i.tm.opcode_modifier.vexw == VEXW0)
		i.rm.regmem = i.op[1].regs->reg_num;
	      else
		i.rm.regmem = i.op[0].regs->reg_num;

	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	    }
	}
      else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
	{
	  /* LWP/TBM style: VEX.vvvv holds the third operand.  */
	  i.vex.register_specifier = i.op[2].regs;
	  if (!i.mem_operands)
	    {
	      i.rm.mode = 3;
	      i.rm.regmem = i.op[1].regs->reg_num;
	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
		i.rex |= REX_B;
	    }
	}
      /* Fill in i.rm.reg or i.rm.regmem field with register operand
	 (if any) based on i.tm.extension_opcode.  Again, we must be
	 careful to make sure that segment/control/debug/test/MMX
	 registers are coded into the i.rm.reg field.  */
      else if (i.reg_operands)
	{
	  unsigned int op;
	  unsigned int vex_reg = ~0;

	  /* Find the first register operand of any class.  */
	  for (op = 0; op < i.operands; op++)
	    if (i.types[op].bitfield.reg8
		|| i.types[op].bitfield.reg16
		|| i.types[op].bitfield.reg32
		|| i.types[op].bitfield.reg64
		|| i.types[op].bitfield.regmmx
		|| i.types[op].bitfield.regxmm
		|| i.types[op].bitfield.regymm
		|| i.types[op].bitfield.regbnd
		|| i.types[op].bitfield.regzmm
		|| i.types[op].bitfield.regmask
		|| i.types[op].bitfield.sreg2
		|| i.types[op].bitfield.sreg3
		|| i.types[op].bitfield.control
		|| i.types[op].bitfield.debug
		|| i.types[op].bitfield.test)
	      break;

	  if (vex_3_sources)
	    op = dest;
	  else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
	    {
	      /* For instructions with VexNDS, the register-only
		 source operand is encoded in VEX prefix.  */
	      gas_assert (mem != (unsigned int) ~0);

	      if (op > mem)
		{
		  vex_reg = op++;
		  gas_assert (op < i.operands);
		}
	      else
		{
		  /* Check register-only source operand when two source
		     operands are swapped.  */
		  if (!i.tm.operand_types[op].bitfield.baseindex
		      && i.tm.operand_types[op + 1].bitfield.baseindex)
		    {
		      vex_reg = op;
		      op += 2;
		      gas_assert (mem == (vex_reg + 1)
				  && op < i.operands);
		    }
		  else
		    {
		      vex_reg = op + 1;
		      gas_assert (vex_reg < i.operands);
		    }
		}
	    }
	  else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
	    {
	      /* For instructions with VexNDD, the register destination
		 is encoded in VEX prefix.  */
	      if (i.mem_operands == 0)
		{
		  /* There is no memory operand.  */
		  gas_assert ((op + 2) == i.operands);
		  vex_reg = op + 1;
		}
	      else
		{
		  /* There are only 2 operands.  */
		  gas_assert (op < 2 && i.operands == 2);
		  vex_reg = 1;
		}
	    }
	  else
	    gas_assert (op < i.operands);

	  if (vex_reg != (unsigned int) ~0)
	    {
	      /* Sanity-check the class of the VEX.vvvv operand.  */
	      i386_operand_type *type = &i.tm.operand_types[vex_reg];

	      if (type->bitfield.reg32 != 1
		  && type->bitfield.reg64 != 1
		  && !operand_type_equal (type, &regxmm)
		  && !operand_type_equal (type, &regymm)
		  && !operand_type_equal (type, &regzmm)
		  && !operand_type_equal (type, &regmask))
		abort ();

	      i.vex.register_specifier = i.op[vex_reg].regs;
	    }

	  /* Don't set OP operand twice.  */
	  if (vex_reg != op)
	    {
	      /* If there is an extension opcode to put here, the
		 register number must be put into the regmem field.  */
	      if (i.tm.extension_opcode != None)
		{
		  i.rm.regmem = i.op[op].regs->reg_num;
		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
		    i.rex |= REX_B;
		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
		    i.vrex |= REX_B;
		}
	      else
		{
		  i.rm.reg = i.op[op].regs->reg_num;
		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
		    i.rex |= REX_R;
		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
		    i.vrex |= REX_R;
		}
	    }

	  /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
	     must set it to 3 to indicate this is a register operand
	     in the regmem field.  */
	  if (!i.mem_operands)
	    i.rm.mode = 3;
	}

      /* Fill in i.rm.reg field with extension opcode (if any).  */
      if (i.tm.extension_opcode != None)
	i.rm.reg = i.tm.extension_opcode;
    }
  return default_seg;
}
6688
/* Output a relaxable branch (jmp/jcc with a symbolic target).  Any
   prefixes plus one opcode byte go into the fixed part of the frag;
   a machine-dependent relax entry is created so md_convert_frag can
   later choose the short or long displacement form.  */

static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  /* CODE16 marks 16-bit operand size; a data-size prefix toggles it.  */
  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  /* An explicit {disp32}-style encoding request forces the long form.  */
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  /* Count the prefix bytes we will emit ourselves, consuming them
     from i.prefixes as we go.  */
  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol; End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Select relax state: unconditional jump, conditional jump, or the
     pre-i386 conditional form which has no long variant.  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5,
	    ((!object_64bit
	      || i.reloc[0] != NO_RELOC
	      || (i.bnd_prefix == NULL && !add_bnd_prefix))
	     ? i.reloc[0]
	     : BFD_RELOC_X86_64_PC32_BND),
	    subtype, sym, off, p);
}
6779
/* Output a jump whose displacement size is fixed by the template —
   either a byte-displacement loop/jecxz or a word/dword-displacement
   jump — rather than chosen by relaxation.  A fixup is emitted for
   the displacement.  */

static void
output_jump (void)
{
  char *p;
  int size;
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      /* A data-size prefix toggles between 16- and 32-bit
	 displacement sizes.  */
      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
      i.prefixes -= 1;
    }

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (i.tm.opcode_length + size);
  switch (i.tm.opcode_length)
    {
    case 2:
      *p++ = i.tm.base_opcode >> 8;
      /* Fall through: the low opcode byte is emitted last.  */
    case 1:
      *p++ = i.tm.base_opcode;
      break;
    default:
      abort ();
    }

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1,
					       (i.bnd_prefix != NULL
						|| add_bnd_prefix),
					       i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6864
/* Output a direct inter-segment (far) jump or call: opcode, then the
   offset (2 or 4 bytes), then the 16-bit segment selector.  The
   selector must be an absolute constant.  */

static void
output_interseg_jump (void)
{
  char *p;
  int size;
  int prefix;
  int code16;

  code16 = 0;
  if (flag_code == CODE_16BIT)
    code16 = CODE16;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* A data-size prefix toggles between 16- and 32-bit offsets.  */
      code16 ^= CODE16;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  size = 4;
  if (code16)
    size = 2;

  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* 1 opcode; 2 segment; offset  */
  p = frag_more (prefix + 1 + 2 + size);

  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;

  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];

  *p++ = i.tm.base_opcode;
  if (i.op[1].imms->X_op == O_constant)
    {
      offsetT n = i.op[1].imms->X_add_number;

      /* Accept both a signed and an unsigned 16-bit interpretation
	 of a constant offset.  */
      if (size == 2
	  && !fits_in_unsigned_word (n)
	  && !fits_in_signed_word (n))
	{
	  as_bad (_("16-bit jump out of range"));
	  return;
	}
      md_number_to_chars (p, n, size);
    }
  else
    fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		 i.op[1].imms, 0, reloc (size, 0, 0, 0, i.reloc[1]));
  if (i.op[0].imms->X_op != O_constant)
    as_bad (_("can't handle non absolute segment in `%s'"),
	    i.tm.name);
  md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}
6928
/* Emit the fully-matched instruction described by the global I to
   the current frag.  Jumps are dispatched to the helpers above; all
   other instructions are emitted here as prefixes, optional VEX/EVEX
   prefix, opcode bytes, ModRM/SIB, then displacement and immediate
   bytes via output_disp/output_imm.  */

static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      /* Some processors fail on LOCK prefix. This options makes
	 assembler ignore LOCK prefix and serves as a workaround.  */
      if (omit_lock_prefix)
	{
	  if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
	    return;
	  i.prefix[LOCK_PREFIX] = 0;
	}

      /* Since the VEX/EVEX prefix contains the implicit prefix, we
	 don't need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
	{
	  /* High bytes of the template opcode encode a mandatory
	     prefix; peel it off and emit it as a real prefix byte.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
check_prefix:
		      /* Don't emit a duplicate repe prefix for
			 PadLock instructions that already have an
			 explicit one.  */
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
	  /* For x32, add a dummy REX_OPCODE prefix for mov/add with
	     R_X86_64_GOTTPOFF relocation so that linker can safely
	     perform IE->LE optimization.  */
	  if (x86_elf_abi == X86_64_X32_ABI
	      && i.operands == 2
	      && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
	      && i.prefix[REX_PREFIX] == 0)
	    add_prefix (REX_OPCODE);
#endif

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}
      else
	{
	  /* With a VEX/EVEX prefix, only segment and address-size
	     prefixes are emitted explicitly; everything else is
	     folded into the VEX encoding.  */
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* For EVEX instructions i.vrex should become 0 after
	     build_evex_prefix.  For VEX instructions upper 16 registers
	     aren't available, so VREX should be 0.  */
	  if (i.vrex)
	    abort ();
	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  /* Emit the third/fourth opcode bytes first, then fall out
	     to the common code emitting the final two bytes.  */
	  switch (i.tm.opcode_length)
	    {
	    case 4:
	      p = frag_more (4);
	      *p++ = (i.tm.base_opcode >> 24) & 0xff;
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386 */
}
7112
7113 /* Return the size of the displacement operand N. */
7114
7115 static int
7116 disp_size (unsigned int n)
7117 {
7118 int size = 4;
7119
7120 /* Vec_Disp8 has to be 8bit. */
7121 if (i.types[n].bitfield.vec_disp8)
7122 size = 1;
7123 else if (i.types[n].bitfield.disp64)
7124 size = 8;
7125 else if (i.types[n].bitfield.disp8)
7126 size = 1;
7127 else if (i.types[n].bitfield.disp16)
7128 size = 2;
7129 return size;
7130 }
7131
7132 /* Return the size of the immediate operand N. */
7133
7134 static int
7135 imm_size (unsigned int n)
7136 {
7137 int size = 4;
7138 if (i.types[n].bitfield.imm64)
7139 size = 8;
7140 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7141 size = 1;
7142 else if (i.types[n].bitfield.imm16)
7143 size = 2;
7144 return size;
7145 }
7146
7147 static void
7148 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
7149 {
7150 char *p;
7151 unsigned int n;
7152
7153 for (n = 0; n < i.operands; n++)
7154 {
7155 if (i.types[n].bitfield.vec_disp8
7156 || operand_type_check (i.types[n], disp))
7157 {
7158 if (i.op[n].disps->X_op == O_constant)
7159 {
7160 int size = disp_size (n);
7161 offsetT val = i.op[n].disps->X_add_number;
7162
7163 if (i.types[n].bitfield.vec_disp8)
7164 val >>= i.memshift;
7165 val = offset_in_range (val, size);
7166 p = frag_more (size);
7167 md_number_to_chars (p, val, size);
7168 }
7169 else
7170 {
7171 enum bfd_reloc_code_real reloc_type;
7172 int size = disp_size (n);
7173 int sign = i.types[n].bitfield.disp32s;
7174 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
7175
7176 /* We can't have 8 bit displacement here. */
7177 gas_assert (!i.types[n].bitfield.disp8);
7178
7179 /* The PC relative address is computed relative
7180 to the instruction boundary, so in case immediate
7181 fields follows, we need to adjust the value. */
7182 if (pcrel && i.imm_operands)
7183 {
7184 unsigned int n1;
7185 int sz = 0;
7186
7187 for (n1 = 0; n1 < i.operands; n1++)
7188 if (operand_type_check (i.types[n1], imm))
7189 {
7190 /* Only one immediate is allowed for PC
7191 relative address. */
7192 gas_assert (sz == 0);
7193 sz = imm_size (n1);
7194 i.op[n].disps->X_add_number -= sz;
7195 }
7196 /* We should find the immediate. */
7197 gas_assert (sz != 0);
7198 }
7199
7200 p = frag_more (size);
7201 reloc_type = reloc (size, pcrel, sign,
7202 (i.bnd_prefix != NULL
7203 || add_bnd_prefix),
7204 i.reloc[n]);
7205 if (GOT_symbol
7206 && GOT_symbol == i.op[n].disps->X_add_symbol
7207 && (((reloc_type == BFD_RELOC_32
7208 || reloc_type == BFD_RELOC_X86_64_32S
7209 || (reloc_type == BFD_RELOC_64
7210 && object_64bit))
7211 && (i.op[n].disps->X_op == O_symbol
7212 || (i.op[n].disps->X_op == O_add
7213 && ((symbol_get_value_expression
7214 (i.op[n].disps->X_op_symbol)->X_op)
7215 == O_subtract))))
7216 || reloc_type == BFD_RELOC_32_PCREL))
7217 {
7218 offsetT add;
7219
7220 if (insn_start_frag == frag_now)
7221 add = (p - frag_now->fr_literal) - insn_start_off;
7222 else
7223 {
7224 fragS *fr;
7225
7226 add = insn_start_frag->fr_fix - insn_start_off;
7227 for (fr = insn_start_frag->fr_next;
7228 fr && fr != frag_now; fr = fr->fr_next)
7229 add += fr->fr_fix;
7230 add += p - frag_now->fr_literal;
7231 }
7232
7233 if (!object_64bit)
7234 {
7235 reloc_type = BFD_RELOC_386_GOTPC;
7236 i.op[n].imms->X_add_number += add;
7237 }
7238 else if (reloc_type == BFD_RELOC_64)
7239 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7240 else
7241 /* Don't do the adjustment for x86-64, as there
7242 the pcrel addressing is relative to the _next_
7243 insn, and that is taken care of in other code. */
7244 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7245 }
7246 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7247 i.op[n].disps, pcrel, reloc_type);
7248 }
7249 }
7250 }
7251 }
7252
/* Emit the immediate bytes for the current instruction.  Constant
   immediates are written directly; symbolic ones get a fixup, with
   _GLOBAL_OFFSET_TABLE_ references converted to GOTPC relocations
   whose addend accounts for the immediate's position within the
   instruction.  INSN_START_FRAG/INSN_START_OFF locate the start of
   the instruction, which may span frags.  */

static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
      if (i.rounding && (int) n == i.rounding->operand)
	continue;

      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* Sign-extended 32-bit immediates in 64-bit operations
		 need a signed relocation.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, 0, i.reloc[n]);

	      /*   This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       *   The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       *   Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* Offset of the immediate from the start of the
		     instruction, chasing frags if it was split.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
7379 \f
7380 /* x86_cons_fix_new is called via the expression parsing code when a
7381 reloc is needed. We use this hook to get the correct .got reloc. */
7382 static int cons_sign = -1;
7383
7384 void
7385 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
7386 expressionS *exp, bfd_reloc_code_real_type r)
7387 {
7388 r = reloc (len, 0, cons_sign, 0, r);
7389
7390 #ifdef TE_PE
7391 if (exp->X_op == O_secrel)
7392 {
7393 exp->X_op = O_symbol;
7394 r = BFD_RELOC_32_SECREL;
7395 }
7396 #endif
7397
7398 fix_new_exp (frag, off, len, exp, 0, r);
7399 }
7400
7401 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7402 purpose of the `.dc.a' internal pseudo-op. */
7403
7404 int
7405 x86_address_bytes (void)
7406 {
7407 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7408 return 4;
7409 return stdoutput->arch_info->bits_per_address / 8;
7410 }
7411
7412 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7413 || defined (LEX_AT)
7414 # define lex_got(reloc, adjust, types, bnd_prefix) NULL
7415 #else
7416 /* Parse operands of the form
7417 <symbol>@GOTOFF+<nnn>
7418 and similar .plt or .got references.
7419
7420 If we find one, set up the correct relocation in RELOC and copy the
7421 input string, minus the `@GOTOFF' into a malloc'd buffer for
7422 parsing by the calling routine. Return this buffer, and if ADJUST
7423 is non-null set it to the length of the string we removed from the
7424 input line. Otherwise return NULL. */
/* Scan the operand at input_line_pointer for an @<reloc> suffix
   (e.g. @GOTOFF); on a match, set *REL (and optionally *TYPES and
   *ADJUST) and return a malloc'd copy of the operand text with the
   suffix removed.  Returns NULL when no suffix is present; reports
   an error and returns NULL for a suffix invalid in the current
   output format.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types,
	 int bnd_prefix)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct {
    const char *str;
    int len;
    /* rel[0] is the 32-bit reloc, rel[1] the 64-bit one; a dummy
       entry marks the suffix unsupported for that object format.  */
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64 },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32 },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF)
  if (!IS_ELF)
    return NULL;
#endif

  /* Find the '@'; bail if the operand ends first.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      /* Any GOT-relative suffix (everything but SIZE, entry 0)
		 requires the GOT symbol to exist.  */
	      if (j != 0 && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      if (bnd_prefix && *rel == BFD_RELOC_X86_64_PLT32)
		*rel = BFD_RELOC_X86_64_PLT32_BND;
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
7578 #endif
7579
7580 #ifdef TE_PE
7581 #ifdef lex_got
7582 #undef lex_got
7583 #endif
7584 /* Parse operands of the form
7585 <symbol>@SECREL32+<nnn>
7586
7587 If we find one, set up the correct relocation in RELOC and copy the
7588 input string, minus the `@SECREL32' into a malloc'd buffer for
7589 parsing by the calling routine. Return this buffer, and if ADJUST
7590 is non-null set it to the length of the string we removed from the
7591 input line. Otherwise return NULL.
7592
7593 This function is copied from the ELF version above adjusted for PE targets. */
7594
7595 static char *
7596 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
7597 int *adjust ATTRIBUTE_UNUSED,
7598 i386_operand_type *types,
7599 int bnd_prefix ATTRIBUTE_UNUSED)
7600 {
7601 static const struct
7602 {
7603 const char *str;
7604 int len;
7605 const enum bfd_reloc_code_real rel[2];
7606 const i386_operand_type types64;
7607 }
7608 gotrel[] =
7609 {
7610 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
7611 BFD_RELOC_32_SECREL },
7612 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7613 };
7614
7615 char *cp;
7616 unsigned j;
7617
7618 for (cp = input_line_pointer; *cp != '@'; cp++)
7619 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7620 return NULL;
7621
7622 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7623 {
7624 int len = gotrel[j].len;
7625
7626 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7627 {
7628 if (gotrel[j].rel[object_64bit] != 0)
7629 {
7630 int first, second;
7631 char *tmpbuf, *past_reloc;
7632
7633 *rel = gotrel[j].rel[object_64bit];
7634 if (adjust)
7635 *adjust = len;
7636
7637 if (types)
7638 {
7639 if (flag_code != CODE_64BIT)
7640 {
7641 types->bitfield.imm32 = 1;
7642 types->bitfield.disp32 = 1;
7643 }
7644 else
7645 *types = gotrel[j].types64;
7646 }
7647
7648 /* The length of the first part of our input line. */
7649 first = cp - input_line_pointer;
7650
7651 /* The second part goes from after the reloc token until
7652 (and including) an end_of_line char or comma. */
7653 past_reloc = cp + 1 + len;
7654 cp = past_reloc;
7655 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7656 ++cp;
7657 second = cp + 1 - past_reloc;
7658
7659 /* Allocate and copy string. The trailing NUL shouldn't
7660 be necessary, but be safe. */
7661 tmpbuf = (char *) xmalloc (first + second + 2);
7662 memcpy (tmpbuf, input_line_pointer, first);
7663 if (second != 0 && *past_reloc != ' ')
7664 /* Replace the relocation token with ' ', so that
7665 errors like foo@SECLREL321 will be detected. */
7666 tmpbuf[first++] = ' ';
7667 memcpy (tmpbuf + first, past_reloc, second);
7668 tmpbuf[first + second] = '\0';
7669 return tmpbuf;
7670 }
7671
7672 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7673 gotrel[j].str, 1 << (5 + object_64bit));
7674 return NULL;
7675 }
7676 }
7677
7678 /* Might be a symbol version string. Don't as_bad here. */
7679 return NULL;
7680 }
7681
7682 #endif /* TE_PE */
7683
/* Parse an expression for a SIZE-byte data directive, recognizing
   @GOTOFF-style relocation suffixes for 4-byte (and, in 64-bit
   objects, 8-byte) values.  Returns the relocation type to apply,
   or NO_RELOC.  */

bfd_reloc_code_real_type
x86_cons (expressionS *exp, int size)
{
  bfd_reloc_code_real_type got_reloc = NO_RELOC;

  /* Negate intel_syntax while the generic expression code runs; the
     sign apparently serves as an "inside x86_cons" flag elsewhere —
     restored (and acted upon) below.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL, 0);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      /* A reloc suffix on a non-symbolic expression is
		 meaningless; temporarily terminate the string so the
		 diagnostic shows just this operand.  */
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);

  return got_reloc;
}
7738
7739 static void
7740 signed_cons (int size)
7741 {
7742 if (flag_code == CODE_64BIT)
7743 cons_sign = 1;
7744 cons (size);
7745 cons_sign = -1;
7746 }
7747
#ifdef TE_PE
/* Handle the PE .secrel32 directive: emit a comma-separated list of
   4-byte section-relative values.  */
static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  for (;;)
    {
      expression (&exp);
      /* Symbolic values become section-relative relocations.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);

      if (*input_line_pointer != ',')
	break;
      ++input_line_pointer;
    }

  demand_empty_rest_of_line ();
}
#endif
7768
/* Handle Vector operations: parse AVX-512 operand decorations at
   OP_STRING — "{1toN}" broadcast, "{%kN}" write mask, and "{z}"
   zeroing — recording them in i.broadcast and i.mask.  Parsing stops
   at OP_END (or at NUL when OP_END is NULL).  Returns the updated
   position, or NULL after reporting an error.  */

static char *
check_VecOperations (char *op_string, char *op_end)
{
  const reg_entry *mask;
  const char *saved;
  char *end_op;

  while (*op_string
	 && (op_end == NULL || op_string < op_end))
    {
      /* Remember the start of this "{...}" group for diagnostics.  */
      saved = op_string;
      if (*op_string == '{')
	{
	  op_string++;

	  /* Check broadcasts.  */
	  if (strncmp (op_string, "1to", 3) == 0)
	    {
	      int bcst_type;

	      if (i.broadcast)
		goto duplicated_vec_op;

	      op_string += 3;
	      if (*op_string == '8')
		bcst_type = BROADCAST_1TO8;
	      else if (*op_string == '4')
		bcst_type = BROADCAST_1TO4;
	      else if (*op_string == '2')
		bcst_type = BROADCAST_1TO2;
	      else if (*op_string == '1'
		       && *(op_string+1) == '6')
		{
		  bcst_type = BROADCAST_1TO16;
		  op_string++;
		}
	      else
		{
		  as_bad (_("Unsupported broadcast: `%s'"), saved);
		  return NULL;
		}
	      op_string++;

	      broadcast_op.type = bcst_type;
	      broadcast_op.operand = this_operand;
	      i.broadcast = &broadcast_op;
	    }
	  /* Check masking operation.  */
	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
	    {
	      /* k0 can't be used for write mask.  */
	      if (mask->reg_num == 0)
		{
		  as_bad (_("`%s' can't be used for write mask"),
			  op_string);
		  return NULL;
		}

	      if (!i.mask)
		{
		  mask_op.mask = mask;
		  mask_op.zeroing = 0;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  /* A mask group was already seen; it must have been
		     "{z}" on this same operand, still lacking its
		     mask register.  */
		  if (i.mask->mask)
		    goto duplicated_vec_op;

		  i.mask->mask = mask;

		  /* Only "{z}" is allowed here.  No need to check
		     zeroing mask explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid write mask `%s'"), saved);
		      return NULL;
		    }
		}

	      op_string = end_op;
	    }
	  /* Check zeroing-flag for masking operation.  */
	  else if (*op_string == 'z')
	    {
	      if (!i.mask)
		{
		  mask_op.mask = NULL;
		  mask_op.zeroing = 1;
		  mask_op.operand = this_operand;
		  i.mask = &mask_op;
		}
	      else
		{
		  if (i.mask->zeroing)
		    {
		    duplicated_vec_op:
		      as_bad (_("duplicated `%s'"), saved);
		      return NULL;
		    }

		  i.mask->zeroing = 1;

		  /* Only "{%k}" is allowed here.  No need to check mask
		     register explicitly.  */
		  if (i.mask->operand != this_operand)
		    {
		      as_bad (_("invalid zeroing-masking `%s'"),
			      saved);
		      return NULL;
		    }
		}

	      op_string++;
	    }
	  else
	    goto unknown_vec_op;

	  /* Each group must be closed by '}'.  */
	  if (*op_string != '}')
	    {
	      as_bad (_("missing `}' in `%s'"), saved);
	      return NULL;
	    }
	  op_string++;
	  continue;
	}
    unknown_vec_op:
      /* We don't know this one.  */
      as_bad (_("unknown vector operation: `%s'"), saved);
      return NULL;
    }

  return op_string;
}
7906
/* Parse the immediate operand text at IMM_START into the current
   operand (this_operand), recording the expression in i.op[].imms and
   any relocation specifier in i.reloc[].  Returns 1 on success, 0
   after a diagnostic.  */
static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start out allowing every immediate width; lex_got () may narrow
     TYPES according to a @-suffix relocation specifier.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Parse through the generic expression machinery by temporarily
     pointing input_line_pointer at the operand text.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  /* lex_got () returns a freshly allocated copy of the line with any
     recognized @GOT-style suffix stripped, recording the relocation
     in i.reloc[this_operand].  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types,
				(i.bnd_prefix != NULL
				 || add_bnd_prefix));
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();

  /* Handle vector operations. */
  if (*input_line_pointer == '{')
    {
      input_line_pointer = check_VecOperations (input_line_pointer,
						NULL);
      if (input_line_pointer == NULL)
	return 0;
    }

  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A relocation specifier makes no sense on a bare constant or
	 register; invalidate the expression so finalization below
	 reports it.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
7967
/* Validate and classify the parsed immediate EXP, setting the
   permitted immN bits in i.types[this_operand].  IMM_START is the
   original spelling for diagnostics and may be NULL to suppress them.
   TYPES restricts the widths allowed by a relocation specifier.
   Returns 1 on success, 0 on error.  */
static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later. */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val. */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp_seg == reg_section)
    {
      /* A register name is not a valid immediate in AT&T syntax.  */
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address. The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section. */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      /* Narrow to what the relocation specifier (if any) permits.  */
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
8024
8025 static char *
8026 i386_scale (char *scale)
8027 {
8028 offsetT val;
8029 char *save = input_line_pointer;
8030
8031 input_line_pointer = scale;
8032 val = get_absolute_expression ();
8033
8034 switch (val)
8035 {
8036 case 1:
8037 i.log2_scale_factor = 0;
8038 break;
8039 case 2:
8040 i.log2_scale_factor = 1;
8041 break;
8042 case 4:
8043 i.log2_scale_factor = 2;
8044 break;
8045 case 8:
8046 i.log2_scale_factor = 3;
8047 break;
8048 default:
8049 {
8050 char sep = *input_line_pointer;
8051
8052 *input_line_pointer = '\0';
8053 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8054 scale);
8055 *input_line_pointer = sep;
8056 input_line_pointer = save;
8057 return NULL;
8058 }
8059 }
8060 if (i.log2_scale_factor != 0 && i.index_reg == 0)
8061 {
8062 as_warn (_("scale factor of %d without an index register"),
8063 1 << i.log2_scale_factor);
8064 i.log2_scale_factor = 0;
8065 }
8066 scale = input_line_pointer;
8067 input_line_pointer = save;
8068 return scale;
8069 }
8070
8071 static int
8072 i386_displacement (char *disp_start, char *disp_end)
8073 {
8074 expressionS *exp;
8075 segT exp_seg = 0;
8076 char *save_input_line_pointer;
8077 char *gotfree_input_line;
8078 int override;
8079 i386_operand_type bigdisp, types = anydisp;
8080 int ret;
8081
8082 if (i.disp_operands == MAX_MEMORY_OPERANDS)
8083 {
8084 as_bad (_("at most %d displacement operands are allowed"),
8085 MAX_MEMORY_OPERANDS);
8086 return 0;
8087 }
8088
8089 operand_type_set (&bigdisp, 0);
8090 if ((i.types[this_operand].bitfield.jumpabsolute)
8091 || (!current_templates->start->opcode_modifier.jump
8092 && !current_templates->start->opcode_modifier.jumpdword))
8093 {
8094 bigdisp.bitfield.disp32 = 1;
8095 override = (i.prefix[ADDR_PREFIX] != 0);
8096 if (flag_code == CODE_64BIT)
8097 {
8098 if (!override)
8099 {
8100 bigdisp.bitfield.disp32s = 1;
8101 bigdisp.bitfield.disp64 = 1;
8102 }
8103 }
8104 else if ((flag_code == CODE_16BIT) ^ override)
8105 {
8106 bigdisp.bitfield.disp32 = 0;
8107 bigdisp.bitfield.disp16 = 1;
8108 }
8109 }
8110 else
8111 {
8112 /* For PC-relative branches, the width of the displacement
8113 is dependent upon data size, not address size. */
8114 override = (i.prefix[DATA_PREFIX] != 0);
8115 if (flag_code == CODE_64BIT)
8116 {
8117 if (override || i.suffix == WORD_MNEM_SUFFIX)
8118 bigdisp.bitfield.disp16 = 1;
8119 else
8120 {
8121 bigdisp.bitfield.disp32 = 1;
8122 bigdisp.bitfield.disp32s = 1;
8123 }
8124 }
8125 else
8126 {
8127 if (!override)
8128 override = (i.suffix == (flag_code != CODE_16BIT
8129 ? WORD_MNEM_SUFFIX
8130 : LONG_MNEM_SUFFIX));
8131 bigdisp.bitfield.disp32 = 1;
8132 if ((flag_code == CODE_16BIT) ^ override)
8133 {
8134 bigdisp.bitfield.disp32 = 0;
8135 bigdisp.bitfield.disp16 = 1;
8136 }
8137 }
8138 }
8139 i.types[this_operand] = operand_type_or (i.types[this_operand],
8140 bigdisp);
8141
8142 exp = &disp_expressions[i.disp_operands];
8143 i.op[this_operand].disps = exp;
8144 i.disp_operands++;
8145 save_input_line_pointer = input_line_pointer;
8146 input_line_pointer = disp_start;
8147 END_STRING_AND_SAVE (disp_end);
8148
8149 #ifndef GCC_ASM_O_HACK
8150 #define GCC_ASM_O_HACK 0
8151 #endif
8152 #if GCC_ASM_O_HACK
8153 END_STRING_AND_SAVE (disp_end + 1);
8154 if (i.types[this_operand].bitfield.baseIndex
8155 && displacement_string_end[-1] == '+')
8156 {
8157 /* This hack is to avoid a warning when using the "o"
8158 constraint within gcc asm statements.
8159 For instance:
8160
8161 #define _set_tssldt_desc(n,addr,limit,type) \
8162 __asm__ __volatile__ ( \
8163 "movw %w2,%0\n\t" \
8164 "movw %w1,2+%0\n\t" \
8165 "rorl $16,%1\n\t" \
8166 "movb %b1,4+%0\n\t" \
8167 "movb %4,5+%0\n\t" \
8168 "movb $0,6+%0\n\t" \
8169 "movb %h1,7+%0\n\t" \
8170 "rorl $16,%1" \
8171 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8172
8173 This works great except that the output assembler ends
8174 up looking a bit weird if it turns out that there is
8175 no offset. You end up producing code that looks like:
8176
8177 #APP
8178 movw $235,(%eax)
8179 movw %dx,2+(%eax)
8180 rorl $16,%edx
8181 movb %dl,4+(%eax)
8182 movb $137,5+(%eax)
8183 movb $0,6+(%eax)
8184 movb %dh,7+(%eax)
8185 rorl $16,%edx
8186 #NO_APP
8187
8188 So here we provide the missing zero. */
8189
8190 *displacement_string_end = '0';
8191 }
8192 #endif
8193 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types,
8194 (i.bnd_prefix != NULL
8195 || add_bnd_prefix));
8196 if (gotfree_input_line)
8197 input_line_pointer = gotfree_input_line;
8198
8199 exp_seg = expression (exp);
8200
8201 SKIP_WHITESPACE ();
8202 if (*input_line_pointer)
8203 as_bad (_("junk `%s' after expression"), input_line_pointer);
8204 #if GCC_ASM_O_HACK
8205 RESTORE_END_STRING (disp_end + 1);
8206 #endif
8207 input_line_pointer = save_input_line_pointer;
8208 if (gotfree_input_line)
8209 {
8210 free (gotfree_input_line);
8211
8212 if (exp->X_op == O_constant || exp->X_op == O_register)
8213 exp->X_op = O_illegal;
8214 }
8215
8216 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
8217
8218 RESTORE_END_STRING (disp_end);
8219
8220 return ret;
8221 }
8222
8223 static int
8224 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8225 i386_operand_type types, const char *disp_start)
8226 {
8227 i386_operand_type bigdisp;
8228 int ret = 1;
8229
8230 /* We do this to make sure that the section symbol is in
8231 the symbol table. We will ultimately change the relocation
8232 to be relative to the beginning of the section. */
8233 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8234 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8235 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8236 {
8237 if (exp->X_op != O_symbol)
8238 goto inv_disp;
8239
8240 if (S_IS_LOCAL (exp->X_add_symbol)
8241 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8242 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8243 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8244 exp->X_op = O_subtract;
8245 exp->X_op_symbol = GOT_symbol;
8246 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8247 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8248 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8249 i.reloc[this_operand] = BFD_RELOC_64;
8250 else
8251 i.reloc[this_operand] = BFD_RELOC_32;
8252 }
8253
8254 else if (exp->X_op == O_absent
8255 || exp->X_op == O_illegal
8256 || exp->X_op == O_big)
8257 {
8258 inv_disp:
8259 as_bad (_("missing or invalid displacement expression `%s'"),
8260 disp_start);
8261 ret = 0;
8262 }
8263
8264 else if (flag_code == CODE_64BIT
8265 && !i.prefix[ADDR_PREFIX]
8266 && exp->X_op == O_constant)
8267 {
8268 /* Since displacement is signed extended to 64bit, don't allow
8269 disp32 and turn off disp32s if they are out of range. */
8270 i.types[this_operand].bitfield.disp32 = 0;
8271 if (!fits_in_signed_long (exp->X_add_number))
8272 {
8273 i.types[this_operand].bitfield.disp32s = 0;
8274 if (i.types[this_operand].bitfield.baseindex)
8275 {
8276 as_bad (_("0x%lx out range of signed 32bit displacement"),
8277 (long) exp->X_add_number);
8278 ret = 0;
8279 }
8280 }
8281 }
8282
8283 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8284 else if (exp->X_op != O_constant
8285 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8286 && exp_seg != absolute_section
8287 && exp_seg != text_section
8288 && exp_seg != data_section
8289 && exp_seg != bss_section
8290 && exp_seg != undefined_section
8291 && !bfd_is_com_section (exp_seg))
8292 {
8293 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8294 ret = 0;
8295 }
8296 #endif
8297
8298 /* Check if this is a displacement only operand. */
8299 bigdisp = i.types[this_operand];
8300 bigdisp.bitfield.disp8 = 0;
8301 bigdisp.bitfield.disp16 = 0;
8302 bigdisp.bitfield.disp32 = 0;
8303 bigdisp.bitfield.disp32s = 0;
8304 bigdisp.bitfield.disp64 = 0;
8305 if (operand_type_all_zero (&bigdisp))
8306 i.types[this_operand] = operand_type_and (i.types[this_operand],
8307 types);
8308
8309 return ret;
8310 }
8311
/* Make sure the memory operand we've been dealt is valid.
   Return 1 on success, 0 on a failure.  */

static int
i386_index_check (const char *operand_string)
{
  const char *kind = "base/index";
  enum flag_code addr_mode;

  /* Determine the effective address size of this operand.  An
     explicit address-size prefix toggles between 16- and 32-bit in
     non-64-bit code (and selects 32-bit in 64-bit code).  */
  if (i.prefix[ADDR_PREFIX])
    addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
  else
    {
      addr_mode = flag_code;

#if INFER_ADDR_PREFIX
      if (i.mem_operands == 0)
	{
	  /* Infer address prefix from the first memory operand.  */
	  const reg_entry *addr_reg = i.base_reg;

	  if (addr_reg == NULL)
	    addr_reg = i.index_reg;

	  if (addr_reg)
	    {
	      if (addr_reg->reg_num == RegEip
		  || addr_reg->reg_num == RegEiz
		  || addr_reg->reg_type.bitfield.reg32)
		addr_mode = CODE_32BIT;
	      else if (flag_code != CODE_64BIT
		       && addr_reg->reg_type.bitfield.reg16)
		addr_mode = CODE_16BIT;

	      /* Registers of the non-default size: synthesize the
		 address-size prefix ourselves.  */
	      if (addr_mode != flag_code)
		{
		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
		  i.prefixes += 1;
		  /* Change the size of any displacement too.  At most one
		     of Disp16 or Disp32 is set.
		     FIXME.  There doesn't seem to be any real need for
		     separate Disp16 and Disp32 flags.  The same goes for
		     Imm16 and Imm32.  Removing them would probably clean
		     up the code quite a lot.  */
		  if (flag_code != CODE_64BIT
		      && (i.types[this_operand].bitfield.disp16
			  || i.types[this_operand].bitfield.disp32))
		    i.types[this_operand]
		      = operand_type_xor (i.types[this_operand], disp16_32);
		}
	    }
	}
#endif
    }

  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      const reg_entry *expected_reg;
      /* Register names indexed by addr_mode (CODE_32BIT=0, CODE_16BIT=1,
	 CODE_64BIT=2) and, for di_si, by the esseg operand bit.  */
      static const char *di_si[][2] =
	{
	  { "esi", "edi" },
	  { "si", "di" },
	  { "rsi", "rdi" }
	};
      static const char *bx[] = { "ebx", "bx", "rbx" };

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
	{
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  /* Pick whichever template operand is the base/index one.  */
	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		  .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected_reg = hash_find (reg_hash,
				    di_si[addr_mode][type.bitfield.esseg]);

	}
      else
	expected_reg = hash_find (reg_hash, bx[addr_mode]);

      if (i.base_reg != expected_reg
	  || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	{
	  /* The second memory operand must have the same size as
	     the first one.  */
	  if (i.mem_operands
	      && i.base_reg
	      && !((addr_mode == CODE_64BIT
		    && i.base_reg->reg_type.bitfield.reg64)
		   || (addr_mode == CODE_32BIT
		       ? i.base_reg->reg_type.bitfield.reg32
		       : i.base_reg->reg_type.bitfield.reg16)))
	    goto bad_address;

	  /* Wrong register but right size: only warn, accept the
	     operand.  */
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   expected_reg->reg_name,
		   intel_syntax ? ']' : ')');
	  return 1;
	}
      else
	return 1;

    bad_address:
      as_bad (_("`%s' is not a valid %s expression"),
	      operand_string, kind);
      return 0;
    }
  else
    {
      if (addr_mode != CODE_16BIT)
	{
	  /* 32-bit/64-bit checks.  */
	  if ((i.base_reg
	       && (addr_mode == CODE_64BIT
		   ? !i.base_reg->reg_type.bitfield.reg64
		   : !i.base_reg->reg_type.bitfield.reg32)
	       && (i.index_reg
		   || (i.base_reg->reg_num
		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && !i.index_reg->reg_type.bitfield.regzmm
		  && ((addr_mode == CODE_64BIT
		       ? !(i.index_reg->reg_type.bitfield.reg64
			   || i.index_reg->reg_num == RegRiz)
		       : !(i.index_reg->reg_type.bitfield.reg32
			   || i.index_reg->reg_num == RegEiz))
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    goto bad_address;
	}
      else
	{
	  /* 16-bit checks.  Only %bx/%bp bases and %si/%di indexes
	     (reg_num < 6 vs >= 6), unscaled, are encodable.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    goto bad_address;
	}
    }
  return 1;
}
8474
8475 /* Handle vector immediates. */
8476
8477 static int
8478 RC_SAE_immediate (const char *imm_start)
8479 {
8480 unsigned int match_found, j;
8481 const char *pstr = imm_start;
8482 expressionS *exp;
8483
8484 if (*pstr != '{')
8485 return 0;
8486
8487 pstr++;
8488 match_found = 0;
8489 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8490 {
8491 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8492 {
8493 if (!i.rounding)
8494 {
8495 rc_op.type = RC_NamesTable[j].type;
8496 rc_op.operand = this_operand;
8497 i.rounding = &rc_op;
8498 }
8499 else
8500 {
8501 as_bad (_("duplicated `%s'"), imm_start);
8502 return 0;
8503 }
8504 pstr += RC_NamesTable[j].len;
8505 match_found = 1;
8506 break;
8507 }
8508 }
8509 if (!match_found)
8510 return 0;
8511
8512 if (*pstr++ != '}')
8513 {
8514 as_bad (_("Missing '}': '%s'"), imm_start);
8515 return 0;
8516 }
8517 /* RC/SAE immediate string should contain nothing more. */;
8518 if (*pstr != 0)
8519 {
8520 as_bad (_("Junk after '}': '%s'"), imm_start);
8521 return 0;
8522 }
8523
8524 exp = &im_expressions[i.imm_operands++];
8525 i.op[this_operand].imms = exp;
8526
8527 exp->X_op = O_constant;
8528 exp->X_add_number = 0;
8529 exp->X_add_symbol = (symbolS *) 0;
8530 exp->X_op_symbol = (symbolS *) 0;
8531
8532 i.types[this_operand].bitfield.imm8 = 1;
8533 return 1;
8534 }
8535
/* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
   on error.  An operand is one of: a register (possibly with a
   segment override and/or vector decorations), an immediate, an
   RC/SAE decoration, or a memory reference
   "disp(base, index, scale)".  */

static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  /* reg_num encodes the segment register: 0..5 are
	     %es, %cs, %ss, %ds, %fs, %gs respectively.  */
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  /* The rest of the operand is a memory reference.  */
	  goto do_memory_reference;
	}

      /* Handle vector operations.  */
      if (*op_string == '{')
	{
	  op_string = check_VecOperations (op_string, NULL);
	  if (op_string == NULL)
	    return 0;
	}

      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* Record the plain register operand; a lone register is never a
	 base/index.  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (RC_SAE_immediate (operand_string))
    {
      /* If it is a RC or SAE immediate, do nothing.  */
      ;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;
      char *vop_start;

    do_memory_reference:
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      /* Handle vector operations.  */
      vop_start = strchr (op_string, '{');
      if (vop_start && vop_start < base_string)
	{
	  if (check_VecOperations (vop_start, base_string) == NULL)
	    return 0;
	  /* Decorations are not part of the displacement text.  */
	  base_string = vop_start;
	}

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  /* Remember where the matching '(' is; the displacement (if
	     any) ends there.  */
	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      end_op = strchr (base_string, ',');
		      if (end_op)
			*end_op = '\0';
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      end_op = strchr (base_string, ',');
	      if (end_op)
		*end_op = '\0';
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
8864 \f
8865 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8866 that an rs_machine_dependent frag may reach. */
8867
8868 unsigned int
8869 i386_frag_max_var (fragS *frag)
8870 {
8871 /* The only relaxable frags are for jumps.
8872 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8873 gas_assert (frag->fr_type == rs_machine_dependent);
8874 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
8875 }
8876
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && (S_IS_EXTERNAL (fragP->fr_symbol)
	      || S_IS_WEAK (fragP->fr_symbol)
	      || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
		   & BSF_GNU_INDIRECT_FUNCTION))))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Convert the
	 jump to its largest form now and emit a fixup for it.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var may carry an explicit relocation from the operand.  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  /* 16-bit conditional jumps have no (d)word-displacement
	     form on pre-386 CPUs; synthesize one from two jumps.  */
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the byte form and emit a
		 signed 8-bit PC-relative fixup.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      /* The frag is fully resolved; stop relaxing it.  */
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
9003
9004 /* Called after relax() is finished.
9005
9006 In: Address of frag.
9007 fr_type == rs_machine_dependent.
9008 fr_subtype is what the address relaxed to.
9009
9010 Out: Any fixSs and constants are set up.
9011 Caller will turn frag into a ".space 0". */
9012
void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  /* Opcode byte(s) of the (possibly promoted) jump being finalized.  */
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  /* Bytes grown beyond the original short form: opcode growth plus the
     displacement size, used to bias the displacement and bump fr_fix.  */
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Short form was sufficient; don't have to change the opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;	/* jmp rel32  */
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;	/* jmp rel16  */
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  /* Promote Jcc rel8 (0x7N) to the two-byte 0x0f,0x8N rel32
	     form.  */
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* No word-displacement Jcc on the 8086/286: invert the
	     condition and branch around an unconditional jump, as in
	     md_estimate_size_before_relax.  */
	  extension = 4;
	  opcode[0] ^= 1;	/* Negate the condition.  */
	  opcode[1] = 3;	/* Skip the 3-byte jmp that follows.  */
	  opcode[2] = 0xe9;	/* jmp rel16  */
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If the displacement is smaller than four bytes we are sure that the
     operand fits, but if it's 4, then in 64-bit mode the displacement
     could still be larger than +/- 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  The displacement is relative to
     the end of the instruction, hence the '- extension' bias.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
9111 \f
9112 /* Apply a fixup (fixP) to segment data, once it has been determined
9113 by our caller that we have all the info we need to fix it up.
9114
9115 Parameter valP is the pointer to the value of the bits.
9116
9117 On the 386, immediates, displacements, and data pointers are all in
9118 the same (little-endian) format, so we don't need to care about which
9119 we are handling. */
9120
void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* Location inside the frag's literal pool that receives the value.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  /* Rewrite absolute data relocs on PC-relative fixups into their
     PC-relative counterparts.  */
  if (fixP->fx_pcrel)
    {
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL
	  || fixP->fx_r_type == BFD_RELOC_X86_64_PC32_BND)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
      case BFD_RELOC_X86_64_PLT32_BND:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	/* All TLS relocs reference a thread-local symbol.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	/* These carry no data; keep the fixup alive for tc_gen_reloc.  */
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* With RELA the addend lives in the reloc, not the section data.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
9300 \f
9301 char *
9302 md_atof (int type, char *litP, int *sizeP)
9303 {
9304 /* This outputs the LITTLENUMs in REVERSE order;
9305 in accord with the bigendian 386. */
9306 return ieee_md_atof (type, litP, sizeP, FALSE);
9307 }
9308 \f
/* Scratch buffer for output_invalid: large enough for "(0xff)" plus
   the terminating NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render the invalid input character C for use in a diagnostic:
   printable characters come back quoted, anything else as a hex
   escape.  Returns a pointer to a static buffer, so the result is
   only valid until the next call.  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    {
      snprintf (output_invalid_buf, sizeof (output_invalid_buf),
		"'%c'", c);
      return output_invalid_buf;
    }

  snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	    "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
9322
/* Parse a register out of REG_STRING, which starts *before* the
   optional REGISTER_PREFIX.  On success return the matching reg_entry
   and set *END_OP to just past the consumed text; return NULL if the
   text is not a register valid for the current arch/mode.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, mapping each character through the
     register_chars table (defined elsewhere in this file); stop at the
     first character that cannot be part of a register name.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  /* Index off the "st(0)" entry by the stack slot.  */
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  /* An all-zero operand type marks an entry that is never usable as a
     real register operand.  */
  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* 32-bit, sreg3, control, debug and test registers all require at
     least an i386.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  /* FP stack registers need some flavor of x87 support.  */
  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
      && !cpu_arch_flags.bitfield.cpuavx512f)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* Upper 16 vector register is only available with VREX in 64bit
     mode.  */
  if ((r->reg_flags & RegVRex))
    {
      if (!cpu_arch_flags.bitfield.cpuvrex
	  || flag_code != CODE_64BIT)
	return (const reg_entry *) NULL;

      i.need_vrex = 1;
    }

  /* REX-extended and 64-bit registers are invalid outside 64-bit mode,
     except that control registers are allowed on CPUs with long-mode
     support.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The pseudo segment register `flat' is only meaningful in Intel
     syntax.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
9448
9449 /* REG_STRING starts *before* REGISTER_PREFIX. */
9450
9451 static const reg_entry *
9452 parse_register (char *reg_string, char **end_op)
9453 {
9454 const reg_entry *r;
9455
9456 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
9457 r = parse_real_register (reg_string, end_op);
9458 else
9459 r = NULL;
9460 if (!r)
9461 {
9462 char *save = input_line_pointer;
9463 char c;
9464 symbolS *symbolP;
9465
9466 input_line_pointer = reg_string;
9467 c = get_symbol_end ();
9468 symbolP = symbol_find (reg_string);
9469 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
9470 {
9471 const expressionS *e = symbol_get_value_expression (symbolP);
9472
9473 know (e->X_op == O_register);
9474 know (e->X_add_number >= 0
9475 && (valueT) e->X_add_number < i386_regtab_size);
9476 r = i386_regtab + e->X_add_number;
9477 if ((r->reg_flags & RegVRex))
9478 i.need_vrex = 1;
9479 *end_op = input_line_pointer;
9480 }
9481 *input_line_pointer = c;
9482 input_line_pointer = save;
9483 }
9484 return r;
9485 }
9486
9487 int
9488 i386_parse_name (char *name, expressionS *e, char *nextcharP)
9489 {
9490 const reg_entry *r;
9491 char *end = input_line_pointer;
9492
9493 *end = *nextcharP;
9494 r = parse_register (name, &input_line_pointer);
9495 if (r && end <= input_line_pointer)
9496 {
9497 *nextcharP = *input_line_pointer;
9498 *input_line_pointer = 0;
9499 e->X_op = O_register;
9500 e->X_add_number = r - i386_regtab;
9501 return 1;
9502 }
9503 input_line_pointer = end;
9504 *end = 0;
9505 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
9506 }
9507
9508 void
9509 md_operand (expressionS *e)
9510 {
9511 char *end;
9512 const reg_entry *r;
9513
9514 switch (*input_line_pointer)
9515 {
9516 case REGISTER_PREFIX:
9517 r = parse_real_register (input_line_pointer, &end);
9518 if (r)
9519 {
9520 e->X_op = O_register;
9521 e->X_add_number = r - i386_regtab;
9522 input_line_pointer = end;
9523 }
9524 break;
9525
9526 case '[':
9527 gas_assert (intel_syntax);
9528 end = input_line_pointer++;
9529 expression (e);
9530 if (*input_line_pointer == ']')
9531 {
9532 ++input_line_pointer;
9533 e->X_op_symbol = make_expr_symbol (e);
9534 e->X_add_symbol = NULL;
9535 e->X_add_number = 0;
9536 e->X_op = O_index;
9537 }
9538 else
9539 {
9540 e->X_op = O_absent;
9541 input_line_pointer = end;
9542 }
9543 break;
9544 }
9545 }
9546
9547 \f
/* Single-character options accepted by md_parse_option; the extra SVR4
   compatibility letters are only meaningful for ELF targets.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif

/* Values identifying the target-specific long options below; offset
   from OPTION_MD_BASE so they cannot collide with generic gas option
   values.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
#define OPTION_X32 (OPTION_MD_BASE + 14)
#define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
#define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
#define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
#define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
#define OPTION_OMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
#define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)

/* Long options handled by md_parse_option; some entries are only
   compiled in for the object formats that support them.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
  {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
  {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
# if defined (TE_PE) || defined (TE_PEP)
  {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
#endif
  {"momit-lock-prefix", required_argument, NULL, OPTION_OMIT_LOCK_PREFIX},
  {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
9609
9610 int
9611 md_parse_option (int c, char *arg)
9612 {
9613 unsigned int j;
9614 char *arch, *next;
9615
9616 switch (c)
9617 {
9618 case 'n':
9619 optimize_align_code = 0;
9620 break;
9621
9622 case 'q':
9623 quiet_warnings = 1;
9624 break;
9625
9626 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9627 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9628 should be emitted or not. FIXME: Not implemented. */
9629 case 'Q':
9630 break;
9631
9632 /* -V: SVR4 argument to print version ID. */
9633 case 'V':
9634 print_version_id ();
9635 break;
9636
9637 /* -k: Ignore for FreeBSD compatibility. */
9638 case 'k':
9639 break;
9640
9641 case 's':
9642 /* -s: On i386 Solaris, this tells the native assembler to use
9643 .stab instead of .stab.excl. We always use .stab anyhow. */
9644 break;
9645 #endif
9646 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9647 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9648 case OPTION_64:
9649 {
9650 const char **list, **l;
9651
9652 list = bfd_target_list ();
9653 for (l = list; *l != NULL; l++)
9654 if (CONST_STRNEQ (*l, "elf64-x86-64")
9655 || strcmp (*l, "coff-x86-64") == 0
9656 || strcmp (*l, "pe-x86-64") == 0
9657 || strcmp (*l, "pei-x86-64") == 0
9658 || strcmp (*l, "mach-o-x86-64") == 0)
9659 {
9660 default_arch = "x86_64";
9661 break;
9662 }
9663 if (*l == NULL)
9664 as_fatal (_("no compiled in support for x86_64"));
9665 free (list);
9666 }
9667 break;
9668 #endif
9669
9670 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9671 case OPTION_X32:
9672 if (IS_ELF)
9673 {
9674 const char **list, **l;
9675
9676 list = bfd_target_list ();
9677 for (l = list; *l != NULL; l++)
9678 if (CONST_STRNEQ (*l, "elf32-x86-64"))
9679 {
9680 default_arch = "x86_64:32";
9681 break;
9682 }
9683 if (*l == NULL)
9684 as_fatal (_("no compiled in support for 32bit x86_64"));
9685 free (list);
9686 }
9687 else
9688 as_fatal (_("32bit x86_64 is only supported for ELF"));
9689 break;
9690 #endif
9691
9692 case OPTION_32:
9693 default_arch = "i386";
9694 break;
9695
9696 case OPTION_DIVIDE:
9697 #ifdef SVR4_COMMENT_CHARS
9698 {
9699 char *n, *t;
9700 const char *s;
9701
9702 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
9703 t = n;
9704 for (s = i386_comment_chars; *s != '\0'; s++)
9705 if (*s != '/')
9706 *t++ = *s;
9707 *t = '\0';
9708 i386_comment_chars = n;
9709 }
9710 #endif
9711 break;
9712
9713 case OPTION_MARCH:
9714 arch = xstrdup (arg);
9715 do
9716 {
9717 if (*arch == '.')
9718 as_fatal (_("invalid -march= option: `%s'"), arg);
9719 next = strchr (arch, '+');
9720 if (next)
9721 *next++ = '\0';
9722 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9723 {
9724 if (strcmp (arch, cpu_arch [j].name) == 0)
9725 {
9726 /* Processor. */
9727 if (! cpu_arch[j].flags.bitfield.cpui386)
9728 continue;
9729
9730 cpu_arch_name = cpu_arch[j].name;
9731 cpu_sub_arch_name = NULL;
9732 cpu_arch_flags = cpu_arch[j].flags;
9733 cpu_arch_isa = cpu_arch[j].type;
9734 cpu_arch_isa_flags = cpu_arch[j].flags;
9735 if (!cpu_arch_tune_set)
9736 {
9737 cpu_arch_tune = cpu_arch_isa;
9738 cpu_arch_tune_flags = cpu_arch_isa_flags;
9739 }
9740 break;
9741 }
9742 else if (*cpu_arch [j].name == '.'
9743 && strcmp (arch, cpu_arch [j].name + 1) == 0)
9744 {
9745 /* ISA entension. */
9746 i386_cpu_flags flags;
9747
9748 if (!cpu_arch[j].negated)
9749 flags = cpu_flags_or (cpu_arch_flags,
9750 cpu_arch[j].flags);
9751 else
9752 flags = cpu_flags_and_not (cpu_arch_flags,
9753 cpu_arch[j].flags);
9754 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
9755 {
9756 if (cpu_sub_arch_name)
9757 {
9758 char *name = cpu_sub_arch_name;
9759 cpu_sub_arch_name = concat (name,
9760 cpu_arch[j].name,
9761 (const char *) NULL);
9762 free (name);
9763 }
9764 else
9765 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
9766 cpu_arch_flags = flags;
9767 cpu_arch_isa_flags = flags;
9768 }
9769 break;
9770 }
9771 }
9772
9773 if (j >= ARRAY_SIZE (cpu_arch))
9774 as_fatal (_("invalid -march= option: `%s'"), arg);
9775
9776 arch = next;
9777 }
9778 while (next != NULL );
9779 break;
9780
9781 case OPTION_MTUNE:
9782 if (*arg == '.')
9783 as_fatal (_("invalid -mtune= option: `%s'"), arg);
9784 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9785 {
9786 if (strcmp (arg, cpu_arch [j].name) == 0)
9787 {
9788 cpu_arch_tune_set = 1;
9789 cpu_arch_tune = cpu_arch [j].type;
9790 cpu_arch_tune_flags = cpu_arch[j].flags;
9791 break;
9792 }
9793 }
9794 if (j >= ARRAY_SIZE (cpu_arch))
9795 as_fatal (_("invalid -mtune= option: `%s'"), arg);
9796 break;
9797
9798 case OPTION_MMNEMONIC:
9799 if (strcasecmp (arg, "att") == 0)
9800 intel_mnemonic = 0;
9801 else if (strcasecmp (arg, "intel") == 0)
9802 intel_mnemonic = 1;
9803 else
9804 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
9805 break;
9806
9807 case OPTION_MSYNTAX:
9808 if (strcasecmp (arg, "att") == 0)
9809 intel_syntax = 0;
9810 else if (strcasecmp (arg, "intel") == 0)
9811 intel_syntax = 1;
9812 else
9813 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
9814 break;
9815
9816 case OPTION_MINDEX_REG:
9817 allow_index_reg = 1;
9818 break;
9819
9820 case OPTION_MNAKED_REG:
9821 allow_naked_reg = 1;
9822 break;
9823
9824 case OPTION_MOLD_GCC:
9825 old_gcc = 1;
9826 break;
9827
9828 case OPTION_MSSE2AVX:
9829 sse2avx = 1;
9830 break;
9831
9832 case OPTION_MSSE_CHECK:
9833 if (strcasecmp (arg, "error") == 0)
9834 sse_check = check_error;
9835 else if (strcasecmp (arg, "warning") == 0)
9836 sse_check = check_warning;
9837 else if (strcasecmp (arg, "none") == 0)
9838 sse_check = check_none;
9839 else
9840 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
9841 break;
9842
9843 case OPTION_MOPERAND_CHECK:
9844 if (strcasecmp (arg, "error") == 0)
9845 operand_check = check_error;
9846 else if (strcasecmp (arg, "warning") == 0)
9847 operand_check = check_warning;
9848 else if (strcasecmp (arg, "none") == 0)
9849 operand_check = check_none;
9850 else
9851 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
9852 break;
9853
9854 case OPTION_MAVXSCALAR:
9855 if (strcasecmp (arg, "128") == 0)
9856 avxscalar = vex128;
9857 else if (strcasecmp (arg, "256") == 0)
9858 avxscalar = vex256;
9859 else
9860 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
9861 break;
9862
9863 case OPTION_MADD_BND_PREFIX:
9864 add_bnd_prefix = 1;
9865 break;
9866
9867 case OPTION_MEVEXLIG:
9868 if (strcmp (arg, "128") == 0)
9869 evexlig = evexl128;
9870 else if (strcmp (arg, "256") == 0)
9871 evexlig = evexl256;
9872 else if (strcmp (arg, "512") == 0)
9873 evexlig = evexl512;
9874 else
9875 as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
9876 break;
9877
9878 case OPTION_MEVEXRCIG:
9879 if (strcmp (arg, "rne") == 0)
9880 evexrcig = rne;
9881 else if (strcmp (arg, "rd") == 0)
9882 evexrcig = rd;
9883 else if (strcmp (arg, "ru") == 0)
9884 evexrcig = ru;
9885 else if (strcmp (arg, "rz") == 0)
9886 evexrcig = rz;
9887 else
9888 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
9889 break;
9890
9891 case OPTION_MEVEXWIG:
9892 if (strcmp (arg, "0") == 0)
9893 evexwig = evexw0;
9894 else if (strcmp (arg, "1") == 0)
9895 evexwig = evexw1;
9896 else
9897 as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
9898 break;
9899
9900 # if defined (TE_PE) || defined (TE_PEP)
9901 case OPTION_MBIG_OBJ:
9902 use_big_obj = 1;
9903 break;
9904 #endif
9905
9906 case OPTION_OMIT_LOCK_PREFIX:
9907 if (strcasecmp (arg, "yes") == 0)
9908 omit_lock_prefix = 1;
9909 else if (strcasecmp (arg, "no") == 0)
9910 omit_lock_prefix = 0;
9911 else
9912 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
9913 break;
9914
9915 default:
9916 return 0;
9917 }
9918 return 1;
9919 }
9920
/* Blank line used both as the output buffer template and to bound the
   width of each printed line of CPU/extension names.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Print the cpu_arch table entries to STREAM, wrapped so each line fits
   within MESSAGE_TEMPLATE.  With EXT non-zero, list only ISA extensions
   (entries whose name begins with '.', shown without the dot);
   otherwise list only processors.  With CHECK non-zero, omit processors
   lacking i386 support.  Continuation text starts at column 27,
   presumably to align with the option descriptions in md_show_usage
   (TODO confirm).  */

static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  char *start = message + 27;
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      /* Drop the leading '.' from the displayed name.  */
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is a processor.  Skip if we show only extensions.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      /* Reserve 2 spaces for ", " or ",\0"  */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
	{
	  if (p != start)
	    {
	      *p++ = ',';
	      *p++ = ' ';
	    }
	  p = mempcpy (p, name, len);
	}
      else
	{
	  /* Output the current message now and start a new one.  */
	  *p++ = ',';
	  *p = '\0';
	  fprintf (stream, "%s\n", message);
	  p = start;
	  left = size - (start - message) - len - 2;

	  gas_assert (left >= 0);

	  p = mempcpy (p, name, len);
	}
    }

  /* Flush whatever remains in the buffer.  */
  *p = '\0';
  fprintf (stream, "%s\n", message);
}
9999
/* Print a description of the x86-specific command line options to
   STREAM; called for "as --help".  Which entries appear depends on the
   object formats compiled in.  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  /* SVR4-compatibility switches accepted but mostly ignored.  */
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                           for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
                          strip all lock prefixes\n"));
}
10079
10080 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
10081 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10082 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10083
10084 /* Pick the target format to use. */
10085
/* Pick the BFD target format name for the output file, based on the
   configured default architecture and the output flavour.  As a side
   effect, sets the code size flag, the ELF ABI variant, and default
   ISA/tune CPU flag sets when the user has not chosen them.  */

const char *
i386_target_format (void)
{
  /* "x86_64" may carry a suffix: a bare "x86_64" selects the LP64
     ABI, anything longer selects the x32 (ILP32-on-64-bit) ABI.  */
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  /* If no -march/-mtune (or .arch) provided CPU flags, default to the
     generic 32- or 64-bit entry of the cpu_arch table.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects must not carry 64-bit relocations; see the
	       check in tc_gen_reloc.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* The Intel L1OM/K1OM coprocessor targets use dedicated
	   64-bit-only ELF formats.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}
10182
10183 #endif /* OBJ_MAYBE_ more than one */
10184
10185 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit a .note section recording the target architecture name:
   note type NT_ARCH, name = cpu_arch_name (NUL included in namesz),
   empty descriptor.  No-op unless assembling ELF with a known
   cpu_arch_name.  */
void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      /* Remember the current (sub)section so we can restore it after
	 switching to .note.  */
      asection *seg = now_seg;
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
			     note_secp,
			     SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      i_note.namesz = len + 1;	/* Name length includes the NUL.  */
      i_note.descsz = 0;	/* No descriptor data.  */
      i_note.type = NT_ARCH;
      /* Write the three note-header words (sized per the external
	 note layout), then the name string itself.  */
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      /* Pad the section to a 2**2 == 4-byte boundary.  */
      frag_align (2, 0, 0);

      /* Return to the section we were assembling into.  */
      subseg_set (seg, subseg);
    }
}
10225 #endif
10226 \f
10227 symbolS *
10228 md_undefined_symbol (char *name)
10229 {
10230 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10231 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10232 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10233 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10234 {
10235 if (!GOT_symbol)
10236 {
10237 if (symbol_find (name))
10238 as_bad (_("GOT already in symbol table"));
10239 GOT_symbol = symbol_new (name, undefined_section,
10240 (valueT) 0, &zero_address_frag);
10241 };
10242 return GOT_symbol;
10243 }
10244 return 0;
10245 }
10246
10247 /* Round up a section size to the appropriate boundary. */
10248
10249 valueT
10250 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
10251 {
10252 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10253 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
10254 {
10255 /* For a.out, force the section size to be aligned. If we don't do
10256 this, BFD will align it for us, but it will not write out the
10257 final bytes of the section. This may be a bug in BFD, but it is
10258 easier to fix it here since that is how the other a.out targets
10259 work. */
10260 int align;
10261
10262 align = bfd_get_section_alignment (stdoutput, segment);
10263 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
10264 }
10265 #endif
10266
10267 return size;
10268 }
10269
10270 /* On the i386, PC-relative offsets are relative to the start of the
10271 next instruction. That is, the address of the offset, plus its
10272 size, since the offset is always the last part of the insn. */
10273
10274 long
10275 md_pcrel_from (fixS *fixP)
10276 {
10277 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
10278 }
10279
10280 #ifndef I386COFF
10281
10282 static void
10283 s_bss (int ignore ATTRIBUTE_UNUSED)
10284 {
10285 int temp;
10286
10287 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10288 if (IS_ELF)
10289 obj_elf_section_change_hook ();
10290 #endif
10291 temp = get_absolute_expression ();
10292 subseg_set (bss_section, (subsegT) temp);
10293 demand_empty_rest_of_line ();
10294 }
10295
10296 #endif
10297
10298 void
10299 i386_validate_fix (fixS *fixp)
10300 {
10301 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
10302 {
10303 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
10304 {
10305 if (!object_64bit)
10306 abort ();
10307 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
10308 }
10309 else
10310 {
10311 if (!object_64bit)
10312 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
10313 else
10314 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
10315 }
10316 fixp->fx_subsy = 0;
10317 }
10318 }
10319
/* Translate an internal fixup into a BFD relocation for the output
   file.  Returns NULL when the fixup is fully resolved here (size
   relocations against local symbols).  */
arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
#endif
      /* Fall through: global/external size relocations are emitted
	 unchanged, like the types below.  */

      /* All of these relocation types pass through as-is.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_PLT32_BND:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through: a pc-relative 32S is handled like any other
	 4-byte pc-relative relocation.  */
    default:
      if (fixp->fx_pcrel)
	{
	  /* Select a generic pc-relative relocation by size.  */
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4:
	      /* Keep the BND-prefixed variant distinct from a plain
		 32-bit pc-relative relocation.  */
	      code = (fixp->fx_r_type == BFD_RELOC_X86_64_PC32_BND
		      ? fixp-> fx_r_type : BFD_RELOC_32_PCREL);
	      break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  /* Select a generic absolute relocation by size.  */
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A direct reference to _GLOBAL_OFFSET_TABLE_ itself becomes a
     GOTPC relocation.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot represent 64-bit relocations; diagnose any
	 that reach here (see disallow_64bit_reloc).  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  /* For these pc-relative GOT/PLT/TLS relocations the addend
	     excludes the fixup size.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_PLT32_BND:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
10538
10539 #include "tc-i386-intel.c"
10540
/* Parse a register-name expression (for .cfi_* directives) and
   convert it to a DWARF2 register number: on success EXP becomes an
   O_constant holding the regnum, otherwise it is marked O_illegal.  */
void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  /* Temporarily accept registers without the '%' prefix, allow '.'
     as a register-name character, and permit pseudo registers, so
     CFI operands parse uniformly; restore all three afterwards.  */
  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  exp->X_op = O_constant;
	  /* flag_code >> 1 selects the DWARF numbering column for
	     the current code size (32- vs 64-bit).  */
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	exp->X_op = O_illegal;
    }
}
10569
/* Emit the initial CFI instructions for a function frame: define the
   CFA relative to the stack pointer and record the return-address
   column.  */
void
tc_x86_frame_initial_instructions (void)
{
  /* Cached DWARF regnum of the stack pointer, indexed by code size
     (flag_code >> 1); zero means "not computed yet".  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      /* Reuse the register parser by pointing the input at the
	 stack-pointer name for the current code size.  */
      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
10591
/* Return the size in bytes of a DWARF address: 4 for the x32 ABI,
   otherwise derived from the BFD architecture's address width.  */
int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
10601
10602 int
10603 i386_elf_section_type (const char *str, size_t len)
10604 {
10605 if (flag_code == CODE_64BIT
10606 && len == sizeof ("unwind") - 1
10607 && strncmp (str, "unwind", 6) == 0)
10608 return SHT_X86_64_UNWIND;
10609
10610 return -1;
10611 }
10612
10613 #ifdef TE_SOLARIS
/* On Solaris, give 64-bit .eh_frame sections the SHT_X86_64_UNWIND
   ELF section type.  */
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
10620 #endif
10621
10622 #ifdef TE_PE
10623 void
10624 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10625 {
10626 expressionS exp;
10627
10628 exp.X_op = O_secrel;
10629 exp.X_add_symbol = symbol;
10630 exp.X_add_number = 0;
10631 emit_expr (&exp, size);
10632 }
10633 #endif
10634
10635 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10636 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10637
10638 bfd_vma
10639 x86_64_section_letter (int letter, char **ptr_msg)
10640 {
10641 if (flag_code == CODE_64BIT)
10642 {
10643 if (letter == 'l')
10644 return SHF_X86_64_LARGE;
10645
10646 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10647 }
10648 else
10649 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
10650 return -1;
10651 }
10652
10653 bfd_vma
10654 x86_64_section_word (char *str, size_t len)
10655 {
10656 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
10657 return SHF_X86_64_LARGE;
10658
10659 return -1;
10660 }
10661
/* Handle the .largecomm directive: like .comm, but allocates the
   symbol in the large common section (locals go to .lbss), as used
   by the x86-64 large data model.  In 32-bit mode fall back to a
   plain .comm with a warning.  */
static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Lazily-created .lbss section, shared across calls.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched sections; switch back.  */
	  subseg_set (seg, subseg);
	}

      /* Temporarily swap in the large common/.lbss sections so the
	 shared .comm parser allocates into them, then restore.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
10701 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
/* (Stray web-archive footer from the source of this extraction,
   preserved as a comment: "This page took 0.25763 seconds and 4 git
   commands to generate.")  */