Terminate register name when reporting bad register
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
4 2012
5 Free Software Foundation, Inc.
6
7 This file is part of GAS, the GNU Assembler.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
22 02110-1301, USA. */
23
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
30
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
38
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
41 #endif
42
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
45 #endif
46
47 #ifndef DEFAULT_ARCH
48 #define DEFAULT_ARCH "i386"
49 #endif
50
51 #ifndef INLINE
52 #if __GNUC__ >= 2
53 #define INLINE __inline__
54 #else
55 #define INLINE
56 #endif
57 #endif
58
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
64 #define WAIT_PREFIX 0
65 #define SEG_PREFIX 1
66 #define ADDR_PREFIX 2
67 #define DATA_PREFIX 3
68 #define REP_PREFIX 4
69 #define HLE_PREFIX REP_PREFIX
70 #define LOCK_PREFIX 5
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
73
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
78
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax. Use a non-ascii letter since it never appears
89 in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91
92 #define END_OF_INSN '\0'
93
94 /*
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
99 END.
100 */
101 typedef struct
102 {
103 const insn_template *start;
104 const insn_template *end;
105 }
106 templates;
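/* Illustrative sketch, not part of the original file: a "templates"
   group pulled from the mnemonic hash table is conventionally walked
   with END as a one-past-the-end bound, e.g.

     const templates *ts = (const templates *) hash_find (op_hash, "mov");
     const insn_template *t;

     if (ts != NULL)
       for (t = ts->start; t < ts->end; t++)
         consider_candidate (t);    <-- hypothetical helper

   where each T is one candidate encoding of the same mnemonic.  */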
107
108 /* 386 operand encoding bytes: see 386 book for details of this. */
109 typedef struct
110 {
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
114 }
115 modrm_byte;
116
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
119
120 /* 386 opcode byte to code indirect addressing. */
121 typedef struct
122 {
123 unsigned base;
124 unsigned index;
125 unsigned scale;
126 }
127 sib_byte;
128
129 /* x86 arch names, types and features */
130 typedef struct
131 {
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
138 }
139 arch_entry;
140
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
149 #ifdef TE_PE
150 static void pe_directive_secrel (int);
151 #endif
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 const char *);
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
182 #ifndef I386COFF
183 static void s_bss (int);
184 #endif
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
187 #endif
188
189 static const char *default_arch = DEFAULT_ARCH;
190
191 /* VEX prefix. */
192 typedef struct
193 {
194 /* VEX prefix is either 2 or 3 bytes long. */
195 unsigned char bytes[3];
196 unsigned int length;
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
199 } vex_prefix;
200
201 /* 'md_assemble ()' gathers together information and puts it into an
202 i386_insn. */
203
204 union i386_op
205 {
206 expressionS *disps;
207 expressionS *imms;
208 const reg_entry *regs;
209 };
210
211 enum i386_error
212 {
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
218 bad_imm4,
219 old_gcc_only,
220 unsupported_with_intel_mnemonic,
221 unsupported_syntax,
222 unsupported,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
226 };
227
228 struct _i386_insn
229 {
230 /* TM holds the template for the insn we're currently assembling. */
231 insn_template tm;
232
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
235 char suffix;
236
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
239
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
242 operands. */
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
248
249 /* Displacement expression, immediate expression, or register for each
250 operand. */
251 union i386_op op[MAX_OPERANDS];
252
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
256
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
265
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
269
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
274
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
277 modrm_byte rm;
278 rex_byte rex;
279 sib_byte sib;
280 vex_prefix vex;
281
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
284
285 /* Prefer 8bit or 32bit displacement in encoding. */
286 enum
287 {
288 disp_encoding_default = 0,
289 disp_encoding_8bit,
290 disp_encoding_32bit
291 } disp_encoding;
292
293 /* Have HLE prefix. */
294 unsigned int have_hle;
295
296 /* Error message. */
297 enum i386_error error;
298 };
299
300 typedef struct _i386_insn i386_insn;
301
302 /* List of chars besides those in app.c:symbol_chars that can start an
303 operand. Used to prevent the scrubber eating vital white-space. */
304 const char extra_symbol_chars[] = "*%-(["
305 #ifdef LEX_AT
306 "@"
307 #endif
308 #ifdef LEX_QM
309 "?"
310 #endif
311 ;
312
313 #if (defined (TE_I386AIX) \
314 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
315 && !defined (TE_GNU) \
316 && !defined (TE_LINUX) \
317 && !defined (TE_NACL) \
318 && !defined (TE_NETWARE) \
319 && !defined (TE_FreeBSD) \
320 && !defined (TE_DragonFly) \
321 && !defined (TE_NetBSD)))
322 /* This array holds the chars that always start a comment. If the
323 pre-processor is disabled, these aren't very useful. The option
324 --divide will remove '/' from this list. */
325 const char *i386_comment_chars = "#/";
326 #define SVR4_COMMENT_CHARS 1
327 #define PREFIX_SEPARATOR '\\'
328
329 #else
330 const char *i386_comment_chars = "#";
331 #define PREFIX_SEPARATOR '/'
332 #endif
333
334 /* This array holds the chars that only start a comment at the beginning of
335 a line. If the line seems to have the form '# 123 filename'
336 .line and .file directives will appear in the pre-processed output.
337 Note that input_file.c hand checks for '#' at the beginning of the
338 first line of the input file. This is because the compiler outputs
339 #NO_APP at the beginning of its output.
340 Also note that comments started like this one will always work if
341 '/' isn't otherwise defined. */
342 const char line_comment_chars[] = "#/";
343
344 const char line_separator_chars[] = ";";
345
346 /* Chars that can be used to separate the mantissa from the exponent in
347 floating point numbers. */
348 const char EXP_CHARS[] = "eE";
349
350 /* Chars that mean this number is a floating point constant
351 As in 0f12.456
352 or 0d1.2345e12. */
353 const char FLT_CHARS[] = "fFdDxX";
354
355 /* Tables for lexical analysis. */
356 static char mnemonic_chars[256];
357 static char register_chars[256];
358 static char operand_chars[256];
359 static char identifier_chars[256];
360 static char digit_chars[256];
361
362 /* Lexical macros. */
363 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
364 #define is_operand_char(x) (operand_chars[(unsigned char) x])
365 #define is_register_char(x) (register_chars[(unsigned char) x])
366 #define is_space_char(x) ((x) == ' ')
367 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
368 #define is_digit_char(x) (digit_chars[(unsigned char) x])
369
370 /* All non-digit non-letter characters that may occur in an operand. */
371 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
372
373 /* md_assemble() always leaves the strings it's passed unaltered. To
374 effect this we maintain a stack of saved characters that we've smashed
375 with '\0's (indicating end of strings for various sub-fields of the
376 assembler instruction). */
377 static char save_stack[32];
378 static char *save_stack_p;
379 #define END_STRING_AND_SAVE(s) \
380 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
381 #define RESTORE_END_STRING(s) \
382 do { *(s) = *--save_stack_p; } while (0)
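/* Usage sketch (illustrative, not part of the original file): the parsing
   helpers bracket sub-strings of the caller's buffer like this, and the
   saves/restores must pair up in LIFO order so save_stack stays balanced.

     char *end = token_end;        <-- hypothetical position past a token
     END_STRING_AND_SAVE (end);     stash *end, plant a '\0' terminator
     ... look the now NUL-terminated token up, e.g. in op_hash ...
     RESTORE_END_STRING (end);      put the original character back  */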
383
384 /* The instruction we're assembling. */
385 static i386_insn i;
386
387 /* Possible templates for current insn. */
388 static const templates *current_templates;
389
390 /* Per instruction expressionS buffers: max displacements & immediates. */
391 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
392 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393
394 /* Current operand we are working on. */
395 static int this_operand = -1;
396
397 /* We support four different code modes; the FLAG_CODE variable distinguishes
398 the three below (.code16gcc is handled as a variant of CODE_16BIT). */
399
400 enum flag_code {
401 CODE_32BIT,
402 CODE_16BIT,
403 CODE_64BIT };
404
405 static enum flag_code flag_code;
406 static unsigned int object_64bit;
407 static unsigned int disallow_64bit_reloc;
408 static int use_rela_relocations = 0;
409
410 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
411 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
412 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413
414 /* The ELF ABI to use. */
415 enum x86_elf_abi
416 {
417 I386_ABI,
418 X86_64_ABI,
419 X86_64_X32_ABI
420 };
421
422 static enum x86_elf_abi x86_elf_abi = I386_ABI;
423 #endif
424
425 /* The names used to print error messages. */
426 static const char *flag_code_names[] =
427 {
428 "32",
429 "16",
430 "64"
431 };
432
433 /* 1 for intel syntax,
434 0 if att syntax. */
435 static int intel_syntax = 0;
436
437 /* 1 for intel mnemonic,
438 0 if att mnemonic. */
439 static int intel_mnemonic = !SYSV386_COMPAT;
440
441 /* 1 if support old (<= 2.8.1) versions of gcc. */
442 static int old_gcc = OLDGCC_COMPAT;
443
444 /* 1 if pseudo registers are permitted. */
445 static int allow_pseudo_reg = 0;
446
447 /* 1 if register prefix % not required. */
448 static int allow_naked_reg = 0;
449
450 /* 1 if pseudo index register, eiz/riz, is allowed. */
451 static int allow_index_reg = 0;
452
453 static enum check_kind
454 {
455 check_none = 0,
456 check_warning,
457 check_error
458 }
459 sse_check, operand_check = check_warning;
460
461 /* Register prefix used for error message. */
462 static const char *register_prefix = "%";
463
464 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
465 leave, push, and pop instructions so that gcc has the same stack
466 frame as in 32 bit mode. */
467 static char stackop_size = '\0';
468
469 /* Non-zero to optimize code alignment. */
470 int optimize_align_code = 1;
471
472 /* Non-zero to quieten some warnings. */
473 static int quiet_warnings = 0;
474
475 /* CPU name. */
476 static const char *cpu_arch_name = NULL;
477 static char *cpu_sub_arch_name = NULL;
478
479 /* CPU feature flags. */
480 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481
482 /* If we have selected a cpu we are generating instructions for. */
483 static int cpu_arch_tune_set = 0;
484
485 /* Cpu we are generating instructions for. */
486 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487
488 /* CPU feature flags of cpu we are generating instructions for. */
489 static i386_cpu_flags cpu_arch_tune_flags;
490
491 /* CPU instruction set architecture used. */
492 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493
494 /* CPU feature flags of instruction set architecture used. */
495 i386_cpu_flags cpu_arch_isa_flags;
496
497 /* If set, conditional jumps are not automatically promoted to handle
498 offsets larger than a byte. */
499 static unsigned int no_cond_jump_promotion = 0;
500
501 /* Encode SSE instructions with VEX prefix. */
502 static unsigned int sse2avx;
503
504 /* Encode scalar AVX instructions with specific vector length. */
505 static enum
506 {
507 vex128 = 0,
508 vex256
509 } avxscalar;
510
511 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
512 static symbolS *GOT_symbol;
513
514 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
515 unsigned int x86_dwarf2_return_column;
516
517 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
518 int x86_cie_data_alignment;
519
520 /* Interface to relax_segment.
521 There are 3 major relax states for 386 jump insns because the
522 different types of jumps add different sizes to frags when we're
523 figuring out what sort of jump to choose to reach a given label. */
524
525 /* Types. */
526 #define UNCOND_JUMP 0
527 #define COND_JUMP 1
528 #define COND_JUMP86 2
529
530 /* Sizes. */
531 #define CODE16 1
532 #define SMALL 0
533 #define SMALL16 (SMALL | CODE16)
534 #define BIG 2
535 #define BIG16 (BIG | CODE16)
536
537 #ifndef INLINE
538 #ifdef __GNUC__
539 #define INLINE __inline__
540 #else
541 #define INLINE
542 #endif
543 #endif
544
545 #define ENCODE_RELAX_STATE(type, size) \
546 ((relax_substateT) (((type) << 2) | (size)))
547 #define TYPE_FROM_RELAX_STATE(s) \
548 ((s) >> 2)
549 #define DISP_SIZE_FROM_RELAX_STATE(s) \
550 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
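/* Worked example (illustrative): ENCODE_RELAX_STATE (COND_JUMP, BIG)
   is (1 << 2) | 2 == 6; TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP,
   and DISP_SIZE_FROM_RELAX_STATE (6) yields the 4-byte displacement of
   the promoted 32-bit form.  */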
551
552 /* This table is used by relax_frag to promote short jumps to long
553 ones where necessary. SMALL (short) jumps may be promoted to BIG
554 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
555 don't allow a short jump in a 32 bit code segment to be promoted to
556 a 16 bit offset jump because it's slower (requires data size
557 prefix), and doesn't work, unless the destination is in the bottom
558 64k of the code segment (The top 16 bits of eip are zeroed). */
559
560 const relax_typeS md_relax_table[] =
561 {
562 /* The fields are:
563 1) most positive reach of this state,
564 2) most negative reach of this state,
565 3) how many bytes this mode will have in the variable part of the frag
566 4) which index into the table to try if we can't fit into this one. */
567
568 /* UNCOND_JUMP states. */
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
570 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
571 /* dword jmp adds 4 bytes to frag:
572 0 extra opcode bytes, 4 displacement bytes. */
573 {0, 0, 4, 0},
574 /* word jmp adds 2 bytes to frag:
575 0 extra opcode bytes, 2 displacement bytes. */
576 {0, 0, 2, 0},
577
578 /* COND_JUMP states. */
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
580 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
581 /* dword conditionals add 5 bytes to frag:
582 1 extra opcode byte, 4 displacement bytes. */
583 {0, 0, 5, 0},
584 /* word conditionals add 3 bytes to frag:
585 1 extra opcode byte, 2 displacement bytes. */
586 {0, 0, 3, 0},
587
588 /* COND_JUMP86 states. */
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
590 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
591 /* dword conditionals add 5 bytes to frag:
592 1 extra opcode byte, 4 displacement bytes. */
593 {0, 0, 5, 0},
594 /* word conditionals add 4 bytes to frag:
595 1 displacement byte and a 3 byte long branch insn. */
596 {0, 0, 4, 0}
597 };
598
599 static const arch_entry cpu_arch[] =
600 {
601 /* Do not replace the first two entries - i386_target_format()
602 relies on them being there in this order. */
603 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
604 CPU_GENERIC32_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
606 CPU_GENERIC64_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
608 CPU_NONE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
610 CPU_I186_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
612 CPU_I286_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
614 CPU_I386_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
616 CPU_I486_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
618 CPU_I586_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
620 CPU_I686_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
622 CPU_I586_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
624 CPU_PENTIUMPRO_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
626 CPU_P2_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
628 CPU_P3_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
630 CPU_P4_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
634 CPU_NOCONA_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
636 CPU_CORE_FLAGS, 1, 0 },
637 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
638 CPU_CORE_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
640 CPU_CORE2_FLAGS, 1, 0 },
641 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
642 CPU_CORE2_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
644 CPU_COREI7_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
646 CPU_L1OM_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
648 CPU_K1OM_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
650 CPU_K6_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
652 CPU_K6_2_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
654 CPU_ATHLON_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
656 CPU_K8_FLAGS, 1, 0 },
657 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
658 CPU_K8_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
660 CPU_K8_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
662 CPU_AMDFAM10_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
664 CPU_BDVER1_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
666 CPU_BDVER2_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
668 CPU_8087_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
670 CPU_287_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
672 CPU_387_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
674 CPU_ANY87_FLAGS, 0, 1 },
675 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
676 CPU_MMX_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
678 CPU_3DNOWA_FLAGS, 0, 1 },
679 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
680 CPU_SSE_FLAGS, 0, 0 },
681 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
682 CPU_SSE2_FLAGS, 0, 0 },
683 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
684 CPU_SSE3_FLAGS, 0, 0 },
685 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
686 CPU_SSSE3_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
688 CPU_SSE4_1_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
690 CPU_SSE4_2_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
692 CPU_SSE4_2_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
694 CPU_ANY_SSE_FLAGS, 0, 1 },
695 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
696 CPU_AVX_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
698 CPU_AVX2_FLAGS, 0, 0 },
699 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
700 CPU_ANY_AVX_FLAGS, 0, 1 },
701 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
702 CPU_VMX_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
704 CPU_VMFUNC_FLAGS, 0, 0 },
705 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
706 CPU_SMX_FLAGS, 0, 0 },
707 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
708 CPU_XSAVE_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
710 CPU_XSAVEOPT_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
712 CPU_AES_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
714 CPU_PCLMUL_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
716 CPU_PCLMUL_FLAGS, 1, 0 },
717 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
718 CPU_FSGSBASE_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
720 CPU_RDRND_FLAGS, 0, 0 },
721 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
722 CPU_F16C_FLAGS, 0, 0 },
723 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
724 CPU_BMI2_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
726 CPU_FMA_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
728 CPU_FMA4_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
730 CPU_XOP_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
732 CPU_LWP_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
734 CPU_MOVBE_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
736 CPU_EPT_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
738 CPU_LZCNT_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
740 CPU_HLE_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
742 CPU_RTM_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
744 CPU_INVPCID_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
746 CPU_CLFLUSH_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
748 CPU_NOP_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
750 CPU_SYSCALL_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
752 CPU_RDTSCP_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
754 CPU_3DNOW_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
756 CPU_3DNOWA_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
758 CPU_PADLOCK_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
760 CPU_SVME_FLAGS, 1, 0 },
761 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
762 CPU_SVME_FLAGS, 0, 0 },
763 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
764 CPU_SSE4A_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
766 CPU_ABM_FLAGS, 0, 0 },
767 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
768 CPU_BMI_FLAGS, 0, 0 },
769 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
770 CPU_TBM_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
772 CPU_ADX_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
774 CPU_RDSEED_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
776 CPU_PRFCHW_FLAGS, 0, 0 },
777 };
778
779 #ifdef I386COFF
780 /* Like s_lcomm_internal in gas/read.c but the alignment string
781 is allowed to be optional. */
782
783 static symbolS *
784 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
785 {
786 addressT align = 0;
787
788 SKIP_WHITESPACE ();
789
790 if (needs_align
791 && *input_line_pointer == ',')
792 {
793 align = parse_align (needs_align - 1);
794
795 if (align == (addressT) -1)
796 return NULL;
797 }
798 else
799 {
800 if (size >= 8)
801 align = 3;
802 else if (size >= 4)
803 align = 2;
804 else if (size >= 2)
805 align = 1;
806 else
807 align = 0;
808 }
809
810 bss_alloc (symbolP, size, align);
811 return symbolP;
812 }
813
814 static void
815 pe_lcomm (int needs_align)
816 {
817 s_comm_internal (needs_align * 2, pe_lcomm_internal);
818 }
819 #endif
820
821 const pseudo_typeS md_pseudo_table[] =
822 {
823 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
824 {"align", s_align_bytes, 0},
825 #else
826 {"align", s_align_ptwo, 0},
827 #endif
828 {"arch", set_cpu_arch, 0},
829 #ifndef I386COFF
830 {"bss", s_bss, 0},
831 #else
832 {"lcomm", pe_lcomm, 1},
833 #endif
834 {"ffloat", float_cons, 'f'},
835 {"dfloat", float_cons, 'd'},
836 {"tfloat", float_cons, 'x'},
837 {"value", cons, 2},
838 {"slong", signed_cons, 4},
839 {"noopt", s_ignore, 0},
840 {"optim", s_ignore, 0},
841 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
842 {"code16", set_code_flag, CODE_16BIT},
843 {"code32", set_code_flag, CODE_32BIT},
844 {"code64", set_code_flag, CODE_64BIT},
845 {"intel_syntax", set_intel_syntax, 1},
846 {"att_syntax", set_intel_syntax, 0},
847 {"intel_mnemonic", set_intel_mnemonic, 1},
848 {"att_mnemonic", set_intel_mnemonic, 0},
849 {"allow_index_reg", set_allow_index_reg, 1},
850 {"disallow_index_reg", set_allow_index_reg, 0},
851 {"sse_check", set_check, 0},
852 {"operand_check", set_check, 1},
853 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
854 {"largecomm", handle_large_common, 0},
855 #else
856 {"file", (void (*) (int)) dwarf2_directive_file, 0},
857 {"loc", dwarf2_directive_loc, 0},
858 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
859 #endif
860 #ifdef TE_PE
861 {"secrel32", pe_directive_secrel, 0},
862 #endif
863 {0, 0, 0}
864 };
865
866 /* For interface with expression (). */
867 extern char *input_line_pointer;
868
869 /* Hash table for instruction mnemonic lookup. */
870 static struct hash_control *op_hash;
871
872 /* Hash table for register lookup. */
873 static struct hash_control *reg_hash;
874 \f
875 void
876 i386_align_code (fragS *fragP, int count)
877 {
878 /* Various efficient no-op patterns for aligning code labels.
879 Note: Don't try to assemble the instructions in the comments.
880 0L and 0w are not legal. */
881 static const char f32_1[] =
882 {0x90}; /* nop */
883 static const char f32_2[] =
884 {0x66,0x90}; /* xchg %ax,%ax */
885 static const char f32_3[] =
886 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
887 static const char f32_4[] =
888 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
889 static const char f32_5[] =
890 {0x90, /* nop */
891 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
892 static const char f32_6[] =
893 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
894 static const char f32_7[] =
895 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
896 static const char f32_8[] =
897 {0x90, /* nop */
898 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
899 static const char f32_9[] =
900 {0x89,0xf6, /* movl %esi,%esi */
901 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
902 static const char f32_10[] =
903 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
904 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
905 static const char f32_11[] =
906 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
907 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
908 static const char f32_12[] =
909 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
910 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
911 static const char f32_13[] =
912 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
913 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
914 static const char f32_14[] =
915 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
916 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
917 static const char f16_3[] =
918 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
919 static const char f16_4[] =
920 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
921 static const char f16_5[] =
922 {0x90, /* nop */
923 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
924 static const char f16_6[] =
925 {0x89,0xf6, /* mov %si,%si */
926 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
927 static const char f16_7[] =
928 {0x8d,0x74,0x00, /* lea 0(%si),%si */
929 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
930 static const char f16_8[] =
931 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
932 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
933 static const char jump_31[] =
934 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
935 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
936 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
937 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
938 static const char *const f32_patt[] = {
939 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
940 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
941 };
942 static const char *const f16_patt[] = {
943 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
944 };
945 /* nopl (%[re]ax) */
946 static const char alt_3[] =
947 {0x0f,0x1f,0x00};
948 /* nopl 0(%[re]ax) */
949 static const char alt_4[] =
950 {0x0f,0x1f,0x40,0x00};
951 /* nopl 0(%[re]ax,%[re]ax,1) */
952 static const char alt_5[] =
953 {0x0f,0x1f,0x44,0x00,0x00};
954 /* nopw 0(%[re]ax,%[re]ax,1) */
955 static const char alt_6[] =
956 {0x66,0x0f,0x1f,0x44,0x00,0x00};
957 /* nopl 0L(%[re]ax) */
958 static const char alt_7[] =
959 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
960 /* nopl 0L(%[re]ax,%[re]ax,1) */
961 static const char alt_8[] =
962 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
963 /* nopw 0L(%[re]ax,%[re]ax,1) */
964 static const char alt_9[] =
965 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
966 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
967 static const char alt_10[] =
968 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
969 /* data16
970 nopw %cs:0L(%[re]ax,%[re]ax,1) */
971 static const char alt_long_11[] =
972 {0x66,
973 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
974 /* data16
975 data16
976 nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_long_12[] =
978 {0x66,
979 0x66,
980 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
981 /* data16
982 data16
983 data16
984 nopw %cs:0L(%[re]ax,%[re]ax,1) */
985 static const char alt_long_13[] =
986 {0x66,
987 0x66,
988 0x66,
989 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
990 /* data16
991 data16
992 data16
993 data16
994 nopw %cs:0L(%[re]ax,%[re]ax,1) */
995 static const char alt_long_14[] =
996 {0x66,
997 0x66,
998 0x66,
999 0x66,
1000 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1001 /* data16
1002 data16
1003 data16
1004 data16
1005 data16
1006 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1007 static const char alt_long_15[] =
1008 {0x66,
1009 0x66,
1010 0x66,
1011 0x66,
1012 0x66,
1013 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1014 /* nopl 0(%[re]ax,%[re]ax,1)
1015 nopw 0(%[re]ax,%[re]ax,1) */
1016 static const char alt_short_11[] =
1017 {0x0f,0x1f,0x44,0x00,0x00,
1018 0x66,0x0f,0x1f,0x44,0x00,0x00};
1019 /* nopw 0(%[re]ax,%[re]ax,1)
1020 nopw 0(%[re]ax,%[re]ax,1) */
1021 static const char alt_short_12[] =
1022 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1023 0x66,0x0f,0x1f,0x44,0x00,0x00};
1024 /* nopw 0(%[re]ax,%[re]ax,1)
1025 nopl 0L(%[re]ax) */
1026 static const char alt_short_13[] =
1027 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1028 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1029 /* nopl 0L(%[re]ax)
1030 nopl 0L(%[re]ax) */
1031 static const char alt_short_14[] =
1032 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1033 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1034 /* nopl 0L(%[re]ax)
1035 nopl 0L(%[re]ax,%[re]ax,1) */
1036 static const char alt_short_15[] =
1037 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1038 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1039 static const char *const alt_short_patt[] = {
1040 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1041 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1042 alt_short_14, alt_short_15
1043 };
1044 static const char *const alt_long_patt[] = {
1045 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1046 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1047 alt_long_14, alt_long_15
1048 };
1049
1050 /* Only align when a positive, in-range amount of padding is requested. */
1051 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1052 return;
1053
1054 /* We need to decide which NOP sequence to use for 32bit and
1055 64bit. When -mtune= is used:
1056
1057 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1058 PROCESSOR_GENERIC32, f32_patt will be used.
1059 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1060 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1061 PROCESSOR_GENERIC64, alt_long_patt will be used.
1062 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1063 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1064 will be used.
1065
1066 When -mtune= isn't used, alt_long_patt will be used if
1067 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1068 be used.
1069
1070 When -march= or .arch is used, we can't use anything beyond
1071 cpu_arch_isa_flags. */
1072
1073 if (flag_code == CODE_16BIT)
1074 {
1075 if (count > 8)
1076 {
1077 memcpy (fragP->fr_literal + fragP->fr_fix,
1078 jump_31, count);
1079 /* Adjust jump offset. */
1080 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1081 }
1082 else
1083 memcpy (fragP->fr_literal + fragP->fr_fix,
1084 f16_patt[count - 1], count);
1085 }
1086 else
1087 {
1088 const char *const *patt = NULL;
1089
1090 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1091 {
1092 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1093 switch (cpu_arch_tune)
1094 {
1095 case PROCESSOR_UNKNOWN:
1096 /* We use cpu_arch_isa_flags to check if we SHOULD
1097 optimize with nops. */
1098 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1099 patt = alt_long_patt;
1100 else
1101 patt = f32_patt;
1102 break;
1103 case PROCESSOR_PENTIUM4:
1104 case PROCESSOR_NOCONA:
1105 case PROCESSOR_CORE:
1106 case PROCESSOR_CORE2:
1107 case PROCESSOR_COREI7:
1108 case PROCESSOR_L1OM:
1109 case PROCESSOR_K1OM:
1110 case PROCESSOR_GENERIC64:
1111 patt = alt_long_patt;
1112 break;
1113 case PROCESSOR_K6:
1114 case PROCESSOR_ATHLON:
1115 case PROCESSOR_K8:
1116 case PROCESSOR_AMDFAM10:
1117 case PROCESSOR_BD:
1118 patt = alt_short_patt;
1119 break;
1120 case PROCESSOR_I386:
1121 case PROCESSOR_I486:
1122 case PROCESSOR_PENTIUM:
1123 case PROCESSOR_PENTIUMPRO:
1124 case PROCESSOR_GENERIC32:
1125 patt = f32_patt;
1126 break;
1127 }
1128 }
1129 else
1130 {
1131 switch (fragP->tc_frag_data.tune)
1132 {
1133 case PROCESSOR_UNKNOWN:
1134 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1135 PROCESSOR_UNKNOWN. */
1136 abort ();
1137 break;
1138
1139 case PROCESSOR_I386:
1140 case PROCESSOR_I486:
1141 case PROCESSOR_PENTIUM:
1142 case PROCESSOR_K6:
1143 case PROCESSOR_ATHLON:
1144 case PROCESSOR_K8:
1145 case PROCESSOR_AMDFAM10:
1146 case PROCESSOR_BD:
1147 case PROCESSOR_GENERIC32:
1148 /* We use cpu_arch_isa_flags to check if we CAN optimize
1149 with nops. */
1150 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1151 patt = alt_short_patt;
1152 else
1153 patt = f32_patt;
1154 break;
1155 case PROCESSOR_PENTIUMPRO:
1156 case PROCESSOR_PENTIUM4:
1157 case PROCESSOR_NOCONA:
1158 case PROCESSOR_CORE:
1159 case PROCESSOR_CORE2:
1160 case PROCESSOR_COREI7:
1161 case PROCESSOR_L1OM:
1162 case PROCESSOR_K1OM:
1163 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1164 patt = alt_long_patt;
1165 else
1166 patt = f32_patt;
1167 break;
1168 case PROCESSOR_GENERIC64:
1169 patt = alt_long_patt;
1170 break;
1171 }
1172 }
1173
1174 if (patt == f32_patt)
1175 {
1176 /* If the padding is no larger than the limit computed below, we use
1177 the normal NOP patterns. Otherwise, we use a jump instruction and
1178 adjust its offset. */
1179 int limit;
1180
1181 /* For 64bit, the limit is 3 bytes. */
1182 if (flag_code == CODE_64BIT
1183 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1184 limit = 3;
1185 else
1186 limit = 15;
1187 if (count < limit)
1188 memcpy (fragP->fr_literal + fragP->fr_fix,
1189 patt[count - 1], count);
1190 else
1191 {
1192 memcpy (fragP->fr_literal + fragP->fr_fix,
1193 jump_31, count);
1194 /* Adjust jump offset. */
1195 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1196 }
1197 }
1198 else
1199 {
1200 /* Maximum length of an instruction is 15 bytes. If the
1201 padding is greater than 15 bytes and we don't use a jump,
1202 we have to break it into smaller pieces. */
1203 int padding = count;
1204 while (padding > 15)
1205 {
1206 padding -= 15;
1207 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1208 patt [14], 15);
1209 }
1210
1211 if (padding)
1212 memcpy (fragP->fr_literal + fragP->fr_fix,
1213 patt [padding - 1], padding);
1214 }
1215 }
1216 fragP->fr_var = count;
1217 }
1218
1219 static INLINE int
1220 operand_type_all_zero (const union i386_operand_type *x)
1221 {
1222 switch (ARRAY_SIZE(x->array))
1223 {
1224 case 3:
1225 if (x->array[2])
1226 return 0;
1227 case 2:
1228 if (x->array[1])
1229 return 0;
1230 case 1:
1231 return !x->array[0];
1232 default:
1233 abort ();
1234 }
1235 }
1236
1237 static INLINE void
1238 operand_type_set (union i386_operand_type *x, unsigned int v)
1239 {
1240 switch (ARRAY_SIZE(x->array))
1241 {
1242 case 3:
1243 x->array[2] = v;
1244 case 2:
1245 x->array[1] = v;
1246 case 1:
1247 x->array[0] = v;
1248 break;
1249 default:
1250 abort ();
1251 }
1252 }
1253
1254 static INLINE int
1255 operand_type_equal (const union i386_operand_type *x,
1256 const union i386_operand_type *y)
1257 {
1258 switch (ARRAY_SIZE(x->array))
1259 {
1260 case 3:
1261 if (x->array[2] != y->array[2])
1262 return 0;
1263 case 2:
1264 if (x->array[1] != y->array[1])
1265 return 0;
1266 case 1:
1267 return x->array[0] == y->array[0];
1268 break;
1269 default:
1270 abort ();
1271 }
1272 }
1273
1274 static INLINE int
1275 cpu_flags_all_zero (const union i386_cpu_flags *x)
1276 {
1277 switch (ARRAY_SIZE(x->array))
1278 {
1279 case 3:
1280 if (x->array[2])
1281 return 0;
1282 case 2:
1283 if (x->array[1])
1284 return 0;
1285 case 1:
1286 return !x->array[0];
1287 default:
1288 abort ();
1289 }
1290 }
1291
1292 static INLINE void
1293 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1294 {
1295 switch (ARRAY_SIZE(x->array))
1296 {
1297 case 3:
1298 x->array[2] = v;
1299 case 2:
1300 x->array[1] = v;
1301 case 1:
1302 x->array[0] = v;
1303 break;
1304 default:
1305 abort ();
1306 }
1307 }
1308
1309 static INLINE int
1310 cpu_flags_equal (const union i386_cpu_flags *x,
1311 const union i386_cpu_flags *y)
1312 {
1313 switch (ARRAY_SIZE(x->array))
1314 {
1315 case 3:
1316 if (x->array[2] != y->array[2])
1317 return 0;
1318 case 2:
1319 if (x->array[1] != y->array[1])
1320 return 0;
1321 case 1:
1322 return x->array[0] == y->array[0];
1323 break;
1324 default:
1325 abort ();
1326 }
1327 }
1328
1329 static INLINE int
1330 cpu_flags_check_cpu64 (i386_cpu_flags f)
1331 {
1332 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1333 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1334 }
1335
1336 static INLINE i386_cpu_flags
1337 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1338 {
1339 switch (ARRAY_SIZE (x.array))
1340 {
1341 case 3:
1342 x.array [2] &= y.array [2];
1343 case 2:
1344 x.array [1] &= y.array [1];
1345 case 1:
1346 x.array [0] &= y.array [0];
1347 break;
1348 default:
1349 abort ();
1350 }
1351 return x;
1352 }
1353
1354 static INLINE i386_cpu_flags
1355 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1356 {
1357 switch (ARRAY_SIZE (x.array))
1358 {
1359 case 3:
1360 x.array [2] |= y.array [2];
1361 case 2:
1362 x.array [1] |= y.array [1];
1363 case 1:
1364 x.array [0] |= y.array [0];
1365 break;
1366 default:
1367 abort ();
1368 }
1369 return x;
1370 }
1371
1372 static INLINE i386_cpu_flags
1373 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1374 {
1375 switch (ARRAY_SIZE (x.array))
1376 {
1377 case 3:
1378 x.array [2] &= ~y.array [2];
1379 case 2:
1380 x.array [1] &= ~y.array [1];
1381 case 1:
1382 x.array [0] &= ~y.array [0];
1383 break;
1384 default:
1385 abort ();
1386 }
1387 return x;
1388 }
1389
1390 #define CPU_FLAGS_ARCH_MATCH 0x1
1391 #define CPU_FLAGS_64BIT_MATCH 0x2
1392 #define CPU_FLAGS_AES_MATCH 0x4
1393 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1394 #define CPU_FLAGS_AVX_MATCH 0x10
1395
1396 #define CPU_FLAGS_32BIT_MATCH \
1397 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1398 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1399 #define CPU_FLAGS_PERFECT_MATCH \
1400 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1401
1402 /* Return CPU flags match bits. */
1403
1404 static int
1405 cpu_flags_match (const insn_template *t)
1406 {
1407 i386_cpu_flags x = t->cpu_flags;
1408 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1409
1410 x.bitfield.cpu64 = 0;
1411 x.bitfield.cpuno64 = 0;
1412
1413 if (cpu_flags_all_zero (&x))
1414 {
1415 /* This instruction is available on all archs. */
1416 match |= CPU_FLAGS_32BIT_MATCH;
1417 }
1418 else
1419 {
1420 /* This instruction is available only on some archs. */
1421 i386_cpu_flags cpu = cpu_arch_flags;
1422
1423 cpu.bitfield.cpu64 = 0;
1424 cpu.bitfield.cpuno64 = 0;
1425 cpu = cpu_flags_and (x, cpu);
1426 if (!cpu_flags_all_zero (&cpu))
1427 {
1428 if (x.bitfield.cpuavx)
1429 {
1430 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1431 if (cpu.bitfield.cpuavx)
1432 {
1433 /* Check SSE2AVX. */
1434 if (!t->opcode_modifier.sse2avx || sse2avx)
1435 {
1436 match |= (CPU_FLAGS_ARCH_MATCH
1437 | CPU_FLAGS_AVX_MATCH);
1438 /* Check AES. */
1439 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1440 match |= CPU_FLAGS_AES_MATCH;
1441 /* Check PCLMUL. */
1442 if (!x.bitfield.cpupclmul
1443 || cpu.bitfield.cpupclmul)
1444 match |= CPU_FLAGS_PCLMUL_MATCH;
1445 }
1446 }
1447 else
1448 match |= CPU_FLAGS_ARCH_MATCH;
1449 }
1450 else
1451 match |= CPU_FLAGS_32BIT_MATCH;
1452 }
1453 }
1454 return match;
1455 }
1456
1457 static INLINE i386_operand_type
1458 operand_type_and (i386_operand_type x, i386_operand_type y)
1459 {
1460 switch (ARRAY_SIZE (x.array))
1461 {
1462 case 3:
1463 x.array [2] &= y.array [2];
1464 case 2:
1465 x.array [1] &= y.array [1];
1466 case 1:
1467 x.array [0] &= y.array [0];
1468 break;
1469 default:
1470 abort ();
1471 }
1472 return x;
1473 }
1474
1475 static INLINE i386_operand_type
1476 operand_type_or (i386_operand_type x, i386_operand_type y)
1477 {
1478 switch (ARRAY_SIZE (x.array))
1479 {
1480 case 3:
1481 x.array [2] |= y.array [2];
1482 case 2:
1483 x.array [1] |= y.array [1];
1484 case 1:
1485 x.array [0] |= y.array [0];
1486 break;
1487 default:
1488 abort ();
1489 }
1490 return x;
1491 }
1492
1493 static INLINE i386_operand_type
1494 operand_type_xor (i386_operand_type x, i386_operand_type y)
1495 {
1496 switch (ARRAY_SIZE (x.array))
1497 {
1498 case 3:
1499 x.array [2] ^= y.array [2];
1500 case 2:
1501 x.array [1] ^= y.array [1];
1502 case 1:
1503 x.array [0] ^= y.array [0];
1504 break;
1505 default:
1506 abort ();
1507 }
1508 return x;
1509 }
1510
1511 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1512 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1513 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1514 static const i386_operand_type inoutportreg
1515 = OPERAND_TYPE_INOUTPORTREG;
1516 static const i386_operand_type reg16_inoutportreg
1517 = OPERAND_TYPE_REG16_INOUTPORTREG;
1518 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1519 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1520 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1521 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1522 static const i386_operand_type anydisp
1523 = OPERAND_TYPE_ANYDISP;
1524 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1525 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1526 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1527 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1528 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1529 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1530 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1531 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1532 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1533 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1534 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1535 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1536
1537 enum operand_type
1538 {
1539 reg,
1540 imm,
1541 disp,
1542 anymem
1543 };
1544
1545 static INLINE int
1546 operand_type_check (i386_operand_type t, enum operand_type c)
1547 {
1548 switch (c)
1549 {
1550 case reg:
1551 return (t.bitfield.reg8
1552 || t.bitfield.reg16
1553 || t.bitfield.reg32
1554 || t.bitfield.reg64);
1555
1556 case imm:
1557 return (t.bitfield.imm8
1558 || t.bitfield.imm8s
1559 || t.bitfield.imm16
1560 || t.bitfield.imm32
1561 || t.bitfield.imm32s
1562 || t.bitfield.imm64);
1563
1564 case disp:
1565 return (t.bitfield.disp8
1566 || t.bitfield.disp16
1567 || t.bitfield.disp32
1568 || t.bitfield.disp32s
1569 || t.bitfield.disp64);
1570
1571 case anymem:
1572 return (t.bitfield.disp8
1573 || t.bitfield.disp16
1574 || t.bitfield.disp32
1575 || t.bitfield.disp32s
1576 || t.bitfield.disp64
1577 || t.bitfield.baseindex);
1578
1579 default:
1580 abort ();
1581 }
1582
1583 return 0;
1584 }
1585
1586 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1587 operand J for instruction template T. */
1588
1589 static INLINE int
1590 match_reg_size (const insn_template *t, unsigned int j)
1591 {
1592 return !((i.types[j].bitfield.byte
1593 && !t->operand_types[j].bitfield.byte)
1594 || (i.types[j].bitfield.word
1595 && !t->operand_types[j].bitfield.word)
1596 || (i.types[j].bitfield.dword
1597 && !t->operand_types[j].bitfield.dword)
1598 || (i.types[j].bitfield.qword
1599 && !t->operand_types[j].bitfield.qword));
1600 }
1601
1602 /* Return 1 if there is no conflict in any size on operand J for
1603 instruction template T. */
1604
1605 static INLINE int
1606 match_mem_size (const insn_template *t, unsigned int j)
1607 {
1608 return (match_reg_size (t, j)
1609 && !((i.types[j].bitfield.unspecified
1610 && !t->operand_types[j].bitfield.unspecified)
1611 || (i.types[j].bitfield.fword
1612 && !t->operand_types[j].bitfield.fword)
1613 || (i.types[j].bitfield.tbyte
1614 && !t->operand_types[j].bitfield.tbyte)
1615 || (i.types[j].bitfield.xmmword
1616 && !t->operand_types[j].bitfield.xmmword)
1617 || (i.types[j].bitfield.ymmword
1618 && !t->operand_types[j].bitfield.ymmword)));
1619 }
1620
1621 /* Return 1 if there is no size conflict on any operands for
1622 instruction template T. */
1623
1624 static INLINE int
1625 operand_size_match (const insn_template *t)
1626 {
1627 unsigned int j;
1628 int match = 1;
1629
1630 /* Don't check jump instructions. */
1631 if (t->opcode_modifier.jump
1632 || t->opcode_modifier.jumpbyte
1633 || t->opcode_modifier.jumpdword
1634 || t->opcode_modifier.jumpintersegment)
1635 return match;
1636
1637 /* Check memory and accumulator operand size. */
1638 for (j = 0; j < i.operands; j++)
1639 {
1640 if (t->operand_types[j].bitfield.anysize)
1641 continue;
1642
1643 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1644 {
1645 match = 0;
1646 break;
1647 }
1648
1649 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1650 {
1651 match = 0;
1652 break;
1653 }
1654 }
1655
1656 if (match)
1657 return match;
1658 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1659 {
1660 mismatch:
1661 i.error = operand_size_mismatch;
1662 return 0;
1663 }
1664
1665 /* Check reverse. */
1666 gas_assert (i.operands == 2);
1667
1668 match = 1;
1669 for (j = 0; j < 2; j++)
1670 {
1671 if (t->operand_types[j].bitfield.acc
1672 && !match_reg_size (t, j ? 0 : 1))
1673 goto mismatch;
1674
1675 if (i.types[j].bitfield.mem
1676 && !match_mem_size (t, j ? 0 : 1))
1677 goto mismatch;
1678 }
1679
1680 return match;
1681 }
1682
1683 static INLINE int
1684 operand_type_match (i386_operand_type overlap,
1685 i386_operand_type given)
1686 {
1687 i386_operand_type temp = overlap;
1688
1689 temp.bitfield.jumpabsolute = 0;
1690 temp.bitfield.unspecified = 0;
1691 temp.bitfield.byte = 0;
1692 temp.bitfield.word = 0;
1693 temp.bitfield.dword = 0;
1694 temp.bitfield.fword = 0;
1695 temp.bitfield.qword = 0;
1696 temp.bitfield.tbyte = 0;
1697 temp.bitfield.xmmword = 0;
1698 temp.bitfield.ymmword = 0;
1699 if (operand_type_all_zero (&temp))
1700 goto mismatch;
1701
1702 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1703 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1704 return 1;
1705
1706 mismatch:
1707 i.error = operand_type_mismatch;
1708 return 0;
1709 }
1710
1711 /* If given types g0 and g1 are registers they must be of the same type
1712 unless the expected operand type register overlap is null.
1713 Note that Acc in a template matches every size of reg. */
1714
1715 static INLINE int
1716 operand_type_register_match (i386_operand_type m0,
1717 i386_operand_type g0,
1718 i386_operand_type t0,
1719 i386_operand_type m1,
1720 i386_operand_type g1,
1721 i386_operand_type t1)
1722 {
1723 if (!operand_type_check (g0, reg))
1724 return 1;
1725
1726 if (!operand_type_check (g1, reg))
1727 return 1;
1728
1729 if (g0.bitfield.reg8 == g1.bitfield.reg8
1730 && g0.bitfield.reg16 == g1.bitfield.reg16
1731 && g0.bitfield.reg32 == g1.bitfield.reg32
1732 && g0.bitfield.reg64 == g1.bitfield.reg64)
1733 return 1;
1734
1735 if (m0.bitfield.acc)
1736 {
1737 t0.bitfield.reg8 = 1;
1738 t0.bitfield.reg16 = 1;
1739 t0.bitfield.reg32 = 1;
1740 t0.bitfield.reg64 = 1;
1741 }
1742
1743 if (m1.bitfield.acc)
1744 {
1745 t1.bitfield.reg8 = 1;
1746 t1.bitfield.reg16 = 1;
1747 t1.bitfield.reg32 = 1;
1748 t1.bitfield.reg64 = 1;
1749 }
1750
1751 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1752 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1753 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1754 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1755 return 1;
1756
1757 i.error = register_type_mismatch;
1758
1759 return 0;
1760 }
1761
1762 static INLINE unsigned int
1763 register_number (const reg_entry *r)
1764 {
1765 unsigned int nr = r->reg_num;
1766
1767 if (r->reg_flags & RegRex)
1768 nr += 8;
1769
1770 return nr;
1771 }
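/* Example (illustrative; assumes the layout of the opcodes register
   table): %ecx has reg_num 1 and no RegRex, so register_number gives 1,
   while %r9 is stored as reg_num 1 with RegRex set and yields 9 -- the
   extra bit that ends up in the REX prefix.  */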
1772
1773 static INLINE unsigned int
1774 mode_from_disp_size (i386_operand_type t)
1775 {
1776 if (t.bitfield.disp8)
1777 return 1;
1778 else if (t.bitfield.disp16
1779 || t.bitfield.disp32
1780 || t.bitfield.disp32s)
1781 return 2;
1782 else
1783 return 0;
1784 }
1785
1786 static INLINE int
1787 fits_in_signed_byte (offsetT num)
1788 {
1789 return (num >= -128) && (num <= 127);
1790 }
1791
1792 static INLINE int
1793 fits_in_unsigned_byte (offsetT num)
1794 {
1795 return (num & 0xff) == num;
1796 }
1797
1798 static INLINE int
1799 fits_in_unsigned_word (offsetT num)
1800 {
1801 return (num & 0xffff) == num;
1802 }
1803
1804 static INLINE int
1805 fits_in_signed_word (offsetT num)
1806 {
1807 return (-32768 <= num) && (num <= 32767);
1808 }
1809
1810 static INLINE int
1811 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1812 {
1813 #ifndef BFD64
1814 return 1;
1815 #else
1816 return (!(((offsetT) -1 << 31) & num)
1817 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1818 #endif
1819 } /* fits_in_signed_long() */
1820
1821 static INLINE int
1822 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1823 {
1824 #ifndef BFD64
1825 return 1;
1826 #else
1827 return (num & (((offsetT) 2 << 31) - 1)) == num;
1828 #endif
1829 } /* fits_in_unsigned_long() */
1830
1831 static INLINE int
1832 fits_in_imm4 (offsetT num)
1833 {
1834 return (num & 0xf) == num;
1835 }
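/* A few worked values (illustrative): fits_in_signed_byte (127) and
   fits_in_unsigned_byte (255) are both 1, fits_in_signed_byte (-129) is 0,
   and fits_in_imm4 (15) is 1 while fits_in_imm4 (16) is 0.  Note that
   fits_in_unsigned_byte (-1) is 0, since (-1 & 0xff) != -1.  */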
1836
1837 static i386_operand_type
1838 smallest_imm_type (offsetT num)
1839 {
1840 i386_operand_type t;
1841
1842 operand_type_set (&t, 0);
1843 t.bitfield.imm64 = 1;
1844
1845 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1846 {
1847 /* This code is disabled on the 486 because all the Imm1 forms
1848 in the opcode table are slower on the i486. They're the
1849 versions with the implicitly specified single-position
1850 displacement, which has another syntax if you really want to
1851 use that form. */
1852 t.bitfield.imm1 = 1;
1853 t.bitfield.imm8 = 1;
1854 t.bitfield.imm8s = 1;
1855 t.bitfield.imm16 = 1;
1856 t.bitfield.imm32 = 1;
1857 t.bitfield.imm32s = 1;
1858 }
1859 else if (fits_in_signed_byte (num))
1860 {
1861 t.bitfield.imm8 = 1;
1862 t.bitfield.imm8s = 1;
1863 t.bitfield.imm16 = 1;
1864 t.bitfield.imm32 = 1;
1865 t.bitfield.imm32s = 1;
1866 }
1867 else if (fits_in_unsigned_byte (num))
1868 {
1869 t.bitfield.imm8 = 1;
1870 t.bitfield.imm16 = 1;
1871 t.bitfield.imm32 = 1;
1872 t.bitfield.imm32s = 1;
1873 }
1874 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1875 {
1876 t.bitfield.imm16 = 1;
1877 t.bitfield.imm32 = 1;
1878 t.bitfield.imm32s = 1;
1879 }
1880 else if (fits_in_signed_long (num))
1881 {
1882 t.bitfield.imm32 = 1;
1883 t.bitfield.imm32s = 1;
1884 }
1885 else if (fits_in_unsigned_long (num))
1886 t.bitfield.imm32 = 1;
1887
1888 return t;
1889 }
1890
1891 static offsetT
1892 offset_in_range (offsetT val, int size)
1893 {
1894 addressT mask;
1895
1896 switch (size)
1897 {
1898 case 1: mask = ((addressT) 1 << 8) - 1; break;
1899 case 2: mask = ((addressT) 1 << 16) - 1; break;
1900 case 4: mask = ((addressT) 2 << 31) - 1; break;
1901 #ifdef BFD64
1902 case 8: mask = ((addressT) 2 << 63) - 1; break;
1903 #endif
1904 default: abort ();
1905 }
1906
1907 #ifdef BFD64
1908 /* If BFD64, sign extend val for 32bit address mode. */
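  /* E.g. a 32-bit operand of 0xfffffff0 is canonicalized to -16 here,
     so the "shortened to" warning below is not issued for negative
     32-bit displacements.  */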
1909 if (flag_code != CODE_64BIT
1910 || i.prefix[ADDR_PREFIX])
1911 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1912 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1913 #endif
1914
1915 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1916 {
1917 char buf1[40], buf2[40];
1918
1919 sprint_value (buf1, val);
1920 sprint_value (buf2, val & mask);
1921 as_warn (_("%s shortened to %s"), buf1, buf2);
1922 }
1923 return val & mask;
1924 }
1925
1926 enum PREFIX_GROUP
1927 {
1928 PREFIX_EXIST = 0,
1929 PREFIX_LOCK,
1930 PREFIX_REP,
1931 PREFIX_OTHER
1932 };
1933
1934 /* Returns
1935 a. PREFIX_EXIST if attempting to add a prefix where one from the
1936 same class already exists.
1937 b. PREFIX_LOCK if lock prefix is added.
1938 c. PREFIX_REP if rep/repne prefix is added.
1939 d. PREFIX_OTHER if other prefix is added.
1940 */
1941
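/* For instance, add_prefix (DATA_PREFIX_OPCODE) records an operand-size
   (0x66) prefix in i.prefix[DATA_PREFIX]; a second such call for the same
   insn returns PREFIX_EXIST and emits the "same type of prefix used twice"
   diagnostic.  */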
1942 static enum PREFIX_GROUP
1943 add_prefix (unsigned int prefix)
1944 {
1945 enum PREFIX_GROUP ret = PREFIX_OTHER;
1946 unsigned int q;
1947
1948 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1949 && flag_code == CODE_64BIT)
1950 {
1951 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1952 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1953 && (prefix & (REX_R | REX_X | REX_B))))
1954 ret = PREFIX_EXIST;
1955 q = REX_PREFIX;
1956 }
1957 else
1958 {
1959 switch (prefix)
1960 {
1961 default:
1962 abort ();
1963
1964 case CS_PREFIX_OPCODE:
1965 case DS_PREFIX_OPCODE:
1966 case ES_PREFIX_OPCODE:
1967 case FS_PREFIX_OPCODE:
1968 case GS_PREFIX_OPCODE:
1969 case SS_PREFIX_OPCODE:
1970 q = SEG_PREFIX;
1971 break;
1972
1973 case REPNE_PREFIX_OPCODE:
1974 case REPE_PREFIX_OPCODE:
1975 q = REP_PREFIX;
1976 ret = PREFIX_REP;
1977 break;
1978
1979 case LOCK_PREFIX_OPCODE:
1980 q = LOCK_PREFIX;
1981 ret = PREFIX_LOCK;
1982 break;
1983
1984 case FWAIT_OPCODE:
1985 q = WAIT_PREFIX;
1986 break;
1987
1988 case ADDR_PREFIX_OPCODE:
1989 q = ADDR_PREFIX;
1990 break;
1991
1992 case DATA_PREFIX_OPCODE:
1993 q = DATA_PREFIX;
1994 break;
1995 }
1996 if (i.prefix[q] != 0)
1997 ret = PREFIX_EXIST;
1998 }
1999
2000 if (ret)
2001 {
2002 if (!i.prefix[q])
2003 ++i.prefixes;
2004 i.prefix[q] |= prefix;
2005 }
2006 else
2007 as_bad (_("same type of prefix used twice"));
2008
2009 return ret;
2010 }
2011
2012 static void
2013 update_code_flag (int value, int check)
2014 {
2015 PRINTF_LIKE ((*as_error));
2016
2017 flag_code = (enum flag_code) value;
2018 if (flag_code == CODE_64BIT)
2019 {
2020 cpu_arch_flags.bitfield.cpu64 = 1;
2021 cpu_arch_flags.bitfield.cpuno64 = 0;
2022 }
2023 else
2024 {
2025 cpu_arch_flags.bitfield.cpu64 = 0;
2026 cpu_arch_flags.bitfield.cpuno64 = 1;
2027 }
2028   if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2029 {
2030 if (check)
2031 as_error = as_fatal;
2032 else
2033 as_error = as_bad;
2034 (*as_error) (_("64bit mode not supported on `%s'."),
2035 cpu_arch_name ? cpu_arch_name : default_arch);
2036 }
2037 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2038 {
2039 if (check)
2040 as_error = as_fatal;
2041 else
2042 as_error = as_bad;
2043 (*as_error) (_("32bit mode not supported on `%s'."),
2044 cpu_arch_name ? cpu_arch_name : default_arch);
2045 }
2046 stackop_size = '\0';
2047 }
2048
2049 static void
2050 set_code_flag (int value)
2051 {
2052 update_code_flag (value, 0);
2053 }
2054
2055 static void
2056 set_16bit_gcc_code_flag (int new_code_flag)
2057 {
2058 flag_code = (enum flag_code) new_code_flag;
2059 if (flag_code != CODE_16BIT)
2060 abort ();
2061 cpu_arch_flags.bitfield.cpu64 = 0;
2062 cpu_arch_flags.bitfield.cpuno64 = 1;
2063 stackop_size = LONG_MNEM_SUFFIX;
2064 }
2065
2066 static void
2067 set_intel_syntax (int syntax_flag)
2068 {
2069 /* Find out if register prefixing is specified. */
2070 int ask_naked_reg = 0;
2071
2072 SKIP_WHITESPACE ();
2073 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2074 {
2075 char *string = input_line_pointer;
2076 int e = get_symbol_end ();
2077
2078 if (strcmp (string, "prefix") == 0)
2079 ask_naked_reg = 1;
2080 else if (strcmp (string, "noprefix") == 0)
2081 ask_naked_reg = -1;
2082 else
2083 as_bad (_("bad argument to syntax directive."));
2084 *input_line_pointer = e;
2085 }
2086 demand_empty_rest_of_line ();
2087
2088 intel_syntax = syntax_flag;
2089
2090 if (ask_naked_reg == 0)
2091 allow_naked_reg = (intel_syntax
2092 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2093 else
2094 allow_naked_reg = (ask_naked_reg < 0);
2095
2096 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2097
2098 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2099 identifier_chars['$'] = intel_syntax ? '$' : 0;
2100 register_prefix = allow_naked_reg ? "" : "%";
2101 }
2102
2103 static void
2104 set_intel_mnemonic (int mnemonic_flag)
2105 {
2106 intel_mnemonic = mnemonic_flag;
2107 }
2108
2109 static void
2110 set_allow_index_reg (int flag)
2111 {
2112 allow_index_reg = flag;
2113 }
2114
2115 static void
2116 set_check (int what)
2117 {
2118 enum check_kind *kind;
2119 const char *str;
2120
2121 if (what)
2122 {
2123 kind = &operand_check;
2124 str = "operand";
2125 }
2126 else
2127 {
2128 kind = &sse_check;
2129 str = "sse";
2130 }
2131
2132 SKIP_WHITESPACE ();
2133
2134 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2135 {
2136 char *string = input_line_pointer;
2137 int e = get_symbol_end ();
2138
2139 if (strcmp (string, "none") == 0)
2140 *kind = check_none;
2141 else if (strcmp (string, "warning") == 0)
2142 *kind = check_warning;
2143 else if (strcmp (string, "error") == 0)
2144 *kind = check_error;
2145 else
2146 as_bad (_("bad argument to %s_check directive."), str);
2147 *input_line_pointer = e;
2148 }
2149 else
2150 as_bad (_("missing argument for %s_check directive"), str);
2151
2152 demand_empty_rest_of_line ();
2153 }
2154
2155 static void
2156 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2157 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2158 {
2159 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2160 static const char *arch;
2161
2162   /* Intel L1OM and K1OM are only supported on ELF. */
2163 if (!IS_ELF)
2164 return;
2165
2166 if (!arch)
2167 {
2168 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2169 use default_arch. */
2170 arch = cpu_arch_name;
2171 if (!arch)
2172 arch = default_arch;
2173 }
2174
2175 /* If we are targeting Intel L1OM, we must enable it. */
2176 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2177 || new_flag.bitfield.cpul1om)
2178 return;
2179
2180 /* If we are targeting Intel K1OM, we must enable it. */
2181 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2182 || new_flag.bitfield.cpuk1om)
2183 return;
2184
2185 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2186 #endif
2187 }
2188
2189 static void
2190 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2191 {
2192 SKIP_WHITESPACE ();
2193
2194 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2195 {
2196 char *string = input_line_pointer;
2197 int e = get_symbol_end ();
2198 unsigned int j;
2199 i386_cpu_flags flags;
2200
2201 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2202 {
2203 if (strcmp (string, cpu_arch[j].name) == 0)
2204 {
2205 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2206
2207 if (*string != '.')
2208 {
2209 cpu_arch_name = cpu_arch[j].name;
2210 cpu_sub_arch_name = NULL;
2211 cpu_arch_flags = cpu_arch[j].flags;
2212 if (flag_code == CODE_64BIT)
2213 {
2214 cpu_arch_flags.bitfield.cpu64 = 1;
2215 cpu_arch_flags.bitfield.cpuno64 = 0;
2216 }
2217 else
2218 {
2219 cpu_arch_flags.bitfield.cpu64 = 0;
2220 cpu_arch_flags.bitfield.cpuno64 = 1;
2221 }
2222 cpu_arch_isa = cpu_arch[j].type;
2223 cpu_arch_isa_flags = cpu_arch[j].flags;
2224 if (!cpu_arch_tune_set)
2225 {
2226 cpu_arch_tune = cpu_arch_isa;
2227 cpu_arch_tune_flags = cpu_arch_isa_flags;
2228 }
2229 break;
2230 }
2231
2232 if (!cpu_arch[j].negated)
2233 flags = cpu_flags_or (cpu_arch_flags,
2234 cpu_arch[j].flags);
2235 else
2236 flags = cpu_flags_and_not (cpu_arch_flags,
2237 cpu_arch[j].flags);
2238 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2239 {
2240 if (cpu_sub_arch_name)
2241 {
2242 char *name = cpu_sub_arch_name;
2243 cpu_sub_arch_name = concat (name,
2244 cpu_arch[j].name,
2245 (const char *) NULL);
2246 free (name);
2247 }
2248 else
2249 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2250 cpu_arch_flags = flags;
2251 cpu_arch_isa_flags = flags;
2252 }
2253 *input_line_pointer = e;
2254 demand_empty_rest_of_line ();
2255 return;
2256 }
2257 }
2258 if (j >= ARRAY_SIZE (cpu_arch))
2259 as_bad (_("no such architecture: `%s'"), string);
2260
2261 *input_line_pointer = e;
2262 }
2263 else
2264 as_bad (_("missing cpu architecture"));
2265
2266 no_cond_jump_promotion = 0;
2267 if (*input_line_pointer == ','
2268 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2269 {
2270 char *string = ++input_line_pointer;
2271 int e = get_symbol_end ();
2272
2273 if (strcmp (string, "nojumps") == 0)
2274 no_cond_jump_promotion = 1;
2275 else if (strcmp (string, "jumps") == 0)
2276 ;
2277 else
2278 as_bad (_("no such architecture modifier: `%s'"), string);
2279
2280 *input_line_pointer = e;
2281 }
2282
2283 demand_empty_rest_of_line ();
2284 }
2285
2286 enum bfd_architecture
2287 i386_arch (void)
2288 {
2289 if (cpu_arch_isa == PROCESSOR_L1OM)
2290 {
2291 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2292 || flag_code != CODE_64BIT)
2293 as_fatal (_("Intel L1OM is 64bit ELF only"));
2294 return bfd_arch_l1om;
2295 }
2296 else if (cpu_arch_isa == PROCESSOR_K1OM)
2297 {
2298 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2299 || flag_code != CODE_64BIT)
2300 as_fatal (_("Intel K1OM is 64bit ELF only"));
2301 return bfd_arch_k1om;
2302 }
2303 else
2304 return bfd_arch_i386;
2305 }
2306
2307 unsigned long
2308 i386_mach (void)
2309 {
2310 if (!strncmp (default_arch, "x86_64", 6))
2311 {
2312 if (cpu_arch_isa == PROCESSOR_L1OM)
2313 {
2314 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2315 || default_arch[6] != '\0')
2316 as_fatal (_("Intel L1OM is 64bit ELF only"));
2317 return bfd_mach_l1om;
2318 }
2319 else if (cpu_arch_isa == PROCESSOR_K1OM)
2320 {
2321 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2322 || default_arch[6] != '\0')
2323 as_fatal (_("Intel K1OM is 64bit ELF only"));
2324 return bfd_mach_k1om;
2325 }
2326 else if (default_arch[6] == '\0')
2327 return bfd_mach_x86_64;
2328 else
2329 return bfd_mach_x64_32;
2330 }
2331 else if (!strcmp (default_arch, "i386"))
2332 return bfd_mach_i386_i386;
2333 else
2334 as_fatal (_("unknown architecture"));
2335 }
2336 \f
2337 void
2338 md_begin (void)
2339 {
2340 const char *hash_err;
2341
2342 /* Initialize op_hash hash table. */
2343 op_hash = hash_new ();
2344
2345 {
2346 const insn_template *optab;
2347 templates *core_optab;
2348
2349 /* Setup for loop. */
2350 optab = i386_optab;
2351 core_optab = (templates *) xmalloc (sizeof (templates));
2352 core_optab->start = optab;
2353
2354 while (1)
2355 {
2356 ++optab;
2357 if (optab->name == NULL
2358 || strcmp (optab->name, (optab - 1)->name) != 0)
2359 {
2360 /* different name --> ship out current template list;
2361 add to hash table; & begin anew. */
2362 core_optab->end = optab;
2363 hash_err = hash_insert (op_hash,
2364 (optab - 1)->name,
2365 (void *) core_optab);
2366 if (hash_err)
2367 {
2368 as_fatal (_("internal Error: Can't hash %s: %s"),
2369 (optab - 1)->name,
2370 hash_err);
2371 }
2372 if (optab->name == NULL)
2373 break;
2374 core_optab = (templates *) xmalloc (sizeof (templates));
2375 core_optab->start = optab;
2376 }
2377 }
2378 }
2379
2380 /* Initialize reg_hash hash table. */
2381 reg_hash = hash_new ();
2382 {
2383 const reg_entry *regtab;
2384 unsigned int regtab_size = i386_regtab_size;
2385
2386 for (regtab = i386_regtab; regtab_size--; regtab++)
2387 {
2388 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2389 if (hash_err)
2390 as_fatal (_("internal Error: Can't hash %s: %s"),
2391 regtab->reg_name,
2392 hash_err);
2393 }
2394 }
2395
2396 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2397 {
2398 int c;
2399 char *p;
2400
2401 for (c = 0; c < 256; c++)
2402 {
2403 if (ISDIGIT (c))
2404 {
2405 digit_chars[c] = c;
2406 mnemonic_chars[c] = c;
2407 register_chars[c] = c;
2408 operand_chars[c] = c;
2409 }
2410 else if (ISLOWER (c))
2411 {
2412 mnemonic_chars[c] = c;
2413 register_chars[c] = c;
2414 operand_chars[c] = c;
2415 }
2416 else if (ISUPPER (c))
2417 {
2418 mnemonic_chars[c] = TOLOWER (c);
2419 register_chars[c] = mnemonic_chars[c];
2420 operand_chars[c] = c;
2421 }
2422
2423 if (ISALPHA (c) || ISDIGIT (c))
2424 identifier_chars[c] = c;
2425 else if (c >= 128)
2426 {
2427 identifier_chars[c] = c;
2428 operand_chars[c] = c;
2429 }
2430 }
2431
2432 #ifdef LEX_AT
2433 identifier_chars['@'] = '@';
2434 #endif
2435 #ifdef LEX_QM
2436 identifier_chars['?'] = '?';
2437 operand_chars['?'] = '?';
2438 #endif
2439 digit_chars['-'] = '-';
2440 mnemonic_chars['_'] = '_';
2441 mnemonic_chars['-'] = '-';
2442 mnemonic_chars['.'] = '.';
2443 identifier_chars['_'] = '_';
2444 identifier_chars['.'] = '.';
2445
2446 for (p = operand_special_chars; *p != '\0'; p++)
2447 operand_chars[(unsigned char) *p] = *p;
2448 }
2449
2450 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2451 if (IS_ELF)
2452 {
2453 record_alignment (text_section, 2);
2454 record_alignment (data_section, 2);
2455 record_alignment (bss_section, 2);
2456 }
2457 #endif
2458
2459 if (flag_code == CODE_64BIT)
2460 {
2461 #if defined (OBJ_COFF) && defined (TE_PE)
2462 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2463 ? 32 : 16);
2464 #else
2465 x86_dwarf2_return_column = 16;
2466 #endif
2467 x86_cie_data_alignment = -8;
2468 }
2469 else
2470 {
2471 x86_dwarf2_return_column = 8;
2472 x86_cie_data_alignment = -4;
2473 }
2474 }
2475
2476 void
2477 i386_print_statistics (FILE *file)
2478 {
2479 hash_print_statistics (file, "i386 opcode", op_hash);
2480 hash_print_statistics (file, "i386 register", reg_hash);
2481 }
2482 \f
2483 #ifdef DEBUG386
2484
2485 /* Debugging routines for md_assemble. */
2486 static void pte (insn_template *);
2487 static void pt (i386_operand_type);
2488 static void pe (expressionS *);
2489 static void ps (symbolS *);
2490
2491 static void
2492 pi (char *line, i386_insn *x)
2493 {
2494 unsigned int j;
2495
2496 fprintf (stdout, "%s: template ", line);
2497 pte (&x->tm);
2498 fprintf (stdout, " address: base %s index %s scale %x\n",
2499 x->base_reg ? x->base_reg->reg_name : "none",
2500 x->index_reg ? x->index_reg->reg_name : "none",
2501 x->log2_scale_factor);
2502 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2503 x->rm.mode, x->rm.reg, x->rm.regmem);
2504 fprintf (stdout, " sib: base %x index %x scale %x\n",
2505 x->sib.base, x->sib.index, x->sib.scale);
2506 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2507 (x->rex & REX_W) != 0,
2508 (x->rex & REX_R) != 0,
2509 (x->rex & REX_X) != 0,
2510 (x->rex & REX_B) != 0);
2511 for (j = 0; j < x->operands; j++)
2512 {
2513 fprintf (stdout, " #%d: ", j + 1);
2514 pt (x->types[j]);
2515 fprintf (stdout, "\n");
2516 if (x->types[j].bitfield.reg8
2517 || x->types[j].bitfield.reg16
2518 || x->types[j].bitfield.reg32
2519 || x->types[j].bitfield.reg64
2520 || x->types[j].bitfield.regmmx
2521 || x->types[j].bitfield.regxmm
2522 || x->types[j].bitfield.regymm
2523 || x->types[j].bitfield.sreg2
2524 || x->types[j].bitfield.sreg3
2525 || x->types[j].bitfield.control
2526 || x->types[j].bitfield.debug
2527 || x->types[j].bitfield.test)
2528 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2529 if (operand_type_check (x->types[j], imm))
2530 pe (x->op[j].imms);
2531 if (operand_type_check (x->types[j], disp))
2532 pe (x->op[j].disps);
2533 }
2534 }
2535
2536 static void
2537 pte (insn_template *t)
2538 {
2539 unsigned int j;
2540 fprintf (stdout, " %d operands ", t->operands);
2541 fprintf (stdout, "opcode %x ", t->base_opcode);
2542 if (t->extension_opcode != None)
2543 fprintf (stdout, "ext %x ", t->extension_opcode);
2544 if (t->opcode_modifier.d)
2545 fprintf (stdout, "D");
2546 if (t->opcode_modifier.w)
2547 fprintf (stdout, "W");
2548 fprintf (stdout, "\n");
2549 for (j = 0; j < t->operands; j++)
2550 {
2551 fprintf (stdout, " #%d type ", j + 1);
2552 pt (t->operand_types[j]);
2553 fprintf (stdout, "\n");
2554 }
2555 }
2556
2557 static void
2558 pe (expressionS *e)
2559 {
2560 fprintf (stdout, " operation %d\n", e->X_op);
2561 fprintf (stdout, " add_number %ld (%lx)\n",
2562 (long) e->X_add_number, (long) e->X_add_number);
2563 if (e->X_add_symbol)
2564 {
2565 fprintf (stdout, " add_symbol ");
2566 ps (e->X_add_symbol);
2567 fprintf (stdout, "\n");
2568 }
2569 if (e->X_op_symbol)
2570 {
2571 fprintf (stdout, " op_symbol ");
2572 ps (e->X_op_symbol);
2573 fprintf (stdout, "\n");
2574 }
2575 }
2576
2577 static void
2578 ps (symbolS *s)
2579 {
2580 fprintf (stdout, "%s type %s%s",
2581 S_GET_NAME (s),
2582 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2583 segment_name (S_GET_SEGMENT (s)));
2584 }
2585
2586 static struct type_name
2587 {
2588 i386_operand_type mask;
2589 const char *name;
2590 }
2591 const type_names[] =
2592 {
2593 { OPERAND_TYPE_REG8, "r8" },
2594 { OPERAND_TYPE_REG16, "r16" },
2595 { OPERAND_TYPE_REG32, "r32" },
2596 { OPERAND_TYPE_REG64, "r64" },
2597 { OPERAND_TYPE_IMM8, "i8" },
2598 { OPERAND_TYPE_IMM8, "i8s" },
2599 { OPERAND_TYPE_IMM16, "i16" },
2600 { OPERAND_TYPE_IMM32, "i32" },
2601 { OPERAND_TYPE_IMM32S, "i32s" },
2602 { OPERAND_TYPE_IMM64, "i64" },
2603 { OPERAND_TYPE_IMM1, "i1" },
2604 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2605 { OPERAND_TYPE_DISP8, "d8" },
2606 { OPERAND_TYPE_DISP16, "d16" },
2607 { OPERAND_TYPE_DISP32, "d32" },
2608 { OPERAND_TYPE_DISP32S, "d32s" },
2609 { OPERAND_TYPE_DISP64, "d64" },
2610 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2611 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2612 { OPERAND_TYPE_CONTROL, "control reg" },
2613 { OPERAND_TYPE_TEST, "test reg" },
2614 { OPERAND_TYPE_DEBUG, "debug reg" },
2615 { OPERAND_TYPE_FLOATREG, "FReg" },
2616 { OPERAND_TYPE_FLOATACC, "FAcc" },
2617 { OPERAND_TYPE_SREG2, "SReg2" },
2618 { OPERAND_TYPE_SREG3, "SReg3" },
2619 { OPERAND_TYPE_ACC, "Acc" },
2620 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2621 { OPERAND_TYPE_REGMMX, "rMMX" },
2622 { OPERAND_TYPE_REGXMM, "rXMM" },
2623 { OPERAND_TYPE_REGYMM, "rYMM" },
2624 { OPERAND_TYPE_ESSEG, "es" },
2625 };
2626
2627 static void
2628 pt (i386_operand_type t)
2629 {
2630 unsigned int j;
2631 i386_operand_type a;
2632
2633 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2634 {
2635 a = operand_type_and (t, type_names[j].mask);
2636 if (!operand_type_all_zero (&a))
2637 fprintf (stdout, "%s, ", type_names[j].name);
2638 }
2639 fflush (stdout);
2640 }
2641
2642 #endif /* DEBUG386 */
2643 \f
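/* Translate a fixup into a BFD relocation code.  SIZE is the field width
   in bytes, PCREL and SIGN describe the field (a negative SIGN suppresses
   the signedness check), and OTHER, when not NO_RELOC, is an explicitly
   requested relocation which is checked against the field and possibly
   widened for 8-byte fields.  Returns NO_RELOC after diagnosing an error.  */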
2644 static bfd_reloc_code_real_type
2645 reloc (unsigned int size,
2646 int pcrel,
2647 int sign,
2648 bfd_reloc_code_real_type other)
2649 {
2650 if (other != NO_RELOC)
2651 {
2652 reloc_howto_type *rel;
2653
2654 if (size == 8)
2655 switch (other)
2656 {
2657 case BFD_RELOC_X86_64_GOT32:
2658 return BFD_RELOC_X86_64_GOT64;
2659 break;
2660 case BFD_RELOC_X86_64_PLTOFF64:
2661 return BFD_RELOC_X86_64_PLTOFF64;
2662 break;
2663 case BFD_RELOC_X86_64_GOTPC32:
2664 other = BFD_RELOC_X86_64_GOTPC64;
2665 break;
2666 case BFD_RELOC_X86_64_GOTPCREL:
2667 other = BFD_RELOC_X86_64_GOTPCREL64;
2668 break;
2669 case BFD_RELOC_X86_64_TPOFF32:
2670 other = BFD_RELOC_X86_64_TPOFF64;
2671 break;
2672 case BFD_RELOC_X86_64_DTPOFF32:
2673 other = BFD_RELOC_X86_64_DTPOFF64;
2674 break;
2675 default:
2676 break;
2677 }
2678
2679 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2680 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2681 sign = -1;
2682
2683 rel = bfd_reloc_type_lookup (stdoutput, other);
2684 if (!rel)
2685 as_bad (_("unknown relocation (%u)"), other);
2686 else if (size != bfd_get_reloc_size (rel))
2687 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2688 bfd_get_reloc_size (rel),
2689 size);
2690 else if (pcrel && !rel->pc_relative)
2691 as_bad (_("non-pc-relative relocation for pc-relative field"));
2692 else if ((rel->complain_on_overflow == complain_overflow_signed
2693 && !sign)
2694 || (rel->complain_on_overflow == complain_overflow_unsigned
2695 && sign > 0))
2696 as_bad (_("relocated field and relocation type differ in signedness"));
2697 else
2698 return other;
2699 return NO_RELOC;
2700 }
2701
2702 if (pcrel)
2703 {
2704 if (!sign)
2705 as_bad (_("there are no unsigned pc-relative relocations"));
2706 switch (size)
2707 {
2708 case 1: return BFD_RELOC_8_PCREL;
2709 case 2: return BFD_RELOC_16_PCREL;
2710 case 4: return BFD_RELOC_32_PCREL;
2711 case 8: return BFD_RELOC_64_PCREL;
2712 }
2713 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2714 }
2715 else
2716 {
2717 if (sign > 0)
2718 switch (size)
2719 {
2720 case 4: return BFD_RELOC_X86_64_32S;
2721 }
2722 else
2723 switch (size)
2724 {
2725 case 1: return BFD_RELOC_8;
2726 case 2: return BFD_RELOC_16;
2727 case 4: return BFD_RELOC_32;
2728 case 8: return BFD_RELOC_64;
2729 }
2730 as_bad (_("cannot do %s %u byte relocation"),
2731 sign > 0 ? "signed" : "unsigned", size);
2732 }
2733
2734 return NO_RELOC;
2735 }
2736
2737 /* Here we decide which fixups can be adjusted to make them relative to
2738 the beginning of the section instead of the symbol. Basically we need
2739 to make sure that the dynamic relocations are done correctly, so in
2740 some cases we force the original symbol to be used. */
2741
2742 int
2743 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2744 {
2745 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2746 if (!IS_ELF)
2747 return 1;
2748
2749 /* Don't adjust pc-relative references to merge sections in 64-bit
2750 mode. */
2751 if (use_rela_relocations
2752 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2753 && fixP->fx_pcrel)
2754 return 0;
2755
2756 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2757 and changed later by validate_fix. */
2758 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2759 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2760 return 0;
2761
2762 /* adjust_reloc_syms doesn't know about the GOT. */
2763 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2764 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2765 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2766 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2767 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2768 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2769 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2770 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2771 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2772 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2773 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2774 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2775 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2776 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2777 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2778 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2779 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2780 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2781 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2782 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2783 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2784 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2785 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2786 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2787 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2788 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2789 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2790 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2791 return 0;
2792 #endif
2793 return 1;
2794 }
2795
2796 static int
2797 intel_float_operand (const char *mnemonic)
2798 {
2799 /* Note that the value returned is meaningful only for opcodes with (memory)
2800 operands, hence the code here is free to improperly handle opcodes that
2801 have no operands (for better performance and smaller code). */
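  /* Summarizing the cases below: 0 = not a floating point operation
     (including fxsave/fxrstor), 1 = ordinary FP operation, 2 = FP
     operation on integer operands (fi...), 3 = FP control/state
     operation such as fldcw, fnsave, frstor or fstenv.  */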
2802
2803 if (mnemonic[0] != 'f')
2804 return 0; /* non-math */
2805
2806 switch (mnemonic[1])
2807 {
2808       /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm and
2809 	 the fs segment override prefix are not handled here, because no
2810 	 call path can reach this code with an opcode that has no operands.  */
2811 case 'i':
2812 return 2 /* integer op */;
2813 case 'l':
2814 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2815 return 3; /* fldcw/fldenv */
2816 break;
2817 case 'n':
2818 if (mnemonic[2] != 'o' /* fnop */)
2819 return 3; /* non-waiting control op */
2820 break;
2821 case 'r':
2822 if (mnemonic[2] == 's')
2823 return 3; /* frstor/frstpm */
2824 break;
2825 case 's':
2826 if (mnemonic[2] == 'a')
2827 return 3; /* fsave */
2828 if (mnemonic[2] == 't')
2829 {
2830 switch (mnemonic[3])
2831 {
2832 case 'c': /* fstcw */
2833 case 'd': /* fstdw */
2834 case 'e': /* fstenv */
2835 case 's': /* fsts[gw] */
2836 return 3;
2837 }
2838 }
2839 break;
2840 case 'x':
2841 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2842 return 0; /* fxsave/fxrstor are not really math ops */
2843 break;
2844 }
2845
2846 return 1;
2847 }
2848
2849 /* Build the VEX prefix. */
2850
2851 static void
2852 build_vex_prefix (const insn_template *t)
2853 {
2854 unsigned int register_specifier;
2855 unsigned int implied_prefix;
2856 unsigned int vector_length;
2857
2858 /* Check register specifier. */
2859 if (i.vex.register_specifier)
2860 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2861 else
2862 register_specifier = 0xf;
2863
2864   /* Use 2-byte VEX prefix by swapping destination and source
2865      operands.  */
2866 if (!i.swap_operand
2867 && i.operands == i.reg_operands
2868 && i.tm.opcode_modifier.vexopcode == VEX0F
2869 && i.tm.opcode_modifier.s
2870 && i.rex == REX_B)
2871 {
2872 unsigned int xchg = i.operands - 1;
2873 union i386_op temp_op;
2874 i386_operand_type temp_type;
2875
2876 temp_type = i.types[xchg];
2877 i.types[xchg] = i.types[0];
2878 i.types[0] = temp_type;
2879 temp_op = i.op[xchg];
2880 i.op[xchg] = i.op[0];
2881 i.op[0] = temp_op;
2882
2883 gas_assert (i.rm.mode == 3);
2884
2885 i.rex = REX_R;
2886 xchg = i.rm.regmem;
2887 i.rm.regmem = i.rm.reg;
2888 i.rm.reg = xchg;
2889
2890       /* Use the next template in the list.  */
2891 i.tm = t[1];
2892 }
2893
2894 if (i.tm.opcode_modifier.vex == VEXScalar)
2895 vector_length = avxscalar;
2896 else
2897 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2898
2899 switch ((i.tm.base_opcode >> 8) & 0xff)
2900 {
2901 case 0:
2902 implied_prefix = 0;
2903 break;
2904 case DATA_PREFIX_OPCODE:
2905 implied_prefix = 1;
2906 break;
2907 case REPE_PREFIX_OPCODE:
2908 implied_prefix = 2;
2909 break;
2910 case REPNE_PREFIX_OPCODE:
2911 implied_prefix = 3;
2912 break;
2913 default:
2914 abort ();
2915 }
2916
2917 /* Use 2-byte VEX prefix if possible. */
2918 if (i.tm.opcode_modifier.vexopcode == VEX0F
2919 && i.tm.opcode_modifier.vexw != VEXW1
2920 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2921 {
2922 /* 2-byte VEX prefix. */
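      /* Its second byte is laid out as: bit 7 = inverted REX.R,
	 bits 6:3 = inverted vvvv register specifier, bit 2 = vector
	 length (L), bits 1:0 = implied SIMD prefix (pp).  */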
2923 unsigned int r;
2924
2925 i.vex.length = 2;
2926 i.vex.bytes[0] = 0xc5;
2927
2928 /* Check the REX.R bit. */
2929 r = (i.rex & REX_R) ? 0 : 1;
2930 i.vex.bytes[1] = (r << 7
2931 | register_specifier << 3
2932 | vector_length << 2
2933 | implied_prefix);
2934 }
2935 else
2936 {
2937 /* 3-byte VEX prefix. */
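      /* Byte 0 is 0xc4 (VEX) or 0x8f (XOP); byte 1 holds the inverted
	 REX R/X/B bits in bits 7:5 and the opcode map (mmmmm) in bits
	 4:0; byte 2 (W, inverted vvvv, L, pp) is filled in below.  */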
2938 unsigned int m, w;
2939
2940 i.vex.length = 3;
2941
2942 switch (i.tm.opcode_modifier.vexopcode)
2943 {
2944 case VEX0F:
2945 m = 0x1;
2946 i.vex.bytes[0] = 0xc4;
2947 break;
2948 case VEX0F38:
2949 m = 0x2;
2950 i.vex.bytes[0] = 0xc4;
2951 break;
2952 case VEX0F3A:
2953 m = 0x3;
2954 i.vex.bytes[0] = 0xc4;
2955 break;
2956 case XOP08:
2957 m = 0x8;
2958 i.vex.bytes[0] = 0x8f;
2959 break;
2960 case XOP09:
2961 m = 0x9;
2962 i.vex.bytes[0] = 0x8f;
2963 break;
2964 case XOP0A:
2965 m = 0xa;
2966 i.vex.bytes[0] = 0x8f;
2967 break;
2968 default:
2969 abort ();
2970 }
2971
2972       /* The high 3 bits of the second VEX byte are the one's complement
2973 	 of the RXB bits from REX.  */
2974 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2975
2976 /* Check the REX.W bit. */
2977 w = (i.rex & REX_W) ? 1 : 0;
2978 if (i.tm.opcode_modifier.vexw)
2979 {
2980 if (w)
2981 abort ();
2982
2983 if (i.tm.opcode_modifier.vexw == VEXW1)
2984 w = 1;
2985 }
2986
2987 i.vex.bytes[2] = (w << 7
2988 | register_specifier << 3
2989 | vector_length << 2
2990 | implied_prefix);
2991 }
2992 }
2993
2994 static void
2995 process_immext (void)
2996 {
2997 expressionS *exp;
2998
2999 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3000 && i.operands > 0)
3001 {
3002 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3003 with an opcode suffix which is coded in the same place as an
3004 8-bit immediate field would be.
3005 Here we check those operands and remove them afterwards. */
3006 unsigned int x;
3007
3008 for (x = 0; x < i.operands; x++)
3009 if (register_number (i.op[x].regs) != x)
3010 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3011 register_prefix, i.op[x].regs->reg_name, x + 1,
3012 i.tm.name);
3013
3014 i.operands = 0;
3015 }
3016
3017 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3018 which is coded in the same place as an 8-bit immediate field
3019 would be. Here we fake an 8-bit immediate operand from the
3020 opcode suffix stored in tm.extension_opcode.
3021
3022      AVX instructions also use this encoding for some
3023      three-argument instructions.  */
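  /* For instance, the AMD 3DNow! insns are all encoded as 0x0f0f /r
     followed by a one-byte opcode suffix; that suffix is what ends up
     in tm.extension_opcode and is emitted via this fake Imm8 operand.  */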
3024
3025 gas_assert (i.imm_operands == 0
3026 && (i.operands <= 2
3027 || (i.tm.opcode_modifier.vex
3028 && i.operands <= 4)));
3029
3030 exp = &im_expressions[i.imm_operands++];
3031 i.op[i.operands].imms = exp;
3032 i.types[i.operands] = imm8;
3033 i.operands++;
3034 exp->X_op = O_constant;
3035 exp->X_add_number = i.tm.extension_opcode;
3036 i.tm.extension_opcode = None;
3037 }
3038
3039
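/* Check that an xacquire/xrelease (HLE) prefix is legal for the current
   instruction.  The template's hleprefixok field tells us whether the insn
   allows HLE at all, requires an accompanying lock prefix, or (for
   xrelease only) additionally requires a memory destination.  */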
3040 static int
3041 check_hle (void)
3042 {
3043 switch (i.tm.opcode_modifier.hleprefixok)
3044 {
3045 default:
3046 abort ();
3047 case HLEPrefixNone:
3048 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3049 as_bad (_("invalid instruction `%s' after `xacquire'"),
3050 i.tm.name);
3051 else
3052 as_bad (_("invalid instruction `%s' after `xrelease'"),
3053 i.tm.name);
3054 return 0;
3055 case HLEPrefixLock:
3056 if (i.prefix[LOCK_PREFIX])
3057 return 1;
3058 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3059 as_bad (_("missing `lock' with `xacquire'"));
3060 else
3061 as_bad (_("missing `lock' with `xrelease'"));
3062 return 0;
3063 case HLEPrefixAny:
3064 return 1;
3065 case HLEPrefixRelease:
3066 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3067 {
3068 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3069 i.tm.name);
3070 return 0;
3071 }
3072 if (i.mem_operands == 0
3073 || !operand_type_check (i.types[i.operands - 1], anymem))
3074 {
3075 as_bad (_("memory destination needed for instruction `%s'"
3076 " after `xrelease'"), i.tm.name);
3077 return 0;
3078 }
3079 return 1;
3080 }
3081 }
3082
3083 /* This is the guts of the machine-dependent assembler. LINE points to a
3084 machine dependent instruction. This function is supposed to emit
3085 the frags/bytes it assembles to. */
3086
3087 void
3088 md_assemble (char *line)
3089 {
3090 unsigned int j;
3091 char mnemonic[MAX_MNEM_SIZE];
3092 const insn_template *t;
3093
3094 /* Initialize globals. */
3095 memset (&i, '\0', sizeof (i));
3096 for (j = 0; j < MAX_OPERANDS; j++)
3097 i.reloc[j] = NO_RELOC;
3098 memset (disp_expressions, '\0', sizeof (disp_expressions));
3099 memset (im_expressions, '\0', sizeof (im_expressions));
3100 save_stack_p = save_stack;
3101
3102 /* First parse an instruction mnemonic & call i386_operand for the operands.
3103 We assume that the scrubber has arranged it so that line[0] is the valid
3104 start of a (possibly prefixed) mnemonic. */
3105
3106 line = parse_insn (line, mnemonic);
3107 if (line == NULL)
3108 return;
3109
3110 line = parse_operands (line, mnemonic);
3111 this_operand = -1;
3112 if (line == NULL)
3113 return;
3114
3115 /* Now we've parsed the mnemonic into a set of templates, and have the
3116 operands at hand. */
3117
3118 /* All intel opcodes have reversed operands except for "bound" and
3119 "enter". We also don't reverse intersegment "jmp" and "call"
3120 instructions with 2 immediate operands so that the immediate segment
3121 precedes the offset, as it does when in AT&T mode. */
3122 if (intel_syntax
3123 && i.operands > 1
3124 && (strcmp (mnemonic, "bound") != 0)
3125 && (strcmp (mnemonic, "invlpga") != 0)
3126 && !(operand_type_check (i.types[0], imm)
3127 && operand_type_check (i.types[1], imm)))
3128 swap_operands ();
3129
3130   /* The order of the immediates should be reversed for the
3131      two-immediate extrq and insertq instructions.  */
3132 if (i.imm_operands == 2
3133 && (strcmp (mnemonic, "extrq") == 0
3134 || strcmp (mnemonic, "insertq") == 0))
3135 swap_2_operands (0, 1);
3136
3137 if (i.imm_operands)
3138 optimize_imm ();
3139
3140 /* Don't optimize displacement for movabs since it only takes 64bit
3141 displacement. */
3142 if (i.disp_operands
3143 && i.disp_encoding != disp_encoding_32bit
3144 && (flag_code != CODE_64BIT
3145 || strcmp (mnemonic, "movabs") != 0))
3146 optimize_disp ();
3147
3148 /* Next, we find a template that matches the given insn,
3149 making sure the overlap of the given operands types is consistent
3150 with the template operand types. */
3151
3152 if (!(t = match_template ()))
3153 return;
3154
3155 if (sse_check != check_none
3156 && !i.tm.opcode_modifier.noavx
3157 && (i.tm.cpu_flags.bitfield.cpusse
3158 || i.tm.cpu_flags.bitfield.cpusse2
3159 || i.tm.cpu_flags.bitfield.cpusse3
3160 || i.tm.cpu_flags.bitfield.cpussse3
3161 || i.tm.cpu_flags.bitfield.cpusse4_1
3162 || i.tm.cpu_flags.bitfield.cpusse4_2))
3163 {
3164 (sse_check == check_warning
3165 ? as_warn
3166 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3167 }
3168
3169 /* Zap movzx and movsx suffix. The suffix has been set from
3170 "word ptr" or "byte ptr" on the source operand in Intel syntax
3171 or extracted from mnemonic in AT&T syntax. But we'll use
3172 the destination register to choose the suffix for encoding. */
3173 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3174 {
3175 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3176 there is no suffix, the default will be byte extension. */
3177 if (i.reg_operands != 2
3178 && !i.suffix
3179 && intel_syntax)
3180 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3181
3182 i.suffix = 0;
3183 }
3184
3185 if (i.tm.opcode_modifier.fwait)
3186 if (!add_prefix (FWAIT_OPCODE))
3187 return;
3188
3189 /* Check for lock without a lockable instruction. Destination operand
3190 must be memory unless it is xchg (0x86). */
3191 if (i.prefix[LOCK_PREFIX]
3192 && (!i.tm.opcode_modifier.islockable
3193 || i.mem_operands == 0
3194 || (i.tm.base_opcode != 0x86
3195 && !operand_type_check (i.types[i.operands - 1], anymem))))
3196 {
3197 as_bad (_("expecting lockable instruction after `lock'"));
3198 return;
3199 }
3200
3201 /* Check if HLE prefix is OK. */
3202 if (i.have_hle && !check_hle ())
3203 return;
3204
3205 /* Check string instruction segment overrides. */
3206 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3207 {
3208 if (!check_string ())
3209 return;
3210 i.disp_operands = 0;
3211 }
3212
3213 if (!process_suffix ())
3214 return;
3215
3216 /* Update operand types. */
3217 for (j = 0; j < i.operands; j++)
3218 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3219
3220 /* Make still unresolved immediate matches conform to size of immediate
3221 given in i.suffix. */
3222 if (!finalize_imm ())
3223 return;
3224
3225 if (i.types[0].bitfield.imm1)
3226 i.imm_operands = 0; /* kludge for shift insns. */
3227
3228 /* We only need to check those implicit registers for instructions
3229 with 3 operands or less. */
3230 if (i.operands <= 3)
3231 for (j = 0; j < i.operands; j++)
3232 if (i.types[j].bitfield.inoutportreg
3233 || i.types[j].bitfield.shiftcount
3234 || i.types[j].bitfield.acc
3235 || i.types[j].bitfield.floatacc)
3236 i.reg_operands--;
3237
3238 /* ImmExt should be processed after SSE2AVX. */
3239 if (!i.tm.opcode_modifier.sse2avx
3240 && i.tm.opcode_modifier.immext)
3241 process_immext ();
3242
3243 /* For insns with operands there are more diddles to do to the opcode. */
3244 if (i.operands)
3245 {
3246 if (!process_operands ())
3247 return;
3248 }
3249 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3250 {
3251 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3252 as_warn (_("translating to `%sp'"), i.tm.name);
3253 }
3254
3255 if (i.tm.opcode_modifier.vex)
3256 build_vex_prefix (t);
3257
3258 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3259 instructions may define INT_OPCODE as well, so avoid this corner
3260 case for those instructions that use MODRM. */
3261 if (i.tm.base_opcode == INT_OPCODE
3262 && !i.tm.opcode_modifier.modrm
3263 && i.op[0].imms->X_add_number == 3)
3264 {
3265 i.tm.base_opcode = INT3_OPCODE;
3266 i.imm_operands = 0;
3267 }
3268
3269 if ((i.tm.opcode_modifier.jump
3270 || i.tm.opcode_modifier.jumpbyte
3271 || i.tm.opcode_modifier.jumpdword)
3272 && i.op[0].disps->X_op == O_constant)
3273 {
3274 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3275 the absolute address given by the constant. Since ix86 jumps and
3276 calls are pc relative, we need to generate a reloc. */
3277 i.op[0].disps->X_add_symbol = &abs_symbol;
3278 i.op[0].disps->X_op = O_symbol;
3279 }
3280
3281 if (i.tm.opcode_modifier.rex64)
3282 i.rex |= REX_W;
3283
3284 /* For 8 bit registers we need an empty rex prefix. Also if the
3285 instruction already has a prefix, we need to convert old
3286 registers to new ones. */
3287
3288 if ((i.types[0].bitfield.reg8
3289 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3290 || (i.types[1].bitfield.reg8
3291 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3292 || ((i.types[0].bitfield.reg8
3293 || i.types[1].bitfield.reg8)
3294 && i.rex != 0))
3295 {
3296 int x;
3297
3298 i.rex |= REX_OPCODE;
3299 for (x = 0; x < 2; x++)
3300 {
3301 /* Look for 8 bit operand that uses old registers. */
3302 if (i.types[x].bitfield.reg8
3303 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3304 {
3305 	  /* If it is a high byte register (%ah .. %bh), give up.  */
3306 if (i.op[x].regs->reg_num > 3)
3307 as_bad (_("can't encode register '%s%s' in an "
3308 "instruction requiring REX prefix."),
3309 register_prefix, i.op[x].regs->reg_name);
3310
3311 /* Otherwise it is equivalent to the extended register.
3312 Since the encoding doesn't change this is merely
3313 cosmetic cleanup for debug output. */
3314
3315 i.op[x].regs = i.op[x].regs + 8;
3316 }
3317 }
3318 }
3319
3320 if (i.rex != 0)
3321 add_prefix (REX_OPCODE | i.rex);
3322
3323 /* We are ready to output the insn. */
3324 output_insn ();
3325 }
3326
3327 static char *
3328 parse_insn (char *line, char *mnemonic)
3329 {
3330 char *l = line;
3331 char *token_start = l;
3332 char *mnem_p;
3333 int supported;
3334 const insn_template *t;
3335 char *dot_p = NULL;
3336
3337 /* Non-zero if we found a prefix only acceptable with string insns. */
3338 const char *expecting_string_instruction = NULL;
3339
3340 while (1)
3341 {
3342 mnem_p = mnemonic;
3343 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3344 {
3345 if (*mnem_p == '.')
3346 dot_p = mnem_p;
3347 mnem_p++;
3348 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3349 {
3350 as_bad (_("no such instruction: `%s'"), token_start);
3351 return NULL;
3352 }
3353 l++;
3354 }
3355 if (!is_space_char (*l)
3356 && *l != END_OF_INSN
3357 && (intel_syntax
3358 || (*l != PREFIX_SEPARATOR
3359 && *l != ',')))
3360 {
3361 as_bad (_("invalid character %s in mnemonic"),
3362 output_invalid (*l));
3363 return NULL;
3364 }
3365 if (token_start == l)
3366 {
3367 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3368 as_bad (_("expecting prefix; got nothing"));
3369 else
3370 as_bad (_("expecting mnemonic; got nothing"));
3371 return NULL;
3372 }
3373
3374 /* Look up instruction (or prefix) via hash table. */
3375 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3376
3377 if (*l != END_OF_INSN
3378 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3379 && current_templates
3380 && current_templates->start->opcode_modifier.isprefix)
3381 {
3382 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3383 {
3384 as_bad ((flag_code != CODE_64BIT
3385 ? _("`%s' is only supported in 64-bit mode")
3386 : _("`%s' is not supported in 64-bit mode")),
3387 current_templates->start->name);
3388 return NULL;
3389 }
3390 /* If we are in 16-bit mode, do not allow addr16 or data16.
3391 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3392 if ((current_templates->start->opcode_modifier.size16
3393 || current_templates->start->opcode_modifier.size32)
3394 && flag_code != CODE_64BIT
3395 && (current_templates->start->opcode_modifier.size32
3396 ^ (flag_code == CODE_16BIT)))
3397 {
3398 as_bad (_("redundant %s prefix"),
3399 current_templates->start->name);
3400 return NULL;
3401 }
3402 /* Add prefix, checking for repeated prefixes. */
3403 switch (add_prefix (current_templates->start->base_opcode))
3404 {
3405 case PREFIX_EXIST:
3406 return NULL;
3407 case PREFIX_REP:
3408 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3409 i.have_hle = 1;
3410 else
3411 expecting_string_instruction = current_templates->start->name;
3412 break;
3413 default:
3414 break;
3415 }
3416 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3417 token_start = ++l;
3418 }
3419 else
3420 break;
3421 }
3422
3423 if (!current_templates)
3424 {
3425 /* Check if we should swap operand or force 32bit displacement in
3426 encoding. */
3427 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3428 i.swap_operand = 1;
3429 else if (mnem_p - 3 == dot_p
3430 && dot_p[1] == 'd'
3431 && dot_p[2] == '8')
3432 i.disp_encoding = disp_encoding_8bit;
3433 else if (mnem_p - 4 == dot_p
3434 && dot_p[1] == 'd'
3435 && dot_p[2] == '3'
3436 && dot_p[3] == '2')
3437 i.disp_encoding = disp_encoding_32bit;
3438 else
3439 goto check_suffix;
3440 mnem_p = dot_p;
3441 *dot_p = '\0';
3442 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3443 }
3444
3445 if (!current_templates)
3446 {
3447 check_suffix:
3448 /* See if we can get a match by trimming off a suffix. */
3449 switch (mnem_p[-1])
3450 {
3451 case WORD_MNEM_SUFFIX:
3452 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3453 i.suffix = SHORT_MNEM_SUFFIX;
3454 else
3455 case BYTE_MNEM_SUFFIX:
3456 case QWORD_MNEM_SUFFIX:
3457 i.suffix = mnem_p[-1];
3458 mnem_p[-1] = '\0';
3459 current_templates = (const templates *) hash_find (op_hash,
3460 mnemonic);
3461 break;
3462 case SHORT_MNEM_SUFFIX:
3463 case LONG_MNEM_SUFFIX:
3464 if (!intel_syntax)
3465 {
3466 i.suffix = mnem_p[-1];
3467 mnem_p[-1] = '\0';
3468 current_templates = (const templates *) hash_find (op_hash,
3469 mnemonic);
3470 }
3471 break;
3472
3473 /* Intel Syntax. */
3474 case 'd':
3475 if (intel_syntax)
3476 {
3477 if (intel_float_operand (mnemonic) == 1)
3478 i.suffix = SHORT_MNEM_SUFFIX;
3479 else
3480 i.suffix = LONG_MNEM_SUFFIX;
3481 mnem_p[-1] = '\0';
3482 current_templates = (const templates *) hash_find (op_hash,
3483 mnemonic);
3484 }
3485 break;
3486 }
3487 if (!current_templates)
3488 {
3489 as_bad (_("no such instruction: `%s'"), token_start);
3490 return NULL;
3491 }
3492 }
3493
3494 if (current_templates->start->opcode_modifier.jump
3495 || current_templates->start->opcode_modifier.jumpbyte)
3496 {
3497 /* Check for a branch hint. We allow ",pt" and ",pn" for
3498 predict taken and predict not taken respectively.
3499 I'm not sure that branch hints actually do anything on loop
3500 and jcxz insns (JumpByte) for current Pentium4 chips. They
3501 may work in the future and it doesn't hurt to accept them
3502 now. */
3503 if (l[0] == ',' && l[1] == 'p')
3504 {
3505 if (l[2] == 't')
3506 {
3507 if (!add_prefix (DS_PREFIX_OPCODE))
3508 return NULL;
3509 l += 3;
3510 }
3511 else if (l[2] == 'n')
3512 {
3513 if (!add_prefix (CS_PREFIX_OPCODE))
3514 return NULL;
3515 l += 3;
3516 }
3517 }
3518 }
3519 /* Any other comma loses. */
3520 if (*l == ',')
3521 {
3522 as_bad (_("invalid character %s in mnemonic"),
3523 output_invalid (*l));
3524 return NULL;
3525 }
3526
3527 /* Check if instruction is supported on specified architecture. */
3528 supported = 0;
3529 for (t = current_templates->start; t < current_templates->end; ++t)
3530 {
3531 supported |= cpu_flags_match (t);
3532 if (supported == CPU_FLAGS_PERFECT_MATCH)
3533 goto skip;
3534 }
3535
3536 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3537 {
3538 as_bad (flag_code == CODE_64BIT
3539 ? _("`%s' is not supported in 64-bit mode")
3540 : _("`%s' is only supported in 64-bit mode"),
3541 current_templates->start->name);
3542 return NULL;
3543 }
3544 if (supported != CPU_FLAGS_PERFECT_MATCH)
3545 {
3546 as_bad (_("`%s' is not supported on `%s%s'"),
3547 current_templates->start->name,
3548 cpu_arch_name ? cpu_arch_name : default_arch,
3549 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3550 return NULL;
3551 }
3552
3553 skip:
3554 if (!cpu_arch_flags.bitfield.cpui386
3555 && (flag_code != CODE_16BIT))
3556 {
3557 as_warn (_("use .code16 to ensure correct addressing mode"));
3558 }
3559
3560 /* Check for rep/repne without a string (or other allowed) instruction. */
3561 if (expecting_string_instruction)
3562 {
3563 static templates override;
3564
3565 for (t = current_templates->start; t < current_templates->end; ++t)
3566 if (t->opcode_modifier.repprefixok)
3567 break;
3568 if (t >= current_templates->end)
3569 {
3570 as_bad (_("expecting string instruction after `%s'"),
3571 expecting_string_instruction);
3572 return NULL;
3573 }
3574 for (override.start = t; t < current_templates->end; ++t)
3575 if (!t->opcode_modifier.repprefixok)
3576 break;
3577 override.end = t;
3578 current_templates = &override;
3579 }
3580
3581 return l;
3582 }
3583
3584 static char *
3585 parse_operands (char *l, const char *mnemonic)
3586 {
3587 char *token_start;
3588
3589 /* 1 if operand is pending after ','. */
3590 unsigned int expecting_operand = 0;
3591
3592 /* Non-zero if operand parens not balanced. */
3593 unsigned int paren_not_balanced;
3594
3595 while (*l != END_OF_INSN)
3596 {
3597 /* Skip optional white space before operand. */
3598 if (is_space_char (*l))
3599 ++l;
3600 if (!is_operand_char (*l) && *l != END_OF_INSN)
3601 {
3602 as_bad (_("invalid character %s before operand %d"),
3603 output_invalid (*l),
3604 i.operands + 1);
3605 return NULL;
3606 }
3607 token_start = l; /* after white space */
3608 paren_not_balanced = 0;
3609 while (paren_not_balanced || *l != ',')
3610 {
3611 if (*l == END_OF_INSN)
3612 {
3613 if (paren_not_balanced)
3614 {
3615 if (!intel_syntax)
3616 as_bad (_("unbalanced parenthesis in operand %d."),
3617 i.operands + 1);
3618 else
3619 as_bad (_("unbalanced brackets in operand %d."),
3620 i.operands + 1);
3621 return NULL;
3622 }
3623 else
3624 break; /* we are done */
3625 }
3626 else if (!is_operand_char (*l) && !is_space_char (*l))
3627 {
3628 as_bad (_("invalid character %s in operand %d"),
3629 output_invalid (*l),
3630 i.operands + 1);
3631 return NULL;
3632 }
3633 if (!intel_syntax)
3634 {
3635 if (*l == '(')
3636 ++paren_not_balanced;
3637 if (*l == ')')
3638 --paren_not_balanced;
3639 }
3640 else
3641 {
3642 if (*l == '[')
3643 ++paren_not_balanced;
3644 if (*l == ']')
3645 --paren_not_balanced;
3646 }
3647 l++;
3648 }
3649 if (l != token_start)
3650 { /* Yes, we've read in another operand. */
3651 unsigned int operand_ok;
3652 this_operand = i.operands++;
3653 i.types[this_operand].bitfield.unspecified = 1;
3654 if (i.operands > MAX_OPERANDS)
3655 {
3656 as_bad (_("spurious operands; (%d operands/instruction max)"),
3657 MAX_OPERANDS);
3658 return NULL;
3659 }
3660 /* Now parse operand adding info to 'i' as we go along. */
3661 END_STRING_AND_SAVE (l);
3662
3663 if (intel_syntax)
3664 operand_ok =
3665 i386_intel_operand (token_start,
3666 intel_float_operand (mnemonic));
3667 else
3668 operand_ok = i386_att_operand (token_start);
3669
3670 RESTORE_END_STRING (l);
3671 if (!operand_ok)
3672 return NULL;
3673 }
3674 else
3675 {
3676 if (expecting_operand)
3677 {
3678 expecting_operand_after_comma:
3679 as_bad (_("expecting operand after ','; got nothing"));
3680 return NULL;
3681 }
3682 if (*l == ',')
3683 {
3684 as_bad (_("expecting operand before ','; got nothing"));
3685 return NULL;
3686 }
3687 }
3688
3689 /* Now *l must be either ',' or END_OF_INSN. */
3690 if (*l == ',')
3691 {
3692 if (*++l == END_OF_INSN)
3693 {
3694 	      /* The comma has been skipped; nothing follows it, so complain.  */
3695 goto expecting_operand_after_comma;
3696 }
3697 expecting_operand = 1;
3698 }
3699 }
3700 return l;
3701 }
3702
3703 static void
3704 swap_2_operands (int xchg1, int xchg2)
3705 {
3706 union i386_op temp_op;
3707 i386_operand_type temp_type;
3708 enum bfd_reloc_code_real temp_reloc;
3709
3710 temp_type = i.types[xchg2];
3711 i.types[xchg2] = i.types[xchg1];
3712 i.types[xchg1] = temp_type;
3713 temp_op = i.op[xchg2];
3714 i.op[xchg2] = i.op[xchg1];
3715 i.op[xchg1] = temp_op;
3716 temp_reloc = i.reloc[xchg2];
3717 i.reloc[xchg2] = i.reloc[xchg1];
3718 i.reloc[xchg1] = temp_reloc;
3719 }
3720
3721 static void
3722 swap_operands (void)
3723 {
3724 switch (i.operands)
3725 {
3726 case 5:
3727 case 4:
3728 swap_2_operands (1, i.operands - 2);
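      /* Fall through: the outermost operand pair is swapped for all
	 supported operand counts.  */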
3729 case 3:
3730 case 2:
3731 swap_2_operands (0, i.operands - 1);
3732 break;
3733 default:
3734 abort ();
3735 }
3736
3737 if (i.mem_operands == 2)
3738 {
3739 const seg_entry *temp_seg;
3740 temp_seg = i.seg[0];
3741 i.seg[0] = i.seg[1];
3742 i.seg[1] = temp_seg;
3743 }
3744 }
3745
3746 /* Try to ensure constant immediates are represented in the smallest
3747 opcode possible. */
3748 static void
3749 optimize_imm (void)
3750 {
3751 char guess_suffix = 0;
3752 int op;
3753
3754 if (i.suffix)
3755 guess_suffix = i.suffix;
3756 else if (i.reg_operands)
3757 {
3758 /* Figure out a suffix from the last register operand specified.
3759 We can't do this properly yet, ie. excluding InOutPortReg,
3760 but the following works for instructions with immediates.
3761 In any case, we can't set i.suffix yet. */
3762 for (op = i.operands; --op >= 0;)
3763 if (i.types[op].bitfield.reg8)
3764 {
3765 guess_suffix = BYTE_MNEM_SUFFIX;
3766 break;
3767 }
3768 else if (i.types[op].bitfield.reg16)
3769 {
3770 guess_suffix = WORD_MNEM_SUFFIX;
3771 break;
3772 }
3773 else if (i.types[op].bitfield.reg32)
3774 {
3775 guess_suffix = LONG_MNEM_SUFFIX;
3776 break;
3777 }
3778 else if (i.types[op].bitfield.reg64)
3779 {
3780 guess_suffix = QWORD_MNEM_SUFFIX;
3781 break;
3782 }
3783 }
3784 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3785 guess_suffix = WORD_MNEM_SUFFIX;
3786
3787 for (op = i.operands; --op >= 0;)
3788 if (operand_type_check (i.types[op], imm))
3789 {
3790 switch (i.op[op].imms->X_op)
3791 {
3792 case O_constant:
3793 /* If a suffix is given, this operand may be shortened. */
3794 switch (guess_suffix)
3795 {
3796 case LONG_MNEM_SUFFIX:
3797 i.types[op].bitfield.imm32 = 1;
3798 i.types[op].bitfield.imm64 = 1;
3799 break;
3800 case WORD_MNEM_SUFFIX:
3801 i.types[op].bitfield.imm16 = 1;
3802 i.types[op].bitfield.imm32 = 1;
3803 i.types[op].bitfield.imm32s = 1;
3804 i.types[op].bitfield.imm64 = 1;
3805 break;
3806 case BYTE_MNEM_SUFFIX:
3807 i.types[op].bitfield.imm8 = 1;
3808 i.types[op].bitfield.imm8s = 1;
3809 i.types[op].bitfield.imm16 = 1;
3810 i.types[op].bitfield.imm32 = 1;
3811 i.types[op].bitfield.imm32s = 1;
3812 i.types[op].bitfield.imm64 = 1;
3813 break;
3814 }
3815
3816 /* If this operand is at most 16 bits, convert it
3817 to a signed 16 bit number before trying to see
3818 whether it will fit in an even smaller size.
3819 This allows a 16-bit operand such as $0xffe0 to
3820 be recognised as within Imm8S range. */
3821 if ((i.types[op].bitfield.imm16)
3822 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3823 {
3824 i.op[op].imms->X_add_number =
3825 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3826 }
3827 if ((i.types[op].bitfield.imm32)
3828 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3829 == 0))
3830 {
3831 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3832 ^ ((offsetT) 1 << 31))
3833 - ((offsetT) 1 << 31));
3834 }
3835 i.types[op]
3836 = operand_type_or (i.types[op],
3837 smallest_imm_type (i.op[op].imms->X_add_number));
3838
3839 	  /* We must avoid matching Imm32 templates when only a 64-bit
3840 	     immediate is available.  */
3841 if (guess_suffix == QWORD_MNEM_SUFFIX)
3842 i.types[op].bitfield.imm32 = 0;
3843 break;
3844
3845 case O_absent:
3846 case O_register:
3847 abort ();
3848
3849 /* Symbols and expressions. */
3850 default:
3851 /* Convert symbolic operand to proper sizes for matching, but don't
3852 prevent matching a set of insns that only supports sizes other
3853 than those matching the insn suffix. */
3854 {
3855 i386_operand_type mask, allowed;
3856 const insn_template *t;
3857
3858 operand_type_set (&mask, 0);
3859 operand_type_set (&allowed, 0);
3860
3861 for (t = current_templates->start;
3862 t < current_templates->end;
3863 ++t)
3864 allowed = operand_type_or (allowed,
3865 t->operand_types[op]);
3866 switch (guess_suffix)
3867 {
3868 case QWORD_MNEM_SUFFIX:
3869 mask.bitfield.imm64 = 1;
3870 mask.bitfield.imm32s = 1;
3871 break;
3872 case LONG_MNEM_SUFFIX:
3873 mask.bitfield.imm32 = 1;
3874 break;
3875 case WORD_MNEM_SUFFIX:
3876 mask.bitfield.imm16 = 1;
3877 break;
3878 case BYTE_MNEM_SUFFIX:
3879 mask.bitfield.imm8 = 1;
3880 break;
3881 default:
3882 break;
3883 }
3884 allowed = operand_type_and (mask, allowed);
3885 if (!operand_type_all_zero (&allowed))
3886 i.types[op] = operand_type_and (i.types[op], mask);
3887 }
3888 break;
3889 }
3890 }
3891 }
3892
3893 /* Try to use the smallest displacement type too. */
3894 static void
3895 optimize_disp (void)
3896 {
3897 int op;
3898
3899 for (op = i.operands; --op >= 0;)
3900 if (operand_type_check (i.types[op], disp))
3901 {
3902 if (i.op[op].disps->X_op == O_constant)
3903 {
3904 offsetT op_disp = i.op[op].disps->X_add_number;
3905
3906 if (i.types[op].bitfield.disp16
3907 && (op_disp & ~(offsetT) 0xffff) == 0)
3908 {
3909 /* If this operand is at most 16 bits, convert
3910 to a signed 16 bit number and don't use 64bit
3911 displacement. */
3912 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3913 i.types[op].bitfield.disp64 = 0;
3914 }
3915 if (i.types[op].bitfield.disp32
3916 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3917 {
3918 /* If this operand is at most 32 bits, convert
3919 to a signed 32 bit number and don't use 64bit
3920 displacement. */
3921 op_disp &= (((offsetT) 2 << 31) - 1);
3922 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3923 i.types[op].bitfield.disp64 = 0;
3924 }
3925 if (!op_disp && i.types[op].bitfield.baseindex)
3926 {
3927 i.types[op].bitfield.disp8 = 0;
3928 i.types[op].bitfield.disp16 = 0;
3929 i.types[op].bitfield.disp32 = 0;
3930 i.types[op].bitfield.disp32s = 0;
3931 i.types[op].bitfield.disp64 = 0;
3932 i.op[op].disps = 0;
3933 i.disp_operands--;
3934 }
3935 else if (flag_code == CODE_64BIT)
3936 {
3937 if (fits_in_signed_long (op_disp))
3938 {
3939 i.types[op].bitfield.disp64 = 0;
3940 i.types[op].bitfield.disp32s = 1;
3941 }
3942 if (i.prefix[ADDR_PREFIX]
3943 && fits_in_unsigned_long (op_disp))
3944 i.types[op].bitfield.disp32 = 1;
3945 }
3946 if ((i.types[op].bitfield.disp32
3947 || i.types[op].bitfield.disp32s
3948 || i.types[op].bitfield.disp16)
3949 && fits_in_signed_byte (op_disp))
3950 i.types[op].bitfield.disp8 = 1;
3951 }
3952 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3953 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3954 {
3955 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3956 i.op[op].disps, 0, i.reloc[op]);
3957 i.types[op].bitfield.disp8 = 0;
3958 i.types[op].bitfield.disp16 = 0;
3959 i.types[op].bitfield.disp32 = 0;
3960 i.types[op].bitfield.disp32s = 0;
3961 i.types[op].bitfield.disp64 = 0;
3962 }
3963 else
3964 	  /* We only support 64-bit displacements for constants.  */
3965 i.types[op].bitfield.disp64 = 0;
3966 }
3967 }
3968
3969 /* Check if operands are valid for the instruction. */
3970
3971 static int
3972 check_VecOperands (const insn_template *t)
3973 {
3974 /* Without VSIB byte, we can't have a vector register for index. */
3975 if (!t->opcode_modifier.vecsib
3976 && i.index_reg
3977 && (i.index_reg->reg_type.bitfield.regxmm
3978 || i.index_reg->reg_type.bitfield.regymm))
3979 {
3980 i.error = unsupported_vector_index_register;
3981 return 1;
3982 }
3983
3984 /* For VSIB byte, we need a vector register for index, and all vector
3985 registers must be distinct. */
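  /* E.g. AVX2 gathers: in `vgatherdps %ymm2,(%rax,%ymm3,4),%ymm1' the
     mask, index and destination registers must all be distinct.  */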
3986 if (t->opcode_modifier.vecsib)
3987 {
3988 if (!i.index_reg
3989 || !((t->opcode_modifier.vecsib == VecSIB128
3990 && i.index_reg->reg_type.bitfield.regxmm)
3991 || (t->opcode_modifier.vecsib == VecSIB256
3992 && i.index_reg->reg_type.bitfield.regymm)))
3993 {
3994 i.error = invalid_vsib_address;
3995 return 1;
3996 }
3997
3998 gas_assert (i.reg_operands == 2);
3999 gas_assert (i.types[0].bitfield.regxmm
4000 || i.types[0].bitfield.regymm);
4001 gas_assert (i.types[2].bitfield.regxmm
4002 || i.types[2].bitfield.regymm);
4003
4004 if (operand_check == check_none)
4005 return 0;
4006 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4007 && register_number (i.op[2].regs) != register_number (i.index_reg)
4008 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4009 return 0;
4010 if (operand_check == check_error)
4011 {
4012 i.error = invalid_vector_register_set;
4013 return 1;
4014 }
4015 as_warn (_("mask, index, and destination registers should be distinct"));
4016 }
4017
4018 return 0;
4019 }
4020
4021 /* Check if operands are valid for the instruction. Update VEX
4022 operand types. */
4023
4024 static int
4025 VEX_check_operands (const insn_template *t)
4026 {
4027 if (!t->opcode_modifier.vex)
4028 return 0;
4029
4030 /* Only check VEX_Imm4, which must be the first operand. */
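  /* Vec_Imm4 means only the low 4 bits of the immediate are encoded;
     build_modrm_byte stores a register number in the upper nibble of
     the same byte, so the constant must fit in 4 bits.  */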
4031 if (t->operand_types[0].bitfield.vec_imm4)
4032 {
4033 if (i.op[0].imms->X_op != O_constant
4034 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4035 {
4036 i.error = bad_imm4;
4037 return 1;
4038 }
4039
4040 /* Turn off Imm8 so that update_imm won't complain. */
4041 i.types[0] = vec_imm4;
4042 }
4043
4044 return 0;
4045 }
4046
4047 static const insn_template *
4048 match_template (void)
4049 {
4050 /* Points to template once we've found it. */
4051 const insn_template *t;
4052 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4053 i386_operand_type overlap4;
4054 unsigned int found_reverse_match;
4055 i386_opcode_modifier suffix_check;
4056 i386_operand_type operand_types [MAX_OPERANDS];
4057 int addr_prefix_disp;
4058 unsigned int j;
4059 unsigned int found_cpu_match;
4060 unsigned int check_register;
4061 enum i386_error specific_error = 0;
4062
4063 #if MAX_OPERANDS != 5
4064 # error "MAX_OPERANDS must be 5."
4065 #endif
4066
4067 found_reverse_match = 0;
4068 addr_prefix_disp = -1;
4069
4070 memset (&suffix_check, 0, sizeof (suffix_check));
4071 if (i.suffix == BYTE_MNEM_SUFFIX)
4072 suffix_check.no_bsuf = 1;
4073 else if (i.suffix == WORD_MNEM_SUFFIX)
4074 suffix_check.no_wsuf = 1;
4075 else if (i.suffix == SHORT_MNEM_SUFFIX)
4076 suffix_check.no_ssuf = 1;
4077 else if (i.suffix == LONG_MNEM_SUFFIX)
4078 suffix_check.no_lsuf = 1;
4079 else if (i.suffix == QWORD_MNEM_SUFFIX)
4080 suffix_check.no_qsuf = 1;
4081 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4082 suffix_check.no_ldsuf = 1;
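  /* suffix_check now marks the suffix the user actually wrote; any
     template whose corresponding No_?Suf flag is set (suffix not
     allowed) is skipped in the loop below.  */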
4083
4084 /* Must have right number of operands. */
4085 i.error = number_of_operands_mismatch;
4086
4087 for (t = current_templates->start; t < current_templates->end; t++)
4088 {
4089 addr_prefix_disp = -1;
4090
4091 if (i.operands != t->operands)
4092 continue;
4093
4094 /* Check processor support. */
4095 i.error = unsupported;
4096 found_cpu_match = (cpu_flags_match (t)
4097 == CPU_FLAGS_PERFECT_MATCH);
4098 if (!found_cpu_match)
4099 continue;
4100
4101 /* Check old gcc support. */
4102 i.error = old_gcc_only;
4103 if (!old_gcc && t->opcode_modifier.oldgcc)
4104 continue;
4105
4106 /* Check AT&T mnemonic. */
4107 i.error = unsupported_with_intel_mnemonic;
4108 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4109 continue;
4110
4111 /* Check AT&T/Intel syntax. */
4112 i.error = unsupported_syntax;
4113 if ((intel_syntax && t->opcode_modifier.attsyntax)
4114 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4115 continue;
4116
4117 /* Check the suffix, except for some instructions in intel mode. */
4118 i.error = invalid_instruction_suffix;
4119 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4120 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4121 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4122 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4123 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4124 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4125 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4126 continue;
4127
4128 if (!operand_size_match (t))
4129 continue;
4130
4131 for (j = 0; j < MAX_OPERANDS; j++)
4132 operand_types[j] = t->operand_types[j];
4133
4134 /* In general, don't allow 64-bit operands in 32-bit mode. */
4135 if (i.suffix == QWORD_MNEM_SUFFIX
4136 && flag_code != CODE_64BIT
4137 && (intel_syntax
4138 ? (!t->opcode_modifier.ignoresize
4139 && !intel_float_operand (t->name))
4140 : intel_float_operand (t->name) != 2)
4141 && ((!operand_types[0].bitfield.regmmx
4142 && !operand_types[0].bitfield.regxmm
4143 && !operand_types[0].bitfield.regymm)
4144 || (!operand_types[t->operands > 1].bitfield.regmmx
4145 && !!operand_types[t->operands > 1].bitfield.regxmm
4146 && !!operand_types[t->operands > 1].bitfield.regymm))
4147 && (t->base_opcode != 0x0fc7
4148 || t->extension_opcode != 1 /* cmpxchg8b */))
4149 continue;
4150
4151 /* In general, don't allow 32-bit operands on pre-386. */
4152 else if (i.suffix == LONG_MNEM_SUFFIX
4153 && !cpu_arch_flags.bitfield.cpui386
4154 && (intel_syntax
4155 ? (!t->opcode_modifier.ignoresize
4156 && !intel_float_operand (t->name))
4157 : intel_float_operand (t->name) != 2)
4158 && ((!operand_types[0].bitfield.regmmx
4159 && !operand_types[0].bitfield.regxmm)
4160 || (!operand_types[t->operands > 1].bitfield.regmmx
4161 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4162 continue;
4163
4164 /* Do not verify operands when there are none. */
4165 else
4166 {
4167 if (!t->operands)
4168 /* We've found a match; break out of loop. */
4169 break;
4170 }
4171
4172 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4173 into Disp32/Disp16/Disp32 operand. */
4174 if (i.prefix[ADDR_PREFIX] != 0)
4175 {
4176 /* There should be only one Disp operand. */
4177 switch (flag_code)
4178 {
4179 case CODE_16BIT:
4180 for (j = 0; j < MAX_OPERANDS; j++)
4181 {
4182 if (operand_types[j].bitfield.disp16)
4183 {
4184 addr_prefix_disp = j;
4185 operand_types[j].bitfield.disp32 = 1;
4186 operand_types[j].bitfield.disp16 = 0;
4187 break;
4188 }
4189 }
4190 break;
4191 case CODE_32BIT:
4192 for (j = 0; j < MAX_OPERANDS; j++)
4193 {
4194 if (operand_types[j].bitfield.disp32)
4195 {
4196 addr_prefix_disp = j;
4197 operand_types[j].bitfield.disp32 = 0;
4198 operand_types[j].bitfield.disp16 = 1;
4199 break;
4200 }
4201 }
4202 break;
4203 case CODE_64BIT:
4204 for (j = 0; j < MAX_OPERANDS; j++)
4205 {
4206 if (operand_types[j].bitfield.disp64)
4207 {
4208 addr_prefix_disp = j;
4209 operand_types[j].bitfield.disp64 = 0;
4210 operand_types[j].bitfield.disp32 = 1;
4211 break;
4212 }
4213 }
4214 break;
4215 }
4216 }
4217
4218 /* We check register size if needed. */
4219 check_register = t->opcode_modifier.checkregsize;
4220 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4221 switch (t->operands)
4222 {
4223 case 1:
4224 if (!operand_type_match (overlap0, i.types[0]))
4225 continue;
4226 break;
4227 case 2:
4228 /* xchg %eax, %eax is a special case. It is an alias for nop
4229 only in 32bit mode and we can use opcode 0x90. In 64bit
4230 mode, we can't use 0x90 for xchg %eax, %eax since it should
4231 zero-extend %eax to %rax. */
4232 if (flag_code == CODE_64BIT
4233 && t->base_opcode == 0x90
4234 && operand_type_equal (&i.types [0], &acc32)
4235 && operand_type_equal (&i.types [1], &acc32))
4236 continue;
4237 if (i.swap_operand)
4238 {
4239 /* If we swap operands for encoding, we either match
4240 the next template or reverse the direction of operands. */
4241 if (t->opcode_modifier.s)
4242 continue;
4243 else if (t->opcode_modifier.d)
4244 goto check_reverse;
4245 }
4246
4247 case 3:
4248 /* If we swap operands for encoding, we match the next template. */
4249 if (i.swap_operand && t->opcode_modifier.s)
4250 continue;
4251 case 4:
4252 case 5:
4253 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4254 if (!operand_type_match (overlap0, i.types[0])
4255 || !operand_type_match (overlap1, i.types[1])
4256 || (check_register
4257 && !operand_type_register_match (overlap0, i.types[0],
4258 operand_types[0],
4259 overlap1, i.types[1],
4260 operand_types[1])))
4261 {
4262 /* Check if other direction is valid ... */
4263 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4264 continue;
4265
4266 check_reverse:
4267 /* Try reversing direction of operands. */
4268 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4269 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4270 if (!operand_type_match (overlap0, i.types[0])
4271 || !operand_type_match (overlap1, i.types[1])
4272 || (check_register
4273 && !operand_type_register_match (overlap0,
4274 i.types[0],
4275 operand_types[1],
4276 overlap1,
4277 i.types[1],
4278 operand_types[0])))
4279 {
4280 /* Does not match either direction. */
4281 continue;
4282 }
4283 /* found_reverse_match holds which of D or FloatDR
4284 we've found. */
4285 if (t->opcode_modifier.d)
4286 found_reverse_match = Opcode_D;
4287 else if (t->opcode_modifier.floatd)
4288 found_reverse_match = Opcode_FloatD;
4289 else
4290 found_reverse_match = 0;
4291 if (t->opcode_modifier.floatr)
4292 found_reverse_match |= Opcode_FloatR;
4293 }
4294 else
4295 {
4296 /* Found a forward 2 operand match here. */
4297 switch (t->operands)
4298 {
4299 case 5:
4300 overlap4 = operand_type_and (i.types[4],
4301 operand_types[4]);
4302 case 4:
4303 overlap3 = operand_type_and (i.types[3],
4304 operand_types[3]);
4305 case 3:
4306 overlap2 = operand_type_and (i.types[2],
4307 operand_types[2]);
4308 break;
4309 }
4310
4311 switch (t->operands)
4312 {
4313 case 5:
4314 if (!operand_type_match (overlap4, i.types[4])
4315 || !operand_type_register_match (overlap3,
4316 i.types[3],
4317 operand_types[3],
4318 overlap4,
4319 i.types[4],
4320 operand_types[4]))
4321 continue;
4322 case 4:
4323 if (!operand_type_match (overlap3, i.types[3])
4324 || (check_register
4325 && !operand_type_register_match (overlap2,
4326 i.types[2],
4327 operand_types[2],
4328 overlap3,
4329 i.types[3],
4330 operand_types[3])))
4331 continue;
4332 case 3:
4333 /* Here we make use of the fact that there are no
4334 reverse match 3 operand instructions, and all 3
4335 operand instructions only need to be checked for
4336 register consistency between operands 2 and 3. */
4337 if (!operand_type_match (overlap2, i.types[2])
4338 || (check_register
4339 && !operand_type_register_match (overlap1,
4340 i.types[1],
4341 operand_types[1],
4342 overlap2,
4343 i.types[2],
4344 operand_types[2])))
4345 continue;
4346 break;
4347 }
4348 }
4349 /* Found either forward/reverse 2, 3 or 4 operand match here:
4350 slip through to break. */
4351 }
4352 if (!found_cpu_match)
4353 {
4354 found_reverse_match = 0;
4355 continue;
4356 }
4357
4358 /* Check if vector and VEX operands are valid. */
4359 if (check_VecOperands (t) || VEX_check_operands (t))
4360 {
4361 specific_error = i.error;
4362 continue;
4363 }
4364
4365 /* We've found a match; break out of loop. */
4366 break;
4367 }
4368
4369 if (t == current_templates->end)
4370 {
4371 /* We found no match. */
4372 const char *err_msg;
4373 switch (specific_error ? specific_error : i.error)
4374 {
4375 default:
4376 abort ();
4377 case operand_size_mismatch:
4378 err_msg = _("operand size mismatch");
4379 break;
4380 case operand_type_mismatch:
4381 err_msg = _("operand type mismatch");
4382 break;
4383 case register_type_mismatch:
4384 err_msg = _("register type mismatch");
4385 break;
4386 case number_of_operands_mismatch:
4387 err_msg = _("number of operands mismatch");
4388 break;
4389 case invalid_instruction_suffix:
4390 err_msg = _("invalid instruction suffix");
4391 break;
4392 case bad_imm4:
4393 err_msg = _("constant doesn't fit in 4 bits");
4394 break;
4395 case old_gcc_only:
4396 err_msg = _("only supported with old gcc");
4397 break;
4398 case unsupported_with_intel_mnemonic:
4399 err_msg = _("unsupported with Intel mnemonic");
4400 break;
4401 case unsupported_syntax:
4402 err_msg = _("unsupported syntax");
4403 break;
4404 case unsupported:
4405 as_bad (_("unsupported instruction `%s'"),
4406 current_templates->start->name);
4407 return NULL;
4408 case invalid_vsib_address:
4409 err_msg = _("invalid VSIB address");
4410 break;
4411 case invalid_vector_register_set:
4412 err_msg = _("mask, index, and destination registers must be distinct");
4413 break;
4414 case unsupported_vector_index_register:
4415 err_msg = _("unsupported vector index register");
4416 break;
4417 }
4418 as_bad (_("%s for `%s'"), err_msg,
4419 current_templates->start->name);
4420 return NULL;
4421 }
4422
4423 if (!quiet_warnings)
4424 {
4425 if (!intel_syntax
4426 && (i.types[0].bitfield.jumpabsolute
4427 != operand_types[0].bitfield.jumpabsolute))
4428 {
4429 as_warn (_("indirect %s without `*'"), t->name);
4430 }
4431
4432 if (t->opcode_modifier.isprefix
4433 && t->opcode_modifier.ignoresize)
4434 {
4435 /* Warn them that a data or address size prefix doesn't
4436 affect assembly of the next line of code. */
4437 as_warn (_("stand-alone `%s' prefix"), t->name);
4438 }
4439 }
4440
4441 /* Copy the template we found. */
4442 i.tm = *t;
4443
4444 if (addr_prefix_disp != -1)
4445 i.tm.operand_types[addr_prefix_disp]
4446 = operand_types[addr_prefix_disp];
4447
4448 if (found_reverse_match)
4449 {
4450 /* If we found a reverse match we must alter the opcode
4451 direction bit. found_reverse_match holds bits to change
4452 (different for int & float insns). */
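      /* E.g. register-to-register `addl %eax,%ebx' can be encoded with
	 either opcode 0x01 or 0x03; the two forms differ only in the
	 direction bit.  */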
4453
4454 i.tm.base_opcode ^= found_reverse_match;
4455
4456 i.tm.operand_types[0] = operand_types[1];
4457 i.tm.operand_types[1] = operand_types[0];
4458 }
4459
4460 return t;
4461 }
4462
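/* Check the ES segment override on string instructions (movs, cmps,
   stos, scas, ins): the operand addressed through %es cannot use any
   other segment.  */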
4463 static int
4464 check_string (void)
4465 {
4466 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4467 if (i.tm.operand_types[mem_op].bitfield.esseg)
4468 {
4469 if (i.seg[0] != NULL && i.seg[0] != &es)
4470 {
4471 as_bad (_("`%s' operand %d must use `%ses' segment"),
4472 i.tm.name,
4473 mem_op + 1,
4474 register_prefix);
4475 return 0;
4476 }
4477 /* There's only ever one segment override allowed per instruction.
4478 This instruction possibly has a legal segment override on the
4479 second operand, so copy the segment to where non-string
4480 instructions store it, allowing common code. */
4481 i.seg[0] = i.seg[1];
4482 }
4483 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4484 {
4485 if (i.seg[1] != NULL && i.seg[1] != &es)
4486 {
4487 as_bad (_("`%s' operand %d must use `%ses' segment"),
4488 i.tm.name,
4489 mem_op + 2,
4490 register_prefix);
4491 return 0;
4492 }
4493 }
4494 return 1;
4495 }
4496
4497 static int
4498 process_suffix (void)
4499 {
4500 /* If matched instruction specifies an explicit instruction mnemonic
4501 suffix, use it. */
4502 if (i.tm.opcode_modifier.size16)
4503 i.suffix = WORD_MNEM_SUFFIX;
4504 else if (i.tm.opcode_modifier.size32)
4505 i.suffix = LONG_MNEM_SUFFIX;
4506 else if (i.tm.opcode_modifier.size64)
4507 i.suffix = QWORD_MNEM_SUFFIX;
4508 else if (i.reg_operands)
4509 {
4510 /* If there's no instruction mnemonic suffix we try to invent one
4511 based on register operands. */
4512 if (!i.suffix)
4513 {
4514 /* We take i.suffix from the last register operand specified.
4515 The destination register type is more significant than the
4516 source register type. crc32 in SSE4.2 prefers source register
4517 type. */
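	  /* 0xf20f38f1 is crc32 with a 16/32/64-bit source register;
	     0xf20f38f0 is the form with an 8-bit source, handled
	     below.  */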
4518 if (i.tm.base_opcode == 0xf20f38f1)
4519 {
4520 if (i.types[0].bitfield.reg16)
4521 i.suffix = WORD_MNEM_SUFFIX;
4522 else if (i.types[0].bitfield.reg32)
4523 i.suffix = LONG_MNEM_SUFFIX;
4524 else if (i.types[0].bitfield.reg64)
4525 i.suffix = QWORD_MNEM_SUFFIX;
4526 }
4527 else if (i.tm.base_opcode == 0xf20f38f0)
4528 {
4529 if (i.types[0].bitfield.reg8)
4530 i.suffix = BYTE_MNEM_SUFFIX;
4531 }
4532
4533 if (!i.suffix)
4534 {
4535 int op;
4536
4537 if (i.tm.base_opcode == 0xf20f38f1
4538 || i.tm.base_opcode == 0xf20f38f0)
4539 {
4540 /* We have to know the operand size for crc32. */
4541 as_bad (_("ambiguous memory operand size for `%s'"),
4542 i.tm.name);
4543 return 0;
4544 }
4545
4546 for (op = i.operands; --op >= 0;)
4547 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4548 {
4549 if (i.types[op].bitfield.reg8)
4550 {
4551 i.suffix = BYTE_MNEM_SUFFIX;
4552 break;
4553 }
4554 else if (i.types[op].bitfield.reg16)
4555 {
4556 i.suffix = WORD_MNEM_SUFFIX;
4557 break;
4558 }
4559 else if (i.types[op].bitfield.reg32)
4560 {
4561 i.suffix = LONG_MNEM_SUFFIX;
4562 break;
4563 }
4564 else if (i.types[op].bitfield.reg64)
4565 {
4566 i.suffix = QWORD_MNEM_SUFFIX;
4567 break;
4568 }
4569 }
4570 }
4571 }
4572 else if (i.suffix == BYTE_MNEM_SUFFIX)
4573 {
4574 if (intel_syntax
4575 && i.tm.opcode_modifier.ignoresize
4576 && i.tm.opcode_modifier.no_bsuf)
4577 i.suffix = 0;
4578 else if (!check_byte_reg ())
4579 return 0;
4580 }
4581 else if (i.suffix == LONG_MNEM_SUFFIX)
4582 {
4583 if (intel_syntax
4584 && i.tm.opcode_modifier.ignoresize
4585 && i.tm.opcode_modifier.no_lsuf)
4586 i.suffix = 0;
4587 else if (!check_long_reg ())
4588 return 0;
4589 }
4590 else if (i.suffix == QWORD_MNEM_SUFFIX)
4591 {
4592 if (intel_syntax
4593 && i.tm.opcode_modifier.ignoresize
4594 && i.tm.opcode_modifier.no_qsuf)
4595 i.suffix = 0;
4596 else if (!check_qword_reg ())
4597 return 0;
4598 }
4599 else if (i.suffix == WORD_MNEM_SUFFIX)
4600 {
4601 if (intel_syntax
4602 && i.tm.opcode_modifier.ignoresize
4603 && i.tm.opcode_modifier.no_wsuf)
4604 i.suffix = 0;
4605 else if (!check_word_reg ())
4606 return 0;
4607 }
4608 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4609 || i.suffix == YMMWORD_MNEM_SUFFIX)
4610 {
4611 /* Skip if the instruction has x/y suffix. match_template
4612 should check if it is a valid suffix. */
4613 }
4614 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4615 /* Do nothing if the instruction is going to ignore the prefix. */
4616 ;
4617 else
4618 abort ();
4619 }
4620 else if (i.tm.opcode_modifier.defaultsize
4621 && !i.suffix
4622 /* exclude fldenv/frstor/fsave/fstenv */
4623 && i.tm.opcode_modifier.no_ssuf)
4624 {
4625 i.suffix = stackop_size;
4626 }
4627 else if (intel_syntax
4628 && !i.suffix
4629 && (i.tm.operand_types[0].bitfield.jumpabsolute
4630 || i.tm.opcode_modifier.jumpbyte
4631 || i.tm.opcode_modifier.jumpintersegment
4632 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4633 && i.tm.extension_opcode <= 3)))
4634 {
4635 switch (flag_code)
4636 {
4637 case CODE_64BIT:
4638 if (!i.tm.opcode_modifier.no_qsuf)
4639 {
4640 i.suffix = QWORD_MNEM_SUFFIX;
4641 break;
4642 }
4643 case CODE_32BIT:
4644 if (!i.tm.opcode_modifier.no_lsuf)
4645 i.suffix = LONG_MNEM_SUFFIX;
4646 break;
4647 case CODE_16BIT:
4648 if (!i.tm.opcode_modifier.no_wsuf)
4649 i.suffix = WORD_MNEM_SUFFIX;
4650 break;
4651 }
4652 }
4653
4654 if (!i.suffix)
4655 {
4656 if (!intel_syntax)
4657 {
4658 if (i.tm.opcode_modifier.w)
4659 {
4660 as_bad (_("no instruction mnemonic suffix given and "
4661 "no register operands; can't size instruction"));
4662 return 0;
4663 }
4664 }
4665 else
4666 {
4667 unsigned int suffixes;
4668
4669 suffixes = !i.tm.opcode_modifier.no_bsuf;
4670 if (!i.tm.opcode_modifier.no_wsuf)
4671 suffixes |= 1 << 1;
4672 if (!i.tm.opcode_modifier.no_lsuf)
4673 suffixes |= 1 << 2;
4674 if (!i.tm.opcode_modifier.no_ldsuf)
4675 suffixes |= 1 << 3;
4676 if (!i.tm.opcode_modifier.no_ssuf)
4677 suffixes |= 1 << 4;
4678 if (!i.tm.opcode_modifier.no_qsuf)
4679 suffixes |= 1 << 5;
4680
4681 /* More than one suffix is possible (suffixes has more than one bit set). */
4682 if (i.tm.opcode_modifier.w
4683 || ((suffixes & (suffixes - 1))
4684 && !i.tm.opcode_modifier.defaultsize
4685 && !i.tm.opcode_modifier.ignoresize))
4686 {
4687 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4688 return 0;
4689 }
4690 }
4691 }
4692
4693 /* Change the opcode based on the operand size given by i.suffix;
4694 We don't need to change things for byte insns. */
4695
4696 if (i.suffix
4697 && i.suffix != BYTE_MNEM_SUFFIX
4698 && i.suffix != XMMWORD_MNEM_SUFFIX
4699 && i.suffix != YMMWORD_MNEM_SUFFIX)
4700 {
4701 /* It's not a byte, select word/dword operation. */
4702 if (i.tm.opcode_modifier.w)
4703 {
4704 if (i.tm.opcode_modifier.shortform)
4705 i.tm.base_opcode |= 8;
4706 else
4707 i.tm.base_opcode |= 1;
4708 }
4709
4710 /* Now select between word & dword operations via the operand
4711 size prefix, except for instructions that will ignore this
4712 prefix anyway. */
4713 if (i.tm.opcode_modifier.addrprefixop0)
4714 {
4715 /* The address size override prefix changes the size of the
4716 first operand. */
4717 if ((flag_code == CODE_32BIT
4718 && i.op->regs[0].reg_type.bitfield.reg16)
4719 || (flag_code != CODE_32BIT
4720 && i.op->regs[0].reg_type.bitfield.reg32))
4721 if (!add_prefix (ADDR_PREFIX_OPCODE))
4722 return 0;
4723 }
4724 else if (i.suffix != QWORD_MNEM_SUFFIX
4725 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4726 && !i.tm.opcode_modifier.ignoresize
4727 && !i.tm.opcode_modifier.floatmf
4728 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4729 || (flag_code == CODE_64BIT
4730 && i.tm.opcode_modifier.jumpbyte)))
4731 {
4732 unsigned int prefix = DATA_PREFIX_OPCODE;
4733
4734 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4735 prefix = ADDR_PREFIX_OPCODE;
4736
4737 if (!add_prefix (prefix))
4738 return 0;
4739 }
4740
4741 /* Set mode64 for an operand. */
4742 if (i.suffix == QWORD_MNEM_SUFFIX
4743 && flag_code == CODE_64BIT
4744 && !i.tm.opcode_modifier.norex64)
4745 {
4746 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4747 need rex64. cmpxchg8b is also a special case. */
4748 if (! (i.operands == 2
4749 && i.tm.base_opcode == 0x90
4750 && i.tm.extension_opcode == None
4751 && operand_type_equal (&i.types [0], &acc64)
4752 && operand_type_equal (&i.types [1], &acc64))
4753 && ! (i.operands == 1
4754 && i.tm.base_opcode == 0xfc7
4755 && i.tm.extension_opcode == 1
4756 && !operand_type_check (i.types [0], reg)
4757 && operand_type_check (i.types [0], anymem)))
4758 i.rex |= REX_W;
4759 }
4760
4761 /* Size floating point instruction. */
4762 if (i.suffix == LONG_MNEM_SUFFIX)
4763 if (i.tm.opcode_modifier.floatmf)
4764 i.tm.base_opcode ^= 4;
4765 }
4766
4767 return 1;
4768 }
4769
4770 static int
4771 check_byte_reg (void)
4772 {
4773 int op;
4774
4775 for (op = i.operands; --op >= 0;)
4776 {
4777 /* If this is an eight bit register, it's OK. If it's the 16 or
4778 32 bit version of an eight bit register, we will just use the
4779 low portion, and that's OK too. */
4780 if (i.types[op].bitfield.reg8)
4781 continue;
4782
4783 /* I/O port address operands are OK too. */
4784 if (i.tm.operand_types[op].bitfield.inoutportreg)
4785 continue;
4786
4787 /* crc32 doesn't generate this warning. */
4788 if (i.tm.base_opcode == 0xf20f38f0)
4789 continue;
4790
4791 if ((i.types[op].bitfield.reg16
4792 || i.types[op].bitfield.reg32
4793 || i.types[op].bitfield.reg64)
4794 && i.op[op].regs->reg_num < 4
4795 /* Prohibit these changes in 64bit mode, since the lowering
4796 would be more complicated. */
4797 && flag_code != CODE_64BIT)
4798 {
4799 #if REGISTER_WARNINGS
4800 if (!quiet_warnings)
4801 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4802 register_prefix,
4803 (i.op[op].regs + (i.types[op].bitfield.reg16
4804 ? REGNAM_AL - REGNAM_AX
4805 : REGNAM_AL - REGNAM_EAX))->reg_name,
4806 register_prefix,
4807 i.op[op].regs->reg_name,
4808 i.suffix);
4809 #endif
4810 continue;
4811 }
4812 /* Any other register is bad. */
4813 if (i.types[op].bitfield.reg16
4814 || i.types[op].bitfield.reg32
4815 || i.types[op].bitfield.reg64
4816 || i.types[op].bitfield.regmmx
4817 || i.types[op].bitfield.regxmm
4818 || i.types[op].bitfield.regymm
4819 || i.types[op].bitfield.sreg2
4820 || i.types[op].bitfield.sreg3
4821 || i.types[op].bitfield.control
4822 || i.types[op].bitfield.debug
4823 || i.types[op].bitfield.test
4824 || i.types[op].bitfield.floatreg
4825 || i.types[op].bitfield.floatacc)
4826 {
4827 as_bad (_("`%s%s' not allowed with `%s%c'"),
4828 register_prefix,
4829 i.op[op].regs->reg_name,
4830 i.tm.name,
4831 i.suffix);
4832 return 0;
4833 }
4834 }
4835 return 1;
4836 }
4837
4838 static int
4839 check_long_reg (void)
4840 {
4841 int op;
4842
4843 for (op = i.operands; --op >= 0;)
4844 /* Reject eight bit registers, except where the template requires
4845 them. (eg. movzb) */
4846 if (i.types[op].bitfield.reg8
4847 && (i.tm.operand_types[op].bitfield.reg16
4848 || i.tm.operand_types[op].bitfield.reg32
4849 || i.tm.operand_types[op].bitfield.acc))
4850 {
4851 as_bad (_("`%s%s' not allowed with `%s%c'"),
4852 register_prefix,
4853 i.op[op].regs->reg_name,
4854 i.tm.name,
4855 i.suffix);
4856 return 0;
4857 }
4858 /* Warn if the e prefix on a general reg is missing. */
4859 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4860 && i.types[op].bitfield.reg16
4861 && (i.tm.operand_types[op].bitfield.reg32
4862 || i.tm.operand_types[op].bitfield.acc))
4863 {
4864 /* Prohibit these changes in the 64bit mode, since the
4865 lowering is more complicated. */
4866 if (flag_code == CODE_64BIT)
4867 {
4868 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4869 register_prefix, i.op[op].regs->reg_name,
4870 i.suffix);
4871 return 0;
4872 }
4873 #if REGISTER_WARNINGS
4874 else
4875 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4876 register_prefix,
4877 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4878 register_prefix,
4879 i.op[op].regs->reg_name,
4880 i.suffix);
4881 #endif
4882 }
4883 /* Warn if the r prefix on a general reg is missing. */
4884 else if (i.types[op].bitfield.reg64
4885 && (i.tm.operand_types[op].bitfield.reg32
4886 || i.tm.operand_types[op].bitfield.acc))
4887 {
4888 if (intel_syntax
4889 && i.tm.opcode_modifier.toqword
4890 && !i.types[0].bitfield.regxmm)
4891 {
4892 /* Convert to QWORD. We want REX byte. */
4893 i.suffix = QWORD_MNEM_SUFFIX;
4894 }
4895 else
4896 {
4897 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4898 register_prefix, i.op[op].regs->reg_name,
4899 i.suffix);
4900 return 0;
4901 }
4902 }
4903 return 1;
4904 }
4905
4906 static int
4907 check_qword_reg (void)
4908 {
4909 int op;
4910
4911 for (op = i.operands; --op >= 0; )
4912 /* Reject eight bit registers, except where the template requires
4913 them. (eg. movzb) */
4914 if (i.types[op].bitfield.reg8
4915 && (i.tm.operand_types[op].bitfield.reg16
4916 || i.tm.operand_types[op].bitfield.reg32
4917 || i.tm.operand_types[op].bitfield.acc))
4918 {
4919 as_bad (_("`%s%s' not allowed with `%s%c'"),
4920 register_prefix,
4921 i.op[op].regs->reg_name,
4922 i.tm.name,
4923 i.suffix);
4924 return 0;
4925 }
4926 /* Reject 16- and 32-bit general registers used with a `q' suffix, unless Intel syntax can narrow the operation to dword. */
4927 else if ((i.types[op].bitfield.reg16
4928 || i.types[op].bitfield.reg32)
4929 && (i.tm.operand_types[op].bitfield.reg32
4930 || i.tm.operand_types[op].bitfield.acc))
4931 {
4932 /* In Intel syntax the operation can be narrowed to dword when
4933 the template allows it (ToDword) and no XMM operand is involved. */
4934 if (intel_syntax
4935 && i.tm.opcode_modifier.todword
4936 && !i.types[0].bitfield.regxmm)
4937 {
4938 /* Convert to DWORD. We don't want REX byte. */
4939 i.suffix = LONG_MNEM_SUFFIX;
4940 }
4941 else
4942 {
4943 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4944 register_prefix, i.op[op].regs->reg_name,
4945 i.suffix);
4946 return 0;
4947 }
4948 }
4949 return 1;
4950 }
4951
4952 static int
4953 check_word_reg (void)
4954 {
4955 int op;
4956 for (op = i.operands; --op >= 0;)
4957 /* Reject eight bit registers, except where the template requires
4958 them. (eg. movzb) */
4959 if (i.types[op].bitfield.reg8
4960 && (i.tm.operand_types[op].bitfield.reg16
4961 || i.tm.operand_types[op].bitfield.reg32
4962 || i.tm.operand_types[op].bitfield.acc))
4963 {
4964 as_bad (_("`%s%s' not allowed with `%s%c'"),
4965 register_prefix,
4966 i.op[op].regs->reg_name,
4967 i.tm.name,
4968 i.suffix);
4969 return 0;
4970 }
4971 /* Warn if the e prefix on a general reg is present. */
4972 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4973 && i.types[op].bitfield.reg32
4974 && (i.tm.operand_types[op].bitfield.reg16
4975 || i.tm.operand_types[op].bitfield.acc))
4976 {
4977 /* Prohibit these changes in the 64bit mode, since the
4978 lowering is more complicated. */
4979 if (flag_code == CODE_64BIT)
4980 {
4981 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4982 register_prefix, i.op[op].regs->reg_name,
4983 i.suffix);
4984 return 0;
4985 }
4986 else
4987 #if REGISTER_WARNINGS
4988 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4989 register_prefix,
4990 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4991 register_prefix,
4992 i.op[op].regs->reg_name,
4993 i.suffix);
4994 #endif
4995 }
4996 return 1;
4997 }
4998
4999 static int
5000 update_imm (unsigned int j)
5001 {
5002 i386_operand_type overlap = i.types[j];
5003 if ((overlap.bitfield.imm8
5004 || overlap.bitfield.imm8s
5005 || overlap.bitfield.imm16
5006 || overlap.bitfield.imm32
5007 || overlap.bitfield.imm32s
5008 || overlap.bitfield.imm64)
5009 && !operand_type_equal (&overlap, &imm8)
5010 && !operand_type_equal (&overlap, &imm8s)
5011 && !operand_type_equal (&overlap, &imm16)
5012 && !operand_type_equal (&overlap, &imm32)
5013 && !operand_type_equal (&overlap, &imm32s)
5014 && !operand_type_equal (&overlap, &imm64))
5015 {
5016 if (i.suffix)
5017 {
5018 i386_operand_type temp;
5019
5020 operand_type_set (&temp, 0);
5021 if (i.suffix == BYTE_MNEM_SUFFIX)
5022 {
5023 temp.bitfield.imm8 = overlap.bitfield.imm8;
5024 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5025 }
5026 else if (i.suffix == WORD_MNEM_SUFFIX)
5027 temp.bitfield.imm16 = overlap.bitfield.imm16;
5028 else if (i.suffix == QWORD_MNEM_SUFFIX)
5029 {
5030 temp.bitfield.imm64 = overlap.bitfield.imm64;
5031 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5032 }
5033 else
5034 temp.bitfield.imm32 = overlap.bitfield.imm32;
5035 overlap = temp;
5036 }
5037 else if (operand_type_equal (&overlap, &imm16_32_32s)
5038 || operand_type_equal (&overlap, &imm16_32)
5039 || operand_type_equal (&overlap, &imm16_32s))
5040 {
5041 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5042 overlap = imm16;
5043 else
5044 overlap = imm32s;
5045 }
5046 if (!operand_type_equal (&overlap, &imm8)
5047 && !operand_type_equal (&overlap, &imm8s)
5048 && !operand_type_equal (&overlap, &imm16)
5049 && !operand_type_equal (&overlap, &imm32)
5050 && !operand_type_equal (&overlap, &imm32s)
5051 && !operand_type_equal (&overlap, &imm64))
5052 {
5053 as_bad (_("no instruction mnemonic suffix given; "
5054 "can't determine immediate size"));
5055 return 0;
5056 }
5057 }
5058 i.types[j] = overlap;
5059
5060 return 1;
5061 }
5062
5063 static int
5064 finalize_imm (void)
5065 {
5066 unsigned int j, n;
5067
5068 /* Update the first 2 immediate operands. */
5069 n = i.operands > 2 ? 2 : i.operands;
5070 if (n)
5071 {
5072 for (j = 0; j < n; j++)
5073 if (update_imm (j) == 0)
5074 return 0;
5075
5076 /* The 3rd operand can't be immediate operand. */
5077 gas_assert (operand_type_check (i.types[2], imm) == 0);
5078 }
5079
5080 return 1;
5081 }
5082
5083 static int
5084 bad_implicit_operand (int xmm)
5085 {
5086 const char *ireg = xmm ? "xmm0" : "ymm0";
5087
5088 if (intel_syntax)
5089 as_bad (_("the last operand of `%s' must be `%s%s'"),
5090 i.tm.name, register_prefix, ireg);
5091 else
5092 as_bad (_("the first operand of `%s' must be `%s%s'"),
5093 i.tm.name, register_prefix, ireg);
5094 return 0;
5095 }
5096
5097 static int
5098 process_operands (void)
5099 {
5100 /* Default segment register this instruction will use for memory
5101 accesses. 0 means unknown. This is only for optimizing out
5102 unnecessary segment overrides. */
5103 const seg_entry *default_seg = 0;
5104
5105 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5106 {
5107 unsigned int dupl = i.operands;
5108 unsigned int dest = dupl - 1;
5109 unsigned int j;
5110
5111 /* The destination must be an xmm register. */
5112 gas_assert (i.reg_operands
5113 && MAX_OPERANDS > dupl
5114 && operand_type_equal (&i.types[dest], &regxmm));
5115
5116 if (i.tm.opcode_modifier.firstxmm0)
5117 {
5118 /* The first operand is implicit and must be xmm0. */
5119 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5120 if (register_number (i.op[0].regs) != 0)
5121 return bad_implicit_operand (1);
5122
5123 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5124 {
5125 /* Keep xmm0 for instructions with VEX prefix and 3
5126 sources. */
5127 goto duplicate;
5128 }
5129 else
5130 {
5131 /* We remove the first xmm0 and keep the number of
5132 operands unchanged, which in fact duplicates the
5133 destination. */
5134 for (j = 1; j < i.operands; j++)
5135 {
5136 i.op[j - 1] = i.op[j];
5137 i.types[j - 1] = i.types[j];
5138 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5139 }
5140 }
5141 }
5142 else if (i.tm.opcode_modifier.implicit1stxmm0)
5143 {
5144 gas_assert ((MAX_OPERANDS - 1) > dupl
5145 && (i.tm.opcode_modifier.vexsources
5146 == VEX3SOURCES));
5147
5148 /* Add the implicit xmm0 for instructions with VEX prefix
5149 and 3 sources. */
5150 for (j = i.operands; j > 0; j--)
5151 {
5152 i.op[j] = i.op[j - 1];
5153 i.types[j] = i.types[j - 1];
5154 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5155 }
5156 i.op[0].regs
5157 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5158 i.types[0] = regxmm;
5159 i.tm.operand_types[0] = regxmm;
5160
5161 i.operands += 2;
5162 i.reg_operands += 2;
5163 i.tm.operands += 2;
5164
5165 dupl++;
5166 dest++;
5167 i.op[dupl] = i.op[dest];
5168 i.types[dupl] = i.types[dest];
5169 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5170 }
5171 else
5172 {
5173 duplicate:
5174 i.operands++;
5175 i.reg_operands++;
5176 i.tm.operands++;
5177
5178 i.op[dupl] = i.op[dest];
5179 i.types[dupl] = i.types[dest];
5180 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5181 }
5182
5183 if (i.tm.opcode_modifier.immext)
5184 process_immext ();
5185 }
5186 else if (i.tm.opcode_modifier.firstxmm0)
5187 {
5188 unsigned int j;
5189
5190 /* The first operand is implicit and must be xmm0/ymm0. */
5191 gas_assert (i.reg_operands
5192 && (operand_type_equal (&i.types[0], &regxmm)
5193 || operand_type_equal (&i.types[0], &regymm)));
5194 if (register_number (i.op[0].regs) != 0)
5195 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5196
5197 for (j = 1; j < i.operands; j++)
5198 {
5199 i.op[j - 1] = i.op[j];
5200 i.types[j - 1] = i.types[j];
5201
5202 /* We need to adjust fields in i.tm since they are used by
5203 build_modrm_byte. */
5204 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5205 }
5206
5207 i.operands--;
5208 i.reg_operands--;
5209 i.tm.operands--;
5210 }
5211 else if (i.tm.opcode_modifier.regkludge)
5212 {
5213 /* The imul $imm, %reg instruction is converted into
5214 imul $imm, %reg, %reg, and the clr %reg instruction
5215 is converted into xor %reg, %reg. */
5216
5217 unsigned int first_reg_op;
5218
5219 if (operand_type_check (i.types[0], reg))
5220 first_reg_op = 0;
5221 else
5222 first_reg_op = 1;
5223 /* Pretend we saw the extra register operand. */
5224 gas_assert (i.reg_operands == 1
5225 && i.op[first_reg_op + 1].regs == 0);
5226 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5227 i.types[first_reg_op + 1] = i.types[first_reg_op];
5228 i.operands++;
5229 i.reg_operands++;
5230 }
5231
5232 if (i.tm.opcode_modifier.shortform)
5233 {
5234 if (i.types[0].bitfield.sreg2
5235 || i.types[0].bitfield.sreg3)
5236 {
5237 if (i.tm.base_opcode == POP_SEG_SHORT
5238 && i.op[0].regs->reg_num == 1)
5239 {
5240 as_bad (_("you can't `pop %scs'"), register_prefix);
5241 return 0;
5242 }
5243 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5244 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5245 i.rex |= REX_B;
5246 }
5247 else
5248 {
5249 /* The register or float register operand is in operand
5250 0 or 1. */
5251 unsigned int op;
5252
5253 if (i.types[0].bitfield.floatreg
5254 || operand_type_check (i.types[0], reg))
5255 op = 0;
5256 else
5257 op = 1;
5258 /* Register goes in low 3 bits of opcode. */
5259 i.tm.base_opcode |= i.op[op].regs->reg_num;
5260 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5261 i.rex |= REX_B;
5262 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5263 {
5264 /* Warn about some common errors, but press on regardless.
5265 The first case can be generated by gcc (<= 2.8.1). */
5266 if (i.operands == 2)
5267 {
5268 /* Reversed arguments on faddp, fsubp, etc. */
5269 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5270 register_prefix, i.op[!intel_syntax].regs->reg_name,
5271 register_prefix, i.op[intel_syntax].regs->reg_name);
5272 }
5273 else
5274 {
5275 /* Extraneous `l' suffix on fp insn. */
5276 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5277 register_prefix, i.op[0].regs->reg_name);
5278 }
5279 }
5280 }
5281 }
5282 else if (i.tm.opcode_modifier.modrm)
5283 {
5284 /* The opcode is completed (modulo i.tm.extension_opcode which
5285 must be put into the modrm byte). Now, we make the modrm and
5286 index base bytes based on all the info we've collected. */
5287
5288 default_seg = build_modrm_byte ();
5289 }
5290 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5291 {
5292 default_seg = &ds;
5293 }
5294 else if (i.tm.opcode_modifier.isstring)
5295 {
5296 /* For the string instructions that allow a segment override
5297 on one of their operands, the default segment is ds. */
5298 default_seg = &ds;
5299 }
5300
5301 if (i.tm.base_opcode == 0x8d /* lea */
5302 && i.seg[0]
5303 && !quiet_warnings)
5304 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5305
5306 /* If a segment was explicitly specified, and the specified segment
5307 is not the default, use an opcode prefix to select it. If we
5308 never figured out what the default segment is, then default_seg
5309 will be zero at this point, and the specified segment prefix will
5310 always be used. */
5311 if ((i.seg[0]) && (i.seg[0] != default_seg))
5312 {
5313 if (!add_prefix (i.seg[0]->seg_prefix))
5314 return 0;
5315 }
5316 return 1;
5317 }
5318
5319 static const seg_entry *
5320 build_modrm_byte (void)
5321 {
5322 const seg_entry *default_seg = 0;
5323 unsigned int source, dest;
5324 int vex_3_sources;
5325
5326 /* The first operand of instructions with VEX prefix and 3 sources
5327 must be VEX_Imm4. */
5328 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5329 if (vex_3_sources)
5330 {
5331 unsigned int nds, reg_slot;
5332 expressionS *exp;
5333
5334 if (i.tm.opcode_modifier.veximmext
5335 && i.tm.opcode_modifier.immext)
5336 {
5337 dest = i.operands - 2;
5338 gas_assert (dest == 3);
5339 }
5340 else
5341 dest = i.operands - 1;
5342 nds = dest - 1;
5343
5344 /* There are 2 kinds of instructions:
5345 1. 5 operands: 4 register operands or 3 register operands
5346 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5347 VexW0 or VexW1. The destination must be either XMM or YMM
5348 register.
5349 2. 4 operands: 4 register operands or 3 register operands
5350 plus 1 memory operand, VexXDS, and VexImmExt */
5351 gas_assert ((i.reg_operands == 4
5352 || (i.reg_operands == 3 && i.mem_operands == 1))
5353 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5354 && (i.tm.opcode_modifier.veximmext
5355 || (i.imm_operands == 1
5356 && i.types[0].bitfield.vec_imm4
5357 && (i.tm.opcode_modifier.vexw == VEXW0
5358 || i.tm.opcode_modifier.vexw == VEXW1)
5359 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5360 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5361
5362 if (i.imm_operands == 0)
5363 {
5364 /* When there is no immediate operand, generate an 8bit
5365 immediate operand to encode the first operand. */
5366 exp = &im_expressions[i.imm_operands++];
5367 i.op[i.operands].imms = exp;
5368 i.types[i.operands] = imm8;
5369 i.operands++;
5370 /* If VexW1 is set, the first operand is the source and
5371 the second operand is encoded in the immediate operand. */
5372 if (i.tm.opcode_modifier.vexw == VEXW1)
5373 {
5374 source = 0;
5375 reg_slot = 1;
5376 }
5377 else
5378 {
5379 source = 1;
5380 reg_slot = 0;
5381 }
5382
5383 /* FMA swaps REG and NDS. */
5384 if (i.tm.cpu_flags.bitfield.cpufma)
5385 {
5386 unsigned int tmp;
5387 tmp = reg_slot;
5388 reg_slot = nds;
5389 nds = tmp;
5390 }
5391
5392 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5393 &regxmm)
5394 || operand_type_equal (&i.tm.operand_types[reg_slot],
5395 &regymm));
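	  /* The register is encoded in bits 7:4 of the immediate byte
	     (the VEX /is4 form); the low nibble is left as zero
	     here.  */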
5396 exp->X_op = O_constant;
5397 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5398 }
5399 else
5400 {
5401 unsigned int imm_slot;
5402
5403 if (i.tm.opcode_modifier.vexw == VEXW0)
5404 {
5405 /* If VexW0 is set, the third operand is the source and
5406 the second operand is encoded in the immediate
5407 operand. */
5408 source = 2;
5409 reg_slot = 1;
5410 }
5411 else
5412 {
5413 /* VexW1 is set, the second operand is the source and
5414 the third operand is encoded in the immediate
5415 operand. */
5416 source = 1;
5417 reg_slot = 2;
5418 }
5419
5420 if (i.tm.opcode_modifier.immext)
5421 {
5422 /* When ImmExt is set, the immediate byte is the last
5423 operand. */
5424 imm_slot = i.operands - 1;
5425 source--;
5426 reg_slot--;
5427 }
5428 else
5429 {
5430 imm_slot = 0;
5431
5432 /* Turn on Imm8 so that output_imm will generate it. */
5433 i.types[imm_slot].bitfield.imm8 = 1;
5434 }
5435
5436 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5437 &regxmm)
5438 || operand_type_equal (&i.tm.operand_types[reg_slot],
5439 &regymm));
5440 i.op[imm_slot].imms->X_add_number
5441 |= register_number (i.op[reg_slot].regs) << 4;
5442 }
5443
5444 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5445 || operand_type_equal (&i.tm.operand_types[nds],
5446 &regymm));
5447 i.vex.register_specifier = i.op[nds].regs;
5448 }
5449 else
5450 source = dest = 0;
5451
5452 /* i.reg_operands MUST be the number of real register operands;
5453 implicit registers do not count. If there are 3 register
5454 operands, it must be an instruction with VexNDS. For an
5455 instruction with VexNDD, the destination register is encoded
5456 in the VEX prefix. If there are 4 register operands, it must be
5457 an instruction with a VEX prefix and 3 sources. */
5458 if (i.mem_operands == 0
5459 && ((i.reg_operands == 2
5460 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5461 || (i.reg_operands == 3
5462 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5463 || (i.reg_operands == 4 && vex_3_sources)))
5464 {
5465 switch (i.operands)
5466 {
5467 case 2:
5468 source = 0;
5469 break;
5470 case 3:
5471 /* When there are 3 operands, one of them may be immediate,
5472 which may be the first or the last operand. Otherwise,
5473 the first operand must be shift count register (cl) or it
5474 is an instruction with VexNDS. */
5475 gas_assert (i.imm_operands == 1
5476 || (i.imm_operands == 0
5477 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5478 || i.types[0].bitfield.shiftcount)));
5479 if (operand_type_check (i.types[0], imm)
5480 || i.types[0].bitfield.shiftcount)
5481 source = 1;
5482 else
5483 source = 0;
5484 break;
5485 case 4:
5486 /* When there are 4 operands, the first two must be 8bit
5487 immediate operands. The source operand will be the 3rd
5488 one.
5489
5490 For instructions with VexNDS, if the first operand is
5491 an imm8, the source operand is the 2nd one. If the last
5492 operand is imm8, the source operand is the first one. */
5493 gas_assert ((i.imm_operands == 2
5494 && i.types[0].bitfield.imm8
5495 && i.types[1].bitfield.imm8)
5496 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5497 && i.imm_operands == 1
5498 && (i.types[0].bitfield.imm8
5499 || i.types[i.operands - 1].bitfield.imm8)));
5500 if (i.imm_operands == 2)
5501 source = 2;
5502 else
5503 {
5504 if (i.types[0].bitfield.imm8)
5505 source = 1;
5506 else
5507 source = 0;
5508 }
5509 break;
5510 case 5:
5511 break;
5512 default:
5513 abort ();
5514 }
5515
5516 if (!vex_3_sources)
5517 {
5518 dest = source + 1;
5519
5520 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5521 {
5522 /* For instructions with VexNDS, the register-only
5523 source operand must be 32/64bit integer, XMM or
5524 YMM register. It is encoded in VEX prefix. We
5525 need to clear RegMem bit before calling
5526 operand_type_equal. */
5527
5528 i386_operand_type op;
5529 unsigned int vvvv;
5530
5531 /* Check register-only source operand when two source
5532 operands are swapped. */
5533 if (!i.tm.operand_types[source].bitfield.baseindex
5534 && i.tm.operand_types[dest].bitfield.baseindex)
5535 {
5536 vvvv = source;
5537 source = dest;
5538 }
5539 else
5540 vvvv = dest;
5541
5542 op = i.tm.operand_types[vvvv];
5543 op.bitfield.regmem = 0;
5544 if ((dest + 1) >= i.operands
5545 || (op.bitfield.reg32 != 1
5546 && op.bitfield.reg64 != 1
5547 && !operand_type_equal (&op, &regxmm)
5548 && !operand_type_equal (&op, &regymm)))
5549 abort ();
5550 i.vex.register_specifier = i.op[vvvv].regs;
5551 dest++;
5552 }
5553 }
5554
5555 i.rm.mode = 3;
5556 /* One of the register operands will be encoded in the i.rm.reg
5557 field, the other in the combined i.rm.mode and i.rm.regmem
5558 fields. If no form of this instruction supports a memory
5559 destination operand, then we assume the source operand may
5560 sometimes be a memory operand and so we need to store the
5561 destination in the i.rm.reg field. */
5562 if (!i.tm.operand_types[dest].bitfield.regmem
5563 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5564 {
5565 i.rm.reg = i.op[dest].regs->reg_num;
5566 i.rm.regmem = i.op[source].regs->reg_num;
5567 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5568 i.rex |= REX_R;
5569 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5570 i.rex |= REX_B;
5571 }
5572 else
5573 {
5574 i.rm.reg = i.op[source].regs->reg_num;
5575 i.rm.regmem = i.op[dest].regs->reg_num;
5576 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5577 i.rex |= REX_B;
5578 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5579 i.rex |= REX_R;
5580 }
5581 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5582 {
5583 if (!i.types[0].bitfield.control
5584 && !i.types[1].bitfield.control)
5585 abort ();
5586 i.rex &= ~(REX_R | REX_B);
5587 add_prefix (LOCK_PREFIX_OPCODE);
5588 }
5589 }
5590 else
5591 { /* If it's not 2 reg operands... */
5592 unsigned int mem;
5593
5594 if (i.mem_operands)
5595 {
5596 unsigned int fake_zero_displacement = 0;
5597 unsigned int op;
5598
5599 for (op = 0; op < i.operands; op++)
5600 if (operand_type_check (i.types[op], anymem))
5601 break;
5602 gas_assert (op < i.operands);
5603
5604 if (i.tm.opcode_modifier.vecsib)
5605 {
5606 if (i.index_reg->reg_num == RegEiz
5607 || i.index_reg->reg_num == RegRiz)
5608 abort ();
5609
5610 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5611 if (!i.base_reg)
5612 {
5613 i.sib.base = NO_BASE_REGISTER;
5614 i.sib.scale = i.log2_scale_factor;
5615 i.types[op].bitfield.disp8 = 0;
5616 i.types[op].bitfield.disp16 = 0;
5617 i.types[op].bitfield.disp64 = 0;
5618 if (flag_code != CODE_64BIT)
5619 {
5620 /* Must be 32 bit */
5621 i.types[op].bitfield.disp32 = 1;
5622 i.types[op].bitfield.disp32s = 0;
5623 }
5624 else
5625 {
5626 i.types[op].bitfield.disp32 = 0;
5627 i.types[op].bitfield.disp32s = 1;
5628 }
5629 }
5630 i.sib.index = i.index_reg->reg_num;
5631 if ((i.index_reg->reg_flags & RegRex) != 0)
5632 i.rex |= REX_X;
5633 }
5634
5635 default_seg = &ds;
5636
5637 if (i.base_reg == 0)
5638 {
5639 i.rm.mode = 0;
5640 if (!i.disp_operands)
5641 {
5642 fake_zero_displacement = 1;
5643 /* Instructions with VSIB byte need 32bit displacement
5644 if there is no base register. */
5645 if (i.tm.opcode_modifier.vecsib)
5646 i.types[op].bitfield.disp32 = 1;
5647 }
5648 if (i.index_reg == 0)
5649 {
5650 gas_assert (!i.tm.opcode_modifier.vecsib);
5651 /* Operand is just <disp> */
5652 if (flag_code == CODE_64BIT)
5653 {
5654 /* 64bit mode overwrites the 32bit absolute
5655 addressing by RIP relative addressing and
5656 absolute addressing is encoded by one of the
5657 redundant SIB forms. */
5658 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5659 i.sib.base = NO_BASE_REGISTER;
5660 i.sib.index = NO_INDEX_REGISTER;
5661 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5662 ? disp32s : disp32);
5663 }
5664 else if ((flag_code == CODE_16BIT)
5665 ^ (i.prefix[ADDR_PREFIX] != 0))
5666 {
5667 i.rm.regmem = NO_BASE_REGISTER_16;
5668 i.types[op] = disp16;
5669 }
5670 else
5671 {
5672 i.rm.regmem = NO_BASE_REGISTER;
5673 i.types[op] = disp32;
5674 }
5675 }
5676 else if (!i.tm.opcode_modifier.vecsib)
5677 {
5678 /* !i.base_reg && i.index_reg */
5679 if (i.index_reg->reg_num == RegEiz
5680 || i.index_reg->reg_num == RegRiz)
5681 i.sib.index = NO_INDEX_REGISTER;
5682 else
5683 i.sib.index = i.index_reg->reg_num;
5684 i.sib.base = NO_BASE_REGISTER;
5685 i.sib.scale = i.log2_scale_factor;
5686 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5687 i.types[op].bitfield.disp8 = 0;
5688 i.types[op].bitfield.disp16 = 0;
5689 i.types[op].bitfield.disp64 = 0;
5690 if (flag_code != CODE_64BIT)
5691 {
5692 /* Must be 32 bit */
5693 i.types[op].bitfield.disp32 = 1;
5694 i.types[op].bitfield.disp32s = 0;
5695 }
5696 else
5697 {
5698 i.types[op].bitfield.disp32 = 0;
5699 i.types[op].bitfield.disp32s = 1;
5700 }
5701 if ((i.index_reg->reg_flags & RegRex) != 0)
5702 i.rex |= REX_X;
5703 }
5704 }
5705 /* RIP addressing for 64bit mode. */
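	  /* E.g. `movl foo(%rip), %eax': always a signed 32-bit
	     PC-relative displacement.  */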
5706 else if (i.base_reg->reg_num == RegRip ||
5707 i.base_reg->reg_num == RegEip)
5708 {
5709 gas_assert (!i.tm.opcode_modifier.vecsib);
5710 i.rm.regmem = NO_BASE_REGISTER;
5711 i.types[op].bitfield.disp8 = 0;
5712 i.types[op].bitfield.disp16 = 0;
5713 i.types[op].bitfield.disp32 = 0;
5714 i.types[op].bitfield.disp32s = 1;
5715 i.types[op].bitfield.disp64 = 0;
5716 i.flags[op] |= Operand_PCrel;
5717 if (! i.disp_operands)
5718 fake_zero_displacement = 1;
5719 }
5720 else if (i.base_reg->reg_type.bitfield.reg16)
5721 {
5722 gas_assert (!i.tm.opcode_modifier.vecsib);
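	      /* 16-bit ModRM r/m encodings: 0 (%bx,%si), 1 (%bx,%di),
		 2 (%bp,%si), 3 (%bp,%di), 4 (%si), 5 (%di), 6 (%bp),
		 7 (%bx).  r/m 6 with mod 0 means bare disp16, hence
		 the fake zero displacement for (%bp) below.  */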
5723 switch (i.base_reg->reg_num)
5724 {
5725 case 3: /* (%bx) */
5726 if (i.index_reg == 0)
5727 i.rm.regmem = 7;
5728 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5729 i.rm.regmem = i.index_reg->reg_num - 6;
5730 break;
5731 case 5: /* (%bp) */
5732 default_seg = &ss;
5733 if (i.index_reg == 0)
5734 {
5735 i.rm.regmem = 6;
5736 if (operand_type_check (i.types[op], disp) == 0)
5737 {
5738 /* fake (%bp) into 0(%bp) */
5739 i.types[op].bitfield.disp8 = 1;
5740 fake_zero_displacement = 1;
5741 }
5742 }
5743 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5744 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5745 break;
5746 default: /* (%si) -> 4 or (%di) -> 5 */
5747 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5748 }
5749 i.rm.mode = mode_from_disp_size (i.types[op]);
5750 }
5751 else /* i.base_reg and 32/64 bit mode */
5752 {
5753 if (flag_code == CODE_64BIT
5754 && operand_type_check (i.types[op], disp))
5755 {
5756 i386_operand_type temp;
5757 operand_type_set (&temp, 0);
5758 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5759 i.types[op] = temp;
5760 if (i.prefix[ADDR_PREFIX] == 0)
5761 i.types[op].bitfield.disp32s = 1;
5762 else
5763 i.types[op].bitfield.disp32 = 1;
5764 }
5765
5766 if (!i.tm.opcode_modifier.vecsib)
5767 i.rm.regmem = i.base_reg->reg_num;
5768 if ((i.base_reg->reg_flags & RegRex) != 0)
5769 i.rex |= REX_B;
5770 i.sib.base = i.base_reg->reg_num;
5771 /* x86-64 ignores REX prefix bit here to avoid decoder
5772 complications. */
5773 if (!(i.base_reg->reg_flags & RegRex)
5774 && (i.base_reg->reg_num == EBP_REG_NUM
5775 || i.base_reg->reg_num == ESP_REG_NUM))
5776 default_seg = &ss;
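	      /* With mod 0, a base of 101 (%ebp/%r13) would mean
		 disp32 with no base, so bare (%ebp) is encoded as
		 0(%ebp) via a fake disp8.  */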
5777 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5778 {
5779 fake_zero_displacement = 1;
5780 i.types[op].bitfield.disp8 = 1;
5781 }
5782 i.sib.scale = i.log2_scale_factor;
5783 if (i.index_reg == 0)
5784 {
5785 gas_assert (!i.tm.opcode_modifier.vecsib);
5786 /* <disp>(%esp) becomes two byte modrm with no index
5787 register. We've already stored the code for esp
5788 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5789 Any base register besides %esp will not use the
5790 extra modrm byte. */
5791 i.sib.index = NO_INDEX_REGISTER;
5792 }
5793 else if (!i.tm.opcode_modifier.vecsib)
5794 {
5795 if (i.index_reg->reg_num == RegEiz
5796 || i.index_reg->reg_num == RegRiz)
5797 i.sib.index = NO_INDEX_REGISTER;
5798 else
5799 i.sib.index = i.index_reg->reg_num;
5800 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5801 if ((i.index_reg->reg_flags & RegRex) != 0)
5802 i.rex |= REX_X;
5803 }
5804
5805 if (i.disp_operands
5806 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5807 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5808 i.rm.mode = 0;
5809 else
5810 {
5811 if (!fake_zero_displacement
5812 && !i.disp_operands
5813 && i.disp_encoding)
5814 {
5815 fake_zero_displacement = 1;
5816 if (i.disp_encoding == disp_encoding_8bit)
5817 i.types[op].bitfield.disp8 = 1;
5818 else
5819 i.types[op].bitfield.disp32 = 1;
5820 }
5821 i.rm.mode = mode_from_disp_size (i.types[op]);
5822 }
5823 }
5824
5825 if (fake_zero_displacement)
5826 {
5827 /* Fakes a zero displacement assuming that i.types[op]
5828 holds the correct displacement size. */
5829 expressionS *exp;
5830
5831 gas_assert (i.op[op].disps == 0);
5832 exp = &disp_expressions[i.disp_operands++];
5833 i.op[op].disps = exp;
5834 exp->X_op = O_constant;
5835 exp->X_add_number = 0;
5836 exp->X_add_symbol = (symbolS *) 0;
5837 exp->X_op_symbol = (symbolS *) 0;
5838 }
5839
5840 mem = op;
5841 }
5842 else
5843 mem = ~0;
5844
5845 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5846 {
5847 if (operand_type_check (i.types[0], imm))
5848 i.vex.register_specifier = NULL;
5849 else
5850 {
5851 /* VEX.vvvv encodes one of the sources when the first
5852 operand is not an immediate. */
5853 if (i.tm.opcode_modifier.vexw == VEXW0)
5854 i.vex.register_specifier = i.op[0].regs;
5855 else
5856 i.vex.register_specifier = i.op[1].regs;
5857 }
5858
5859 /* Destination is a XMM register encoded in the ModRM.reg
5860 and VEX.R bit. */
5861 i.rm.reg = i.op[2].regs->reg_num;
5862 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5863 i.rex |= REX_R;
5864
5865 /* ModRM.rm and VEX.B encodes the other source. */
5866 if (!i.mem_operands)
5867 {
5868 i.rm.mode = 3;
5869
5870 if (i.tm.opcode_modifier.vexw == VEXW0)
5871 i.rm.regmem = i.op[1].regs->reg_num;
5872 else
5873 i.rm.regmem = i.op[0].regs->reg_num;
5874
5875 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5876 i.rex |= REX_B;
5877 }
5878 }
5879 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5880 {
5881 i.vex.register_specifier = i.op[2].regs;
5882 if (!i.mem_operands)
5883 {
5884 i.rm.mode = 3;
5885 i.rm.regmem = i.op[1].regs->reg_num;
5886 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5887 i.rex |= REX_B;
5888 }
5889 }
5890 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5891 (if any) based on i.tm.extension_opcode. Again, we must be
5892 careful to make sure that segment/control/debug/test/MMX
5893 registers are coded into the i.rm.reg field. */
5894 else if (i.reg_operands)
5895 {
5896 unsigned int op;
5897 unsigned int vex_reg = ~0;
5898
5899 for (op = 0; op < i.operands; op++)
5900 if (i.types[op].bitfield.reg8
5901 || i.types[op].bitfield.reg16
5902 || i.types[op].bitfield.reg32
5903 || i.types[op].bitfield.reg64
5904 || i.types[op].bitfield.regmmx
5905 || i.types[op].bitfield.regxmm
5906 || i.types[op].bitfield.regymm
5907 || i.types[op].bitfield.sreg2
5908 || i.types[op].bitfield.sreg3
5909 || i.types[op].bitfield.control
5910 || i.types[op].bitfield.debug
5911 || i.types[op].bitfield.test)
5912 break;
5913
5914 if (vex_3_sources)
5915 op = dest;
5916 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5917 {
5918 /* For instructions with VexNDS, the register-only
5919 source operand is encoded in VEX prefix. */
5920 gas_assert (mem != (unsigned int) ~0);
5921
5922 if (op > mem)
5923 {
5924 vex_reg = op++;
5925 gas_assert (op < i.operands);
5926 }
5927 else
5928 {
5929 /* Check register-only source operand when two source
5930 operands are swapped. */
5931 if (!i.tm.operand_types[op].bitfield.baseindex
5932 && i.tm.operand_types[op + 1].bitfield.baseindex)
5933 {
5934 vex_reg = op;
5935 op += 2;
5936 gas_assert (mem == (vex_reg + 1)
5937 && op < i.operands);
5938 }
5939 else
5940 {
5941 vex_reg = op + 1;
5942 gas_assert (vex_reg < i.operands);
5943 }
5944 }
5945 }
5946 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5947 {
5948 /* For instructions with VexNDD, the register destination
5949 is encoded in VEX prefix. */
5950 if (i.mem_operands == 0)
5951 {
5952 /* There is no memory operand. */
5953 gas_assert ((op + 2) == i.operands);
5954 vex_reg = op + 1;
5955 }
5956 else
5957 {
5958 /* There are only 2 operands. */
5959 gas_assert (op < 2 && i.operands == 2);
5960 vex_reg = 1;
5961 }
5962 }
5963 else
5964 gas_assert (op < i.operands);
5965
5966 if (vex_reg != (unsigned int) ~0)
5967 {
5968 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5969
5970 if (type->bitfield.reg32 != 1
5971 && type->bitfield.reg64 != 1
5972 && !operand_type_equal (type, &regxmm)
5973 && !operand_type_equal (type, &regymm))
5974 abort ();
5975
5976 i.vex.register_specifier = i.op[vex_reg].regs;
5977 }
5978
5979 /* Don't set OP operand twice. */
5980 if (vex_reg != op)
5981 {
5982 /* If there is an extension opcode to put here, the
5983 register number must be put into the regmem field. */
5984 if (i.tm.extension_opcode != None)
5985 {
5986 i.rm.regmem = i.op[op].regs->reg_num;
5987 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5988 i.rex |= REX_B;
5989 }
5990 else
5991 {
5992 i.rm.reg = i.op[op].regs->reg_num;
5993 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5994 i.rex |= REX_R;
5995 }
5996 }
5997
5998 /* Now, if no memory operand has set i.rm.mode = 0, 1, or 2, we
5999 must set it to 3 to indicate that this is a register operand
6000 in the regmem field. */
6001 if (!i.mem_operands)
6002 i.rm.mode = 3;
6003 }
6004
6005 /* Fill in i.rm.reg field with extension opcode (if any). */
6006 if (i.tm.extension_opcode != None)
6007 i.rm.reg = i.tm.extension_opcode;
6008 }
6009 return default_seg;
6010 }
6011
6012 static void
6013 output_branch (void)
6014 {
6015 char *p;
6016 int size;
6017 int code16;
6018 int prefix;
6019 relax_substateT subtype;
6020 symbolS *sym;
6021 offsetT off;
6022
6023 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
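/* Start from the short-displacement relax state unless a 32-bit
   displacement encoding was explicitly requested. */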
6024 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6025
6026 prefix = 0;
6027 if (i.prefix[DATA_PREFIX] != 0)
6028 {
6029 prefix = 1;
6030 i.prefixes -= 1;
6031 code16 ^= CODE16;
6032 }
6033 /* Pentium4 branch hints. */
6034 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6035 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6036 {
6037 prefix++;
6038 i.prefixes--;
6039 }
6040 if (i.prefix[REX_PREFIX] != 0)
6041 {
6042 prefix++;
6043 i.prefixes--;
6044 }
6045
6046 if (i.prefixes != 0 && !intel_syntax)
6047 as_warn (_("skipping prefixes on this instruction"));
6048
6049 /* It's always a symbol; end frag & set up for relax.
6050 Make sure there is enough room in this frag for the largest
6051 instruction we may generate in md_convert_frag. This is 2
6052 bytes for the opcode and room for the prefix and largest
6053 displacement. */
6054 frag_grow (prefix + 2 + 4);
6055 /* Prefix and 1 opcode byte go in fr_fix. */
6056 p = frag_more (prefix + 1);
6057 if (i.prefix[DATA_PREFIX] != 0)
6058 *p++ = DATA_PREFIX_OPCODE;
6059 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6060 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6061 *p++ = i.prefix[SEG_PREFIX];
6062 if (i.prefix[REX_PREFIX] != 0)
6063 *p++ = i.prefix[REX_PREFIX];
6064 *p = i.tm.base_opcode;
6065
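/* Pick the initial relax state: JUMP_PC_RELATIVE is an unconditional
   jump; conditional jumps use COND_JUMP, or COND_JUMP86 when the arch
   predates the 386 0x0f,0x8N word/dword forms and must instead relax
   to a jump around an unconditional jump (see
   md_estimate_size_before_relax). */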
6066 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6067 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6068 else if (cpu_arch_flags.bitfield.cpui386)
6069 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6070 else
6071 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6072 subtype |= code16;
6073
6074 sym = i.op[0].disps->X_add_symbol;
6075 off = i.op[0].disps->X_add_number;
6076
6077 if (i.op[0].disps->X_op != O_constant
6078 && i.op[0].disps->X_op != O_symbol)
6079 {
6080 /* Handle complex expressions. */
6081 sym = make_expr_symbol (i.op[0].disps);
6082 off = 0;
6083 }
6084
6085 /* 1 possible extra opcode + 4 byte displacement go in var part.
6086 Pass reloc in fr_var. */
6087 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6088 }
6089
6090 static void
6091 output_jump (void)
6092 {
6093 char *p;
6094 int size;
6095 fixS *fixP;
6096
6097 if (i.tm.opcode_modifier.jumpbyte)
6098 {
6099 /* This is a loop or jecxz type instruction. */
6100 size = 1;
6101 if (i.prefix[ADDR_PREFIX] != 0)
6102 {
6103 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6104 i.prefixes -= 1;
6105 }
6106 /* Pentium4 branch hints. */
6107 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6108 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6109 {
6110 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6111 i.prefixes--;
6112 }
6113 }
6114 else
6115 {
6116 int code16;
6117
6118 code16 = 0;
6119 if (flag_code == CODE_16BIT)
6120 code16 = CODE16;
6121
6122 if (i.prefix[DATA_PREFIX] != 0)
6123 {
6124 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6125 i.prefixes -= 1;
6126 code16 ^= CODE16;
6127 }
6128
6129 size = 4;
6130 if (code16)
6131 size = 2;
6132 }
6133
6134 if (i.prefix[REX_PREFIX] != 0)
6135 {
6136 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6137 i.prefixes -= 1;
6138 }
6139
6140 if (i.prefixes != 0 && !intel_syntax)
6141 as_warn (_("skipping prefixes on this instruction"));
6142
6143 p = frag_more (i.tm.opcode_length + size);
6144 switch (i.tm.opcode_length)
6145 {
6146 case 2:
6147 *p++ = i.tm.base_opcode >> 8;
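/* Fall through. */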
6148 case 1:
6149 *p++ = i.tm.base_opcode;
6150 break;
6151 default:
6152 abort ();
6153 }
6154
6155 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6156 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6157
6158 /* All jumps handled here are signed, but don't use a signed limit
6159 check for 32 and 16 bit jumps as we want to allow wrap around at
6160 4G and 64k respectively. */
6161 if (size == 1)
6162 fixP->fx_signed = 1;
6163 }
6164
6165 static void
6166 output_interseg_jump (void)
6167 {
6168 char *p;
6169 int size;
6170 int prefix;
6171 int code16;
6172
6173 code16 = 0;
6174 if (flag_code == CODE_16BIT)
6175 code16 = CODE16;
6176
6177 prefix = 0;
6178 if (i.prefix[DATA_PREFIX] != 0)
6179 {
6180 prefix = 1;
6181 i.prefixes -= 1;
6182 code16 ^= CODE16;
6183 }
6184 if (i.prefix[REX_PREFIX] != 0)
6185 {
6186 prefix++;
6187 i.prefixes -= 1;
6188 }
6189
6190 size = 4;
6191 if (code16)
6192 size = 2;
6193
6194 if (i.prefixes != 0 && !intel_syntax)
6195 as_warn (_("skipping prefixes on this instruction"));
6196
6197 /* 1 opcode; 2 segment; offset */
6198 p = frag_more (prefix + 1 + 2 + size);
6199
6200 if (i.prefix[DATA_PREFIX] != 0)
6201 *p++ = DATA_PREFIX_OPCODE;
6202
6203 if (i.prefix[REX_PREFIX] != 0)
6204 *p++ = i.prefix[REX_PREFIX];
6205
6206 *p++ = i.tm.base_opcode;
6207 if (i.op[1].imms->X_op == O_constant)
6208 {
6209 offsetT n = i.op[1].imms->X_add_number;
6210
6211 if (size == 2
6212 && !fits_in_unsigned_word (n)
6213 && !fits_in_signed_word (n))
6214 {
6215 as_bad (_("16-bit jump out of range"));
6216 return;
6217 }
6218 md_number_to_chars (p, n, size);
6219 }
6220 else
6221 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6222 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6223 if (i.op[0].imms->X_op != O_constant)
6224 as_bad (_("can't handle non absolute segment in `%s'"),
6225 i.tm.name);
6226 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6227 }
6228
6229 static void
6230 output_insn (void)
6231 {
6232 fragS *insn_start_frag;
6233 offsetT insn_start_off;
6234
6235 /* Tie dwarf2 debug info to the address at the start of the insn.
6236 We can't do this after the insn has been output as the current
6237 frag may have been closed off. eg. by frag_var. */
6238 dwarf2_emit_insn (0);
6239
6240 insn_start_frag = frag_now;
6241 insn_start_off = frag_now_fix ();
6242
6243 /* Output jumps. */
6244 if (i.tm.opcode_modifier.jump)
6245 output_branch ();
6246 else if (i.tm.opcode_modifier.jumpbyte
6247 || i.tm.opcode_modifier.jumpdword)
6248 output_jump ();
6249 else if (i.tm.opcode_modifier.jumpintersegment)
6250 output_interseg_jump ();
6251 else
6252 {
6253 /* Output normal instructions here. */
6254 char *p;
6255 unsigned char *q;
6256 unsigned int j;
6257 unsigned int prefix;
6258
6259 /* Since the VEX prefix contains the implicit prefix, we don't
6260 need the explicit prefix. */
6261 if (!i.tm.opcode_modifier.vex)
6262 {
6263 switch (i.tm.opcode_length)
6264 {
6265 case 3:
6266 if (i.tm.base_opcode & 0xff000000)
6267 {
6268 prefix = (i.tm.base_opcode >> 24) & 0xff;
6269 goto check_prefix;
6270 }
6271 break;
6272 case 2:
6273 if ((i.tm.base_opcode & 0xff0000) != 0)
6274 {
6275 prefix = (i.tm.base_opcode >> 16) & 0xff;
6276 if (i.tm.cpu_flags.bitfield.cpupadlock)
6277 {
6278 check_prefix:
6279 if (prefix != REPE_PREFIX_OPCODE
6280 || (i.prefix[REP_PREFIX]
6281 != REPE_PREFIX_OPCODE))
6282 add_prefix (prefix);
6283 }
6284 else
6285 add_prefix (prefix);
6286 }
6287 break;
6288 case 1:
6289 break;
6290 default:
6291 abort ();
6292 }
6293
6294 /* The prefix bytes. */
6295 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6296 if (*q)
6297 FRAG_APPEND_1_CHAR (*q);
6298 }
6299 else
6300 {
6301 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6302 if (*q)
6303 switch (j)
6304 {
6305 case REX_PREFIX:
6306 /* REX byte is encoded in VEX prefix. */
6307 break;
6308 case SEG_PREFIX:
6309 case ADDR_PREFIX:
6310 FRAG_APPEND_1_CHAR (*q);
6311 break;
6312 default:
6313 /* There should be no other prefixes for instructions
6314 with a VEX prefix. */
6315 abort ();
6316 }
6317
6318 /* Now the VEX prefix. */
6319 p = frag_more (i.vex.length);
6320 for (j = 0; j < i.vex.length; j++)
6321 p[j] = i.vex.bytes[j];
6322 }
6323
6324 /* Now the opcode; be careful about word order here! */
6325 if (i.tm.opcode_length == 1)
6326 {
6327 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6328 }
6329 else
6330 {
6331 switch (i.tm.opcode_length)
6332 {
6333 case 3:
6334 p = frag_more (3);
6335 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6336 break;
6337 case 2:
6338 p = frag_more (2);
6339 break;
6340 default:
6341 abort ();
6342 break;
6343 }
6344
6345 /* Put out high byte first: can't use md_number_to_chars! */
6346 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6347 *p = i.tm.base_opcode & 0xff;
6348 }
6349
6350 /* Now the modrm byte and sib byte (if present). */
6351 if (i.tm.opcode_modifier.modrm)
6352 {
6353 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6354 | i.rm.reg << 3
6355 | i.rm.mode << 6));
6356 /* If i.rm.regmem == ESP (4)
6357 && i.rm.mode != (Register mode)
6358 && not 16 bit
6359 ==> need second modrm byte. */
6360 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6361 && i.rm.mode != 3
6362 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6363 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6364 | i.sib.index << 3
6365 | i.sib.scale << 6));
6366 }
6367
6368 if (i.disp_operands)
6369 output_disp (insn_start_frag, insn_start_off);
6370
6371 if (i.imm_operands)
6372 output_imm (insn_start_frag, insn_start_off);
6373 }
6374
6375 #ifdef DEBUG386
6376 if (flag_debug)
6377 {
6378 pi ("" /*line*/, &i);
6379 }
6380 #endif /* DEBUG386 */
6381 }
6382
6383 /* Return the size of the displacement operand N. */
6384
6385 static int
6386 disp_size (unsigned int n)
6387 {
6388 int size = 4;
6389 if (i.types[n].bitfield.disp64)
6390 size = 8;
6391 else if (i.types[n].bitfield.disp8)
6392 size = 1;
6393 else if (i.types[n].bitfield.disp16)
6394 size = 2;
6395 return size;
6396 }
6397
6398 /* Return the size of the immediate operand N. */
6399
6400 static int
6401 imm_size (unsigned int n)
6402 {
6403 int size = 4;
6404 if (i.types[n].bitfield.imm64)
6405 size = 8;
6406 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6407 size = 1;
6408 else if (i.types[n].bitfield.imm16)
6409 size = 2;
6410 return size;
6411 }
6412
6413 static void
6414 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6415 {
6416 char *p;
6417 unsigned int n;
6418
6419 for (n = 0; n < i.operands; n++)
6420 {
6421 if (operand_type_check (i.types[n], disp))
6422 {
6423 if (i.op[n].disps->X_op == O_constant)
6424 {
6425 int size = disp_size (n);
6426 offsetT val;
6427
6428 val = offset_in_range (i.op[n].disps->X_add_number,
6429 size);
6430 p = frag_more (size);
6431 md_number_to_chars (p, val, size);
6432 }
6433 else
6434 {
6435 enum bfd_reloc_code_real reloc_type;
6436 int size = disp_size (n);
6437 int sign = i.types[n].bitfield.disp32s;
6438 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6439
6440 /* We can't have 8 bit displacement here. */
6441 gas_assert (!i.types[n].bitfield.disp8);
6442
6443 /* The PC relative address is computed relative
6444 to the instruction boundary, so if immediate
6445 fields follow, we need to adjust the value. */
6446 if (pcrel && i.imm_operands)
6447 {
6448 unsigned int n1;
6449 int sz = 0;
6450
6451 for (n1 = 0; n1 < i.operands; n1++)
6452 if (operand_type_check (i.types[n1], imm))
6453 {
6454 /* Only one immediate is allowed for PC
6455 relative address. */
6456 gas_assert (sz == 0);
6457 sz = imm_size (n1);
6458 i.op[n].disps->X_add_number -= sz;
6459 }
6460 /* We should find the immediate. */
6461 gas_assert (sz != 0);
6462 }
6463
6464 p = frag_more (size);
6465 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6466 if (GOT_symbol
6467 && GOT_symbol == i.op[n].disps->X_add_symbol
6468 && (((reloc_type == BFD_RELOC_32
6469 || reloc_type == BFD_RELOC_X86_64_32S
6470 || (reloc_type == BFD_RELOC_64
6471 && object_64bit))
6472 && (i.op[n].disps->X_op == O_symbol
6473 || (i.op[n].disps->X_op == O_add
6474 && ((symbol_get_value_expression
6475 (i.op[n].disps->X_op_symbol)->X_op)
6476 == O_subtract))))
6477 || reloc_type == BFD_RELOC_32_PCREL))
6478 {
6479 offsetT add;
6480
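/* ADD is the number of bytes from the start of the insn to this
   fixup, accumulated across any frags opened in between. */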
6481 if (insn_start_frag == frag_now)
6482 add = (p - frag_now->fr_literal) - insn_start_off;
6483 else
6484 {
6485 fragS *fr;
6486
6487 add = insn_start_frag->fr_fix - insn_start_off;
6488 for (fr = insn_start_frag->fr_next;
6489 fr && fr != frag_now; fr = fr->fr_next)
6490 add += fr->fr_fix;
6491 add += p - frag_now->fr_literal;
6492 }
6493
6494 if (!object_64bit)
6495 {
6496 reloc_type = BFD_RELOC_386_GOTPC;
6497 i.op[n].imms->X_add_number += add;
6498 }
6499 else if (reloc_type == BFD_RELOC_64)
6500 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6501 else
6502 /* Don't do the adjustment for x86-64, as there
6503 the pcrel addressing is relative to the _next_
6504 insn, and that is taken care of in other code. */
6505 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6506 }
6507 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6508 i.op[n].disps, pcrel, reloc_type);
6509 }
6510 }
6511 }
6512 }
6513
6514 static void
6515 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6516 {
6517 char *p;
6518 unsigned int n;
6519
6520 for (n = 0; n < i.operands; n++)
6521 {
6522 if (operand_type_check (i.types[n], imm))
6523 {
6524 if (i.op[n].imms->X_op == O_constant)
6525 {
6526 int size = imm_size (n);
6527 offsetT val;
6528
6529 val = offset_in_range (i.op[n].imms->X_add_number,
6530 size);
6531 p = frag_more (size);
6532 md_number_to_chars (p, val, size);
6533 }
6534 else
6535 {
6536 /* Not absolute_section.
6537 Need a 32-bit fixup (don't support 8bit
6538 non-absolute imms). Try to support other
6539 sizes ... */
6540 enum bfd_reloc_code_real reloc_type;
6541 int size = imm_size (n);
6542 int sign;
6543
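/* Imm32S immediates are sign-extended to 64 bits by the CPU, so
   with a 64-bit operand size the fixup must be treated as signed. */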
6544 if (i.types[n].bitfield.imm32s
6545 && (i.suffix == QWORD_MNEM_SUFFIX
6546 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6547 sign = 1;
6548 else
6549 sign = 0;
6550
6551 p = frag_more (size);
6552 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6553
6554 /* This is tough to explain. We end up with this one if we
6555 * have operands that look like
6556 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6557 * obtain the absolute address of the GOT, and it is strongly
6558 * preferable from a performance point of view to avoid using
6559 * a runtime relocation for this. The actual sequence of
6560 * instructions often looks something like:
6561 *
6562 * call .L66
6563 * .L66:
6564 * popl %ebx
6565 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6566 *
6567 * The call and pop essentially return the absolute address
6568 * of the label .L66 and store it in %ebx. The linker itself
6569 * will ultimately change the first operand of the addl so
6570 * that %ebx points to the GOT, but to keep things simple, the
6571 * .o file must have this operand set so that it generates not
6572 * the absolute address of .L66, but the absolute address of
6573 * itself. This allows the linker itself to simply treat a GOTPC
6574 * relocation as asking for a pcrel offset to the GOT to be
6575 * added in, and the addend of the relocation is stored in the
6576 * operand field for the instruction itself.
6577 *
6578 * Our job here is to fix the operand so that it would add
6579 * the correct offset so that %ebx would point to itself. The
6580 * thing that is tricky is that .-.L66 will point to the
6581 * beginning of the instruction, so we need to further modify
6582 * the operand so that it will point to itself. There are
6583 * other cases where you have something like:
6584 *
6585 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6586 *
6587 * and here no correction would be required. Internally in
6588 * the assembler we treat operands of this form as not being
6589 * pcrel since the '.' is explicitly mentioned, and I wonder
6590 * whether it would simplify matters to do it this way. Who
6591 * knows. In earlier versions of the PIC patches, the
6592 * pcrel_adjust field was used to store the correction, but
6593 * since the expression is not pcrel, I felt it would be
6594 * confusing to do it this way. */
6595
6596 if ((reloc_type == BFD_RELOC_32
6597 || reloc_type == BFD_RELOC_X86_64_32S
6598 || reloc_type == BFD_RELOC_64)
6599 && GOT_symbol
6600 && GOT_symbol == i.op[n].imms->X_add_symbol
6601 && (i.op[n].imms->X_op == O_symbol
6602 || (i.op[n].imms->X_op == O_add
6603 && ((symbol_get_value_expression
6604 (i.op[n].imms->X_op_symbol)->X_op)
6605 == O_subtract))))
6606 {
6607 offsetT add;
6608
6609 if (insn_start_frag == frag_now)
6610 add = (p - frag_now->fr_literal) - insn_start_off;
6611 else
6612 {
6613 fragS *fr;
6614
6615 add = insn_start_frag->fr_fix - insn_start_off;
6616 for (fr = insn_start_frag->fr_next;
6617 fr && fr != frag_now; fr = fr->fr_next)
6618 add += fr->fr_fix;
6619 add += p - frag_now->fr_literal;
6620 }
6621
6622 if (!object_64bit)
6623 reloc_type = BFD_RELOC_386_GOTPC;
6624 else if (size == 4)
6625 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6626 else if (size == 8)
6627 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6628 i.op[n].imms->X_add_number += add;
6629 }
6630 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6631 i.op[n].imms, 0, reloc_type);
6632 }
6633 }
6634 }
6635 }
6636 \f
6637 /* x86_cons_fix_new is called via the expression parsing code when a
6638 reloc is needed. We use this hook to get the correct .got reloc. */
6639 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6640 static int cons_sign = -1;
6641
6642 void
6643 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6644 expressionS *exp)
6645 {
6646 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6647
6648 got_reloc = NO_RELOC;
6649
6650 #ifdef TE_PE
6651 if (exp->X_op == O_secrel)
6652 {
6653 exp->X_op = O_symbol;
6654 r = BFD_RELOC_32_SECREL;
6655 }
6656 #endif
6657
6658 fix_new_exp (frag, off, len, exp, 0, r);
6659 }
6660
6661 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6662 purpose of the `.dc.a' internal pseudo-op. */
6663
6664 int
6665 x86_address_bytes (void)
6666 {
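/* bfd_mach_x64_32 is the x32 ILP32 ABI, where addresses are 4 bytes
   even though bits_per_address is 64. */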
6667 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6668 return 4;
6669 return stdoutput->arch_info->bits_per_address / 8;
6670 }
6671
6672 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6673 || defined (LEX_AT)
6674 # define lex_got(reloc, adjust, types) NULL
6675 #else
6676 /* Parse operands of the form
6677 <symbol>@GOTOFF+<nnn>
6678 and similar .plt or .got references.
6679
6680 If we find one, set up the correct relocation in RELOC and copy the
6681 input string, minus the `@GOTOFF' into a malloc'd buffer for
6682 parsing by the calling routine. Return this buffer, and if ADJUST
6683 is non-null set it to the length of the string we removed from the
6684 input line. Otherwise return NULL. */
6685 static char *
6686 lex_got (enum bfd_reloc_code_real *rel,
6687 int *adjust,
6688 i386_operand_type *types)
6689 {
6690 /* Some of the relocations depend on the size of the field to
6691 be relocated. But in our callers i386_immediate and i386_displacement
6692 we don't yet know the operand size (this will be set by insn
6693 matching). Hence we record the word32 relocation here,
6694 and adjust the reloc according to the real size in reloc(). */
6695 static const struct {
6696 const char *str;
6697 int len;
6698 const enum bfd_reloc_code_real rel[2];
6699 const i386_operand_type types64;
6700 } gotrel[] = {
6701 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6702 BFD_RELOC_X86_64_PLTOFF64 },
6703 OPERAND_TYPE_IMM64 },
6704 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6705 BFD_RELOC_X86_64_PLT32 },
6706 OPERAND_TYPE_IMM32_32S_DISP32 },
6707 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6708 BFD_RELOC_X86_64_GOTPLT64 },
6709 OPERAND_TYPE_IMM64_DISP64 },
6710 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6711 BFD_RELOC_X86_64_GOTOFF64 },
6712 OPERAND_TYPE_IMM64_DISP64 },
6713 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6714 BFD_RELOC_X86_64_GOTPCREL },
6715 OPERAND_TYPE_IMM32_32S_DISP32 },
6716 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6717 BFD_RELOC_X86_64_TLSGD },
6718 OPERAND_TYPE_IMM32_32S_DISP32 },
6719 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6720 _dummy_first_bfd_reloc_code_real },
6721 OPERAND_TYPE_NONE },
6722 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6723 BFD_RELOC_X86_64_TLSLD },
6724 OPERAND_TYPE_IMM32_32S_DISP32 },
6725 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6726 BFD_RELOC_X86_64_GOTTPOFF },
6727 OPERAND_TYPE_IMM32_32S_DISP32 },
6728 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6729 BFD_RELOC_X86_64_TPOFF32 },
6730 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6731 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6732 _dummy_first_bfd_reloc_code_real },
6733 OPERAND_TYPE_NONE },
6734 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6735 BFD_RELOC_X86_64_DTPOFF32 },
6736 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6737 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6738 _dummy_first_bfd_reloc_code_real },
6739 OPERAND_TYPE_NONE },
6740 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6741 _dummy_first_bfd_reloc_code_real },
6742 OPERAND_TYPE_NONE },
6743 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6744 BFD_RELOC_X86_64_GOT32 },
6745 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6746 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6747 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6748 OPERAND_TYPE_IMM32_32S_DISP32 },
6749 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6750 BFD_RELOC_X86_64_TLSDESC_CALL },
6751 OPERAND_TYPE_IMM32_32S_DISP32 },
6752 };
6753 char *cp;
6754 unsigned int j;
6755
6756 #if defined (OBJ_MAYBE_ELF)
6757 if (!IS_ELF)
6758 return NULL;
6759 #endif
6760
6761 for (cp = input_line_pointer; *cp != '@'; cp++)
6762 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6763 return NULL;
6764
6765 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6766 {
6767 int len = gotrel[j].len;
6768 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6769 {
6770 if (gotrel[j].rel[object_64bit] != 0)
6771 {
6772 int first, second;
6773 char *tmpbuf, *past_reloc;
6774
6775 *rel = gotrel[j].rel[object_64bit];
6776 if (adjust)
6777 *adjust = len;
6778
6779 if (types)
6780 {
6781 if (flag_code != CODE_64BIT)
6782 {
6783 types->bitfield.imm32 = 1;
6784 types->bitfield.disp32 = 1;
6785 }
6786 else
6787 *types = gotrel[j].types64;
6788 }
6789
6790 if (GOT_symbol == NULL)
6791 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6792
6793 /* The length of the first part of our input line. */
6794 first = cp - input_line_pointer;
6795
6796 /* The second part goes from after the reloc token until
6797 (and including) an end_of_line char or comma. */
6798 past_reloc = cp + 1 + len;
6799 cp = past_reloc;
6800 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6801 ++cp;
6802 second = cp + 1 - past_reloc;
6803
6804 /* Allocate and copy string. The trailing NUL shouldn't
6805 be necessary, but be safe. */
6806 tmpbuf = (char *) xmalloc (first + second + 2);
6807 memcpy (tmpbuf, input_line_pointer, first);
6808 if (second != 0 && *past_reloc != ' ')
6809 /* Replace the relocation token with ' ', so that
6810 errors like foo@GOTOFF1 will be detected. */
6811 tmpbuf[first++] = ' ';
6812 memcpy (tmpbuf + first, past_reloc, second);
6813 tmpbuf[first + second] = '\0';
6814 return tmpbuf;
6815 }
6816
6817 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6818 gotrel[j].str, 1 << (5 + object_64bit));
6819 return NULL;
6820 }
6821 }
6822
6823 /* Might be a symbol version string. Don't as_bad here. */
6824 return NULL;
6825 }
6826 #endif
6827
6828 #ifdef TE_PE
6829 #ifdef lex_got
6830 #undef lex_got
6831 #endif
6832 /* Parse operands of the form
6833 <symbol>@SECREL32+<nnn>
6834
6835 If we find one, set up the correct relocation in RELOC and copy the
6836 input string, minus the `@SECREL32' into a malloc'd buffer for
6837 parsing by the calling routine. Return this buffer, and if ADJUST
6838 is non-null set it to the length of the string we removed from the
6839 input line. Otherwise return NULL.
6840
6841 This function is copied from the ELF version above, adjusted for PE targets. */
6842
6843 static char *
6844 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6845 int *adjust ATTRIBUTE_UNUSED,
6846 i386_operand_type *types ATTRIBUTE_UNUSED)
6847 {
6848 static const struct
6849 {
6850 const char *str;
6851 int len;
6852 const enum bfd_reloc_code_real rel[2];
6853 const i386_operand_type types64;
6854 }
6855 gotrel[] =
6856 {
6857 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6858 BFD_RELOC_32_SECREL },
6859 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6860 };
6861
6862 char *cp;
6863 unsigned j;
6864
6865 for (cp = input_line_pointer; *cp != '@'; cp++)
6866 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6867 return NULL;
6868
6869 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6870 {
6871 int len = gotrel[j].len;
6872
6873 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6874 {
6875 if (gotrel[j].rel[object_64bit] != 0)
6876 {
6877 int first, second;
6878 char *tmpbuf, *past_reloc;
6879
6880 *rel = gotrel[j].rel[object_64bit];
6881 if (adjust)
6882 *adjust = len;
6883
6884 if (types)
6885 {
6886 if (flag_code != CODE_64BIT)
6887 {
6888 types->bitfield.imm32 = 1;
6889 types->bitfield.disp32 = 1;
6890 }
6891 else
6892 *types = gotrel[j].types64;
6893 }
6894
6895 /* The length of the first part of our input line. */
6896 first = cp - input_line_pointer;
6897
6898 /* The second part goes from after the reloc token until
6899 (and including) an end_of_line char or comma. */
6900 past_reloc = cp + 1 + len;
6901 cp = past_reloc;
6902 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6903 ++cp;
6904 second = cp + 1 - past_reloc;
6905
6906 /* Allocate and copy string. The trailing NUL shouldn't
6907 be necessary, but be safe. */
6908 tmpbuf = (char *) xmalloc (first + second + 2);
6909 memcpy (tmpbuf, input_line_pointer, first);
6910 if (second != 0 && *past_reloc != ' ')
6911 /* Replace the relocation token with ' ', so that
6912 errors like foo@SECREL321 will be detected. */
6913 tmpbuf[first++] = ' ';
6914 memcpy (tmpbuf + first, past_reloc, second);
6915 tmpbuf[first + second] = '\0';
6916 return tmpbuf;
6917 }
6918
6919 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6920 gotrel[j].str, 1 << (5 + object_64bit));
6921 return NULL;
6922 }
6923 }
6924
6925 /* Might be a symbol version string. Don't as_bad here. */
6926 return NULL;
6927 }
6928
6929 #endif /* TE_PE */
6930
6931 void
6932 x86_cons (expressionS *exp, int size)
6933 {
6934 intel_syntax = -intel_syntax;
6935
6936 exp->X_md = 0;
6937 if (size == 4 || (object_64bit && size == 8))
6938 {
6939 /* Handle @GOTOFF and the like in an expression. */
6940 char *save;
6941 char *gotfree_input_line;
6942 int adjust = 0;
6943
6944 save = input_line_pointer;
6945 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6946 if (gotfree_input_line)
6947 input_line_pointer = gotfree_input_line;
6948
6949 expression (exp);
6950
6951 if (gotfree_input_line)
6952 {
6953 /* expression () has merrily parsed up to the end of line,
6954 or a comma - in the wrong buffer. Transfer how far
6955 input_line_pointer has moved to the right buffer. */
6956 input_line_pointer = (save
6957 + (input_line_pointer - gotfree_input_line)
6958 + adjust);
6959 free (gotfree_input_line);
6960 if (exp->X_op == O_constant
6961 || exp->X_op == O_absent
6962 || exp->X_op == O_illegal
6963 || exp->X_op == O_register
6964 || exp->X_op == O_big)
6965 {
6966 char c = *input_line_pointer;
6967 *input_line_pointer = 0;
6968 as_bad (_("missing or invalid expression `%s'"), save);
6969 *input_line_pointer = c;
6970 }
6971 }
6972 }
6973 else
6974 expression (exp);
6975
6976 intel_syntax = -intel_syntax;
6977
6978 if (intel_syntax)
6979 i386_intel_simplify (exp);
6980 }
6981
6982 static void
6983 signed_cons (int size)
6984 {
6985 if (flag_code == CODE_64BIT)
6986 cons_sign = 1;
6987 cons (size);
6988 cons_sign = -1;
6989 }
6990
6991 #ifdef TE_PE
6992 static void
6993 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6994 {
6995 expressionS exp;
6996
6997 do
6998 {
6999 expression (&exp);
7000 if (exp.X_op == O_symbol)
7001 exp.X_op = O_secrel;
7002
7003 emit_expr (&exp, 4);
7004 }
7005 while (*input_line_pointer++ == ',');
7006
7007 input_line_pointer--;
7008 demand_empty_rest_of_line ();
7009 }
7010 #endif
7011
7012 static int
7013 i386_immediate (char *imm_start)
7014 {
7015 char *save_input_line_pointer;
7016 char *gotfree_input_line;
7017 segT exp_seg = 0;
7018 expressionS *exp;
7019 i386_operand_type types;
7020
7021 operand_type_set (&types, ~0);
7022
7023 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7024 {
7025 as_bad (_("at most %d immediate operands are allowed"),
7026 MAX_IMMEDIATE_OPERANDS);
7027 return 0;
7028 }
7029
7030 exp = &im_expressions[i.imm_operands++];
7031 i.op[this_operand].imms = exp;
7032
7033 if (is_space_char (*imm_start))
7034 ++imm_start;
7035
7036 save_input_line_pointer = input_line_pointer;
7037 input_line_pointer = imm_start;
7038
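/* lex_got strips an @GOT-style suffix, records the matching reloc in
   i.reloc[this_operand], and returns a copy of the operand text for
   us to parse instead. */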
7039 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7040 if (gotfree_input_line)
7041 input_line_pointer = gotfree_input_line;
7042
7043 exp_seg = expression (exp);
7044
7045 SKIP_WHITESPACE ();
7046 if (*input_line_pointer)
7047 as_bad (_("junk `%s' after expression"), input_line_pointer);
7048
7049 input_line_pointer = save_input_line_pointer;
7050 if (gotfree_input_line)
7051 {
7052 free (gotfree_input_line);
7053
7054 if (exp->X_op == O_constant || exp->X_op == O_register)
7055 exp->X_op = O_illegal;
7056 }
7057
7058 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7059 }
7060
7061 static int
7062 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7063 i386_operand_type types, const char *imm_start)
7064 {
7065 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7066 {
7067 if (imm_start)
7068 as_bad (_("missing or invalid immediate expression `%s'"),
7069 imm_start);
7070 return 0;
7071 }
7072 else if (exp->X_op == O_constant)
7073 {
7074 /* Size it properly later. */
7075 i.types[this_operand].bitfield.imm64 = 1;
7076 /* If not 64bit, sign extend val. */
7077 if (flag_code != CODE_64BIT
7078 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7079 exp->X_add_number
7080 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7081 }
7082 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7083 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7084 && exp_seg != absolute_section
7085 && exp_seg != text_section
7086 && exp_seg != data_section
7087 && exp_seg != bss_section
7088 && exp_seg != undefined_section
7089 && !bfd_is_com_section (exp_seg))
7090 {
7091 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7092 return 0;
7093 }
7094 #endif
7095 else if (!intel_syntax && exp->X_op == O_register)
7096 {
7097 if (imm_start)
7098 as_bad (_("illegal immediate register operand %s"), imm_start);
7099 return 0;
7100 }
7101 else
7102 {
7103 /* This is an address. The size of the address will be
7104 determined later, depending on destination register,
7105 suffix, or the default for the section. */
7106 i.types[this_operand].bitfield.imm8 = 1;
7107 i.types[this_operand].bitfield.imm16 = 1;
7108 i.types[this_operand].bitfield.imm32 = 1;
7109 i.types[this_operand].bitfield.imm32s = 1;
7110 i.types[this_operand].bitfield.imm64 = 1;
7111 i.types[this_operand] = operand_type_and (i.types[this_operand],
7112 types);
7113 }
7114
7115 return 1;
7116 }
7117
7118 static char *
7119 i386_scale (char *scale)
7120 {
7121 offsetT val;
7122 char *save = input_line_pointer;
7123
7124 input_line_pointer = scale;
7125 val = get_absolute_expression ();
7126
7127 switch (val)
7128 {
7129 case 1:
7130 i.log2_scale_factor = 0;
7131 break;
7132 case 2:
7133 i.log2_scale_factor = 1;
7134 break;
7135 case 4:
7136 i.log2_scale_factor = 2;
7137 break;
7138 case 8:
7139 i.log2_scale_factor = 3;
7140 break;
7141 default:
7142 {
7143 char sep = *input_line_pointer;
7144
7145 *input_line_pointer = '\0';
7146 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7147 scale);
7148 *input_line_pointer = sep;
7149 input_line_pointer = save;
7150 return NULL;
7151 }
7152 }
7153 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7154 {
7155 as_warn (_("scale factor of %d without an index register"),
7156 1 << i.log2_scale_factor);
7157 i.log2_scale_factor = 0;
7158 }
7159 scale = input_line_pointer;
7160 input_line_pointer = save;
7161 return scale;
7162 }
7163
7164 static int
7165 i386_displacement (char *disp_start, char *disp_end)
7166 {
7167 expressionS *exp;
7168 segT exp_seg = 0;
7169 char *save_input_line_pointer;
7170 char *gotfree_input_line;
7171 int override;
7172 i386_operand_type bigdisp, types = anydisp;
7173 int ret;
7174
7175 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7176 {
7177 as_bad (_("at most %d displacement operands are allowed"),
7178 MAX_MEMORY_OPERANDS);
7179 return 0;
7180 }
7181
7182 operand_type_set (&bigdisp, 0);
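/* For memory operands and absolute jumps the displacement width
   follows the address size; for PC-relative branches (the else arm
   below) it follows the data size. */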
7183 if ((i.types[this_operand].bitfield.jumpabsolute)
7184 || (!current_templates->start->opcode_modifier.jump
7185 && !current_templates->start->opcode_modifier.jumpdword))
7186 {
7187 bigdisp.bitfield.disp32 = 1;
7188 override = (i.prefix[ADDR_PREFIX] != 0);
7189 if (flag_code == CODE_64BIT)
7190 {
7191 if (!override)
7192 {
7193 bigdisp.bitfield.disp32s = 1;
7194 bigdisp.bitfield.disp64 = 1;
7195 }
7196 }
7197 else if ((flag_code == CODE_16BIT) ^ override)
7198 {
7199 bigdisp.bitfield.disp32 = 0;
7200 bigdisp.bitfield.disp16 = 1;
7201 }
7202 }
7203 else
7204 {
7205 /* For PC-relative branches, the width of the displacement
7206 is dependent upon data size, not address size. */
7207 override = (i.prefix[DATA_PREFIX] != 0);
7208 if (flag_code == CODE_64BIT)
7209 {
7210 if (override || i.suffix == WORD_MNEM_SUFFIX)
7211 bigdisp.bitfield.disp16 = 1;
7212 else
7213 {
7214 bigdisp.bitfield.disp32 = 1;
7215 bigdisp.bitfield.disp32s = 1;
7216 }
7217 }
7218 else
7219 {
7220 if (!override)
7221 override = (i.suffix == (flag_code != CODE_16BIT
7222 ? WORD_MNEM_SUFFIX
7223 : LONG_MNEM_SUFFIX));
7224 bigdisp.bitfield.disp32 = 1;
7225 if ((flag_code == CODE_16BIT) ^ override)
7226 {
7227 bigdisp.bitfield.disp32 = 0;
7228 bigdisp.bitfield.disp16 = 1;
7229 }
7230 }
7231 }
7232 i.types[this_operand] = operand_type_or (i.types[this_operand],
7233 bigdisp);
7234
7235 exp = &disp_expressions[i.disp_operands];
7236 i.op[this_operand].disps = exp;
7237 i.disp_operands++;
7238 save_input_line_pointer = input_line_pointer;
7239 input_line_pointer = disp_start;
7240 END_STRING_AND_SAVE (disp_end);
7241
7242 #ifndef GCC_ASM_O_HACK
7243 #define GCC_ASM_O_HACK 0
7244 #endif
7245 #if GCC_ASM_O_HACK
7246 END_STRING_AND_SAVE (disp_end + 1);
7247 if (i.types[this_operand].bitfield.baseIndex
7248 && displacement_string_end[-1] == '+')
7249 {
7250 /* This hack is to avoid a warning when using the "o"
7251 constraint within gcc asm statements.
7252 For instance:
7253
7254 #define _set_tssldt_desc(n,addr,limit,type) \
7255 __asm__ __volatile__ ( \
7256 "movw %w2,%0\n\t" \
7257 "movw %w1,2+%0\n\t" \
7258 "rorl $16,%1\n\t" \
7259 "movb %b1,4+%0\n\t" \
7260 "movb %4,5+%0\n\t" \
7261 "movb $0,6+%0\n\t" \
7262 "movb %h1,7+%0\n\t" \
7263 "rorl $16,%1" \
7264 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7265
7266 This works great except that the output assembler ends
7267 up looking a bit weird if it turns out that there is
7268 no offset. You end up producing code that looks like:
7269
7270 #APP
7271 movw $235,(%eax)
7272 movw %dx,2+(%eax)
7273 rorl $16,%edx
7274 movb %dl,4+(%eax)
7275 movb $137,5+(%eax)
7276 movb $0,6+(%eax)
7277 movb %dh,7+(%eax)
7278 rorl $16,%edx
7279 #NO_APP
7280
7281 So here we provide the missing zero. */
7282
7283 *displacement_string_end = '0';
7284 }
7285 #endif
7286 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7287 if (gotfree_input_line)
7288 input_line_pointer = gotfree_input_line;
7289
7290 exp_seg = expression (exp);
7291
7292 SKIP_WHITESPACE ();
7293 if (*input_line_pointer)
7294 as_bad (_("junk `%s' after expression"), input_line_pointer);
7295 #if GCC_ASM_O_HACK
7296 RESTORE_END_STRING (disp_end + 1);
7297 #endif
7298 input_line_pointer = save_input_line_pointer;
7299 if (gotfree_input_line)
7300 {
7301 free (gotfree_input_line);
7302
7303 if (exp->X_op == O_constant || exp->X_op == O_register)
7304 exp->X_op = O_illegal;
7305 }
7306
7307 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7308
7309 RESTORE_END_STRING (disp_end);
7310
7311 return ret;
7312 }
7313
7314 static int
7315 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7316 i386_operand_type types, const char *disp_start)
7317 {
7318 i386_operand_type bigdisp;
7319 int ret = 1;
7320
7321 /* We do this to make sure that the section symbol is in
7322 the symbol table. We will ultimately change the relocation
7323 to be relative to the beginning of the section. */
7324 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7325 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7326 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7327 {
7328 if (exp->X_op != O_symbol)
7329 goto inv_disp;
7330
7331 if (S_IS_LOCAL (exp->X_add_symbol)
7332 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7333 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7334 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7335 exp->X_op = O_subtract;
7336 exp->X_op_symbol = GOT_symbol;
7337 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7338 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7339 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7340 i.reloc[this_operand] = BFD_RELOC_64;
7341 else
7342 i.reloc[this_operand] = BFD_RELOC_32;
7343 }
7344
7345 else if (exp->X_op == O_absent
7346 || exp->X_op == O_illegal
7347 || exp->X_op == O_big)
7348 {
7349 inv_disp:
7350 as_bad (_("missing or invalid displacement expression `%s'"),
7351 disp_start);
7352 ret = 0;
7353 }
7354
7355 else if (flag_code == CODE_64BIT
7356 && !i.prefix[ADDR_PREFIX]
7357 && exp->X_op == O_constant)
7358 {
7359 /* Since the displacement is sign-extended to 64bit, don't allow
7360 disp32, and turn off disp32s if the value is out of range. */
7361 i.types[this_operand].bitfield.disp32 = 0;
7362 if (!fits_in_signed_long (exp->X_add_number))
7363 {
7364 i.types[this_operand].bitfield.disp32s = 0;
7365 if (i.types[this_operand].bitfield.baseindex)
7366 {
7367 as_bad (_("0x%lx out range of signed 32bit displacement"),
7368 (long) exp->X_add_number);
7369 ret = 0;
7370 }
7371 }
7372 }
7373
7374 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7375 else if (exp->X_op != O_constant
7376 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7377 && exp_seg != absolute_section
7378 && exp_seg != text_section
7379 && exp_seg != data_section
7380 && exp_seg != bss_section
7381 && exp_seg != undefined_section
7382 && !bfd_is_com_section (exp_seg))
7383 {
7384 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7385 ret = 0;
7386 }
7387 #endif
7388
7389 /* Check if this is a displacement only operand. */
7390 bigdisp = i.types[this_operand];
7391 bigdisp.bitfield.disp8 = 0;
7392 bigdisp.bitfield.disp16 = 0;
7393 bigdisp.bitfield.disp32 = 0;
7394 bigdisp.bitfield.disp32s = 0;
7395 bigdisp.bitfield.disp64 = 0;
7396 if (operand_type_all_zero (&bigdisp))
7397 i.types[this_operand] = operand_type_and (i.types[this_operand],
7398 types);
7399
7400 return ret;
7401 }
7402
7403 /* Make sure the memory operand we've been dealt is valid.
7404 Return 1 on success, 0 on a failure. */
7405
7406 static int
7407 i386_index_check (const char *operand_string)
7408 {
7409 int ok;
7410 const char *kind = "base/index";
7411 #if INFER_ADDR_PREFIX
7412 int fudged = 0;
7413
7414 tryprefix:
7415 #endif
7416 ok = 1;
7417 if (current_templates->start->opcode_modifier.isstring
7418 && !current_templates->start->opcode_modifier.immext
7419 && (current_templates->end[-1].opcode_modifier.isstring
7420 || i.mem_operands))
7421 {
7422 /* Memory operands of string insns are special in that they only allow
7423 a single register (rDI, rSI, or rBX) as their memory address. */
7424 unsigned int expected;
7425
7426 kind = "string address";
7427
7428 if (current_templates->start->opcode_modifier.w)
7429 {
7430 i386_operand_type type = current_templates->end[-1].operand_types[0];
7431
7432 if (!type.bitfield.baseindex
7433 || ((!i.mem_operands != !intel_syntax)
7434 && current_templates->end[-1].operand_types[1]
7435 .bitfield.baseindex))
7436 type = current_templates->end[-1].operand_types[1];
7437 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7438 }
7439 else
7440 expected = 3 /* rBX */;
7441
7442 if (!i.base_reg || i.index_reg
7443 || operand_type_check (i.types[this_operand], disp))
7444 ok = -1;
7445 else if (!(flag_code == CODE_64BIT
7446 ? i.prefix[ADDR_PREFIX]
7447 ? i.base_reg->reg_type.bitfield.reg32
7448 : i.base_reg->reg_type.bitfield.reg64
7449 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7450 ? i.base_reg->reg_type.bitfield.reg32
7451 : i.base_reg->reg_type.bitfield.reg16))
7452 ok = 0;
7453 else if (register_number (i.base_reg) != expected)
7454 ok = -1;
7455
7456 if (ok < 0)
7457 {
7458 unsigned int j;
7459
7460 for (j = 0; j < i386_regtab_size; ++j)
7461 if ((flag_code == CODE_64BIT
7462 ? i.prefix[ADDR_PREFIX]
7463 ? i386_regtab[j].reg_type.bitfield.reg32
7464 : i386_regtab[j].reg_type.bitfield.reg64
7465 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7466 ? i386_regtab[j].reg_type.bitfield.reg32
7467 : i386_regtab[j].reg_type.bitfield.reg16)
7468 && register_number(i386_regtab + j) == expected)
7469 break;
7470 gas_assert (j < i386_regtab_size);
7471 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7472 operand_string,
7473 intel_syntax ? '[' : '(',
7474 register_prefix,
7475 i386_regtab[j].reg_name,
7476 intel_syntax ? ']' : ')');
7477 ok = 1;
7478 }
7479 }
7480 else if (flag_code == CODE_64BIT)
7481 {
7482 if ((i.base_reg
7483 && ((i.prefix[ADDR_PREFIX] == 0
7484 && !i.base_reg->reg_type.bitfield.reg64)
7485 || (i.prefix[ADDR_PREFIX]
7486 && !i.base_reg->reg_type.bitfield.reg32))
7487 && (i.index_reg
7488 || i.base_reg->reg_num !=
7489 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7490 || (i.index_reg
7491 && !(i.index_reg->reg_type.bitfield.regxmm
7492 || i.index_reg->reg_type.bitfield.regymm)
7493 && (!i.index_reg->reg_type.bitfield.baseindex
7494 || (i.prefix[ADDR_PREFIX] == 0
7495 && i.index_reg->reg_num != RegRiz
7496 && !i.index_reg->reg_type.bitfield.reg64
7497 )
7498 || (i.prefix[ADDR_PREFIX]
7499 && i.index_reg->reg_num != RegEiz
7500 && !i.index_reg->reg_type.bitfield.reg32))))
7501 ok = 0;
7502 }
7503 else
7504 {
7505 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7506 {
7507 /* 16bit checks. */
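/* Only %bx or %bp may serve as base and only %si or %di (reg_num 6
   and 7) as index, with no scale factor, in 16-bit addressing. */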
7508 if ((i.base_reg
7509 && (!i.base_reg->reg_type.bitfield.reg16
7510 || !i.base_reg->reg_type.bitfield.baseindex))
7511 || (i.index_reg
7512 && (!i.index_reg->reg_type.bitfield.reg16
7513 || !i.index_reg->reg_type.bitfield.baseindex
7514 || !(i.base_reg
7515 && i.base_reg->reg_num < 6
7516 && i.index_reg->reg_num >= 6
7517 && i.log2_scale_factor == 0))))
7518 ok = 0;
7519 }
7520 else
7521 {
7522 /* 32bit checks. */
7523 if ((i.base_reg
7524 && !i.base_reg->reg_type.bitfield.reg32)
7525 || (i.index_reg
7526 && !i.index_reg->reg_type.bitfield.regxmm
7527 && !i.index_reg->reg_type.bitfield.regymm
7528 && ((!i.index_reg->reg_type.bitfield.reg32
7529 && i.index_reg->reg_num != RegEiz)
7530 || !i.index_reg->reg_type.bitfield.baseindex)))
7531 ok = 0;
7532 }
7533 }
7534 if (!ok)
7535 {
7536 #if INFER_ADDR_PREFIX
7537 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7538 {
7539 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7540 i.prefixes += 1;
7541 /* Change the size of any displacement too. At most one of
7542 Disp16 or Disp32 is set.
7543 FIXME. There doesn't seem to be any real need for separate
7544 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7545 Removing them would probably clean up the code quite a lot. */
7546 if (flag_code != CODE_64BIT
7547 && (i.types[this_operand].bitfield.disp16
7548 || i.types[this_operand].bitfield.disp32))
7549 i.types[this_operand]
7550 = operand_type_xor (i.types[this_operand], disp16_32);
7551 fudged = 1;
7552 goto tryprefix;
7553 }
7554 if (fudged)
7555 as_bad (_("`%s' is not a valid %s expression"),
7556 operand_string,
7557 kind);
7558 else
7559 #endif
7560 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7561 operand_string,
7562 flag_code_names[i.prefix[ADDR_PREFIX]
7563 ? flag_code == CODE_32BIT
7564 ? CODE_16BIT
7565 : CODE_32BIT
7566 : flag_code],
7567 kind);
7568 }
7569 return ok;
7570 }
7571
7572 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7573 on error. */
7574
7575 static int
7576 i386_att_operand (char *operand_string)
7577 {
7578 const reg_entry *r;
7579 char *end_op;
7580 char *op_string = operand_string;
7581
7582 if (is_space_char (*op_string))
7583 ++op_string;
7584
7585 /* We check for an absolute prefix (differentiating,
7586 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7587 if (*op_string == ABSOLUTE_PREFIX)
7588 {
7589 ++op_string;
7590 if (is_space_char (*op_string))
7591 ++op_string;
7592 i.types[this_operand].bitfield.jumpabsolute = 1;
7593 }
7594
7595 /* Check if operand is a register. */
7596 if ((r = parse_register (op_string, &end_op)) != NULL)
7597 {
7598 i386_operand_type temp;
7599
7600 /* Check for a segment override by searching for ':' after a
7601 segment register. */
7602 op_string = end_op;
7603 if (is_space_char (*op_string))
7604 ++op_string;
7605 if (*op_string == ':'
7606 && (r->reg_type.bitfield.sreg2
7607 || r->reg_type.bitfield.sreg3))
7608 {
7609 switch (r->reg_num)
7610 {
7611 case 0:
7612 i.seg[i.mem_operands] = &es;
7613 break;
7614 case 1:
7615 i.seg[i.mem_operands] = &cs;
7616 break;
7617 case 2:
7618 i.seg[i.mem_operands] = &ss;
7619 break;
7620 case 3:
7621 i.seg[i.mem_operands] = &ds;
7622 break;
7623 case 4:
7624 i.seg[i.mem_operands] = &fs;
7625 break;
7626 case 5:
7627 i.seg[i.mem_operands] = &gs;
7628 break;
7629 }
7630
7631 /* Skip the ':' and whitespace. */
7632 ++op_string;
7633 if (is_space_char (*op_string))
7634 ++op_string;
7635
7636 if (!is_digit_char (*op_string)
7637 && !is_identifier_char (*op_string)
7638 && *op_string != '('
7639 && *op_string != ABSOLUTE_PREFIX)
7640 {
7641 as_bad (_("bad memory operand `%s'"), op_string);
7642 return 0;
7643 }
7644 /* Handle case of %es:*foo. */
7645 if (*op_string == ABSOLUTE_PREFIX)
7646 {
7647 ++op_string;
7648 if (is_space_char (*op_string))
7649 ++op_string;
7650 i.types[this_operand].bitfield.jumpabsolute = 1;
7651 }
7652 goto do_memory_reference;
7653 }
7654 if (*op_string)
7655 {
7656 as_bad (_("junk `%s' after register"), op_string);
7657 return 0;
7658 }
7659 temp = r->reg_type;
7660 temp.bitfield.baseindex = 0;
7661 i.types[this_operand] = operand_type_or (i.types[this_operand],
7662 temp);
7663 i.types[this_operand].bitfield.unspecified = 0;
7664 i.op[this_operand].regs = r;
7665 i.reg_operands++;
7666 }
7667 else if (*op_string == REGISTER_PREFIX)
7668 {
7669 as_bad (_("bad register name `%s'"), op_string);
7670 return 0;
7671 }
7672 else if (*op_string == IMMEDIATE_PREFIX)
7673 {
7674 ++op_string;
7675 if (i.types[this_operand].bitfield.jumpabsolute)
7676 {
7677 as_bad (_("immediate operand illegal with absolute jump"));
7678 return 0;
7679 }
7680 if (!i386_immediate (op_string))
7681 return 0;
7682 }
7683 else if (is_digit_char (*op_string)
7684 || is_identifier_char (*op_string)
7685 || *op_string == '(')
7686 {
7687 /* This is a memory reference of some sort. */
7688 char *base_string;
7689
7690 /* Start and end of displacement string expression (if found). */
7691 char *displacement_string_start;
7692 char *displacement_string_end;
7693
7694 do_memory_reference:
7695 if ((i.mem_operands == 1
7696 && !current_templates->start->opcode_modifier.isstring)
7697 || i.mem_operands == 2)
7698 {
7699 as_bad (_("too many memory references for `%s'"),
7700 current_templates->start->name);
7701 return 0;
7702 }
7703
7704 /* Check for base index form. We detect the base index form by
7705 looking for an ')' at the end of the operand, searching
7706 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7707 after the '('. */
7708 base_string = op_string + strlen (op_string);
7709
7710 --base_string;
7711 if (is_space_char (*base_string))
7712 --base_string;
7713
7714 /* If we only have a displacement, set up for it to be parsed later. */
7715 displacement_string_start = op_string;
7716 displacement_string_end = base_string + 1;
7717
7718 if (*base_string == ')')
7719 {
7720 char *temp_string;
7721 unsigned int parens_balanced = 1;
7722 /* We've already checked that the number of left & right ()'s is
7723 equal, so this loop will not be infinite. */
7724 do
7725 {
7726 base_string--;
7727 if (*base_string == ')')
7728 parens_balanced++;
7729 if (*base_string == '(')
7730 parens_balanced--;
7731 }
7732 while (parens_balanced);
7733
7734 temp_string = base_string;
7735
7736 /* Skip past '(' and whitespace. */
7737 ++base_string;
7738 if (is_space_char (*base_string))
7739 ++base_string;
7740
7741 if (*base_string == ','
7742 || ((i.base_reg = parse_register (base_string, &end_op))
7743 != NULL))
7744 {
7745 displacement_string_end = temp_string;
7746
7747 i.types[this_operand].bitfield.baseindex = 1;
7748
7749 if (i.base_reg)
7750 {
7751 base_string = end_op;
7752 if (is_space_char (*base_string))
7753 ++base_string;
7754 }
7755
7756 /* There may be an index reg or scale factor here. */
7757 if (*base_string == ',')
7758 {
7759 ++base_string;
7760 if (is_space_char (*base_string))
7761 ++base_string;
7762
7763 if ((i.index_reg = parse_register (base_string, &end_op))
7764 != NULL)
7765 {
7766 base_string = end_op;
7767 if (is_space_char (*base_string))
7768 ++base_string;
7769 if (*base_string == ',')
7770 {
7771 ++base_string;
7772 if (is_space_char (*base_string))
7773 ++base_string;
7774 }
7775 else if (*base_string != ')')
7776 {
7777 as_bad (_("expecting `,' or `)' "
7778 "after index register in `%s'"),
7779 operand_string);
7780 return 0;
7781 }
7782 }
7783 else if (*base_string == REGISTER_PREFIX)
7784 {
7785 end_op = strchr (base_string, ',');
7786 if (end_op)
7787 *end_op = '\0';
7788 as_bad (_("bad register name `%s'"), base_string);
7789 return 0;
7790 }
7791
7792 /* Check for scale factor. */
7793 if (*base_string != ')')
7794 {
7795 char *end_scale = i386_scale (base_string);
7796
7797 if (!end_scale)
7798 return 0;
7799
7800 base_string = end_scale;
7801 if (is_space_char (*base_string))
7802 ++base_string;
7803 if (*base_string != ')')
7804 {
7805 as_bad (_("expecting `)' "
7806 "after scale factor in `%s'"),
7807 operand_string);
7808 return 0;
7809 }
7810 }
7811 else if (!i.index_reg)
7812 {
7813 as_bad (_("expecting index register or scale factor "
7814 "after `,'; got '%c'"),
7815 *base_string);
7816 return 0;
7817 }
7818 }
7819 else if (*base_string != ')')
7820 {
7821 as_bad (_("expecting `,' or `)' "
7822 "after base register in `%s'"),
7823 operand_string);
7824 return 0;
7825 }
7826 }
7827 else if (*base_string == REGISTER_PREFIX)
7828 {
7829 end_op = strchr (base_string, ',');
7830 if (end_op)
7831 *end_op = '\0';
7832 as_bad (_("bad register name `%s'"), base_string);
7833 return 0;
7834 }
7835 }
7836
7837 /* If there's an expression beginning the operand, parse it,
7838 assuming displacement_string_start and
7839 displacement_string_end are meaningful. */
7840 if (displacement_string_start != displacement_string_end)
7841 {
7842 if (!i386_displacement (displacement_string_start,
7843 displacement_string_end))
7844 return 0;
7845 }
7846
7847 /* Special case for (%dx) while doing input/output op. */
7848 if (i.base_reg
7849 && operand_type_equal (&i.base_reg->reg_type,
7850 &reg16_inoutportreg)
7851 && i.index_reg == 0
7852 && i.log2_scale_factor == 0
7853 && i.seg[i.mem_operands] == 0
7854 && !operand_type_check (i.types[this_operand], disp))
7855 {
7856 i.types[this_operand] = inoutportreg;
7857 return 1;
7858 }
7859
7860 if (i386_index_check (operand_string) == 0)
7861 return 0;
7862 i.types[this_operand].bitfield.mem = 1;
7863 i.mem_operands++;
7864 }
7865 else
7866 {
7867 /* It's not a memory operand; argh! */
7868 as_bad (_("invalid char %s beginning operand %d `%s'"),
7869 output_invalid (*op_string),
7870 this_operand + 1,
7871 op_string);
7872 return 0;
7873 }
7874 return 1; /* Normal return. */
7875 }
7876 \f
7877 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7878 that an rs_machine_dependent frag may reach. */
7879
7880 unsigned int
7881 i386_frag_max_var (fragS *frag)
7882 {
7883 /* The only relaxable frags are for jumps.
7884 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7885 gas_assert (frag->fr_type == rs_machine_dependent);
7886 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7887 }
7888
7889 /* md_estimate_size_before_relax()
7890
7891 Called just before relax() for rs_machine_dependent frags. The x86
7892 assembler uses these frags to handle variable size jump
7893 instructions.
7894
7895 Any symbol that is now undefined will not become defined.
7896 Return the correct fr_subtype in the frag.
7897 Return the initial "guess for variable size of frag" to caller.
7898 The guess is actually the growth beyond the fixed part. Whatever
7899 we do to grow the fixed or variable part contributes to our
7900 returned value. */
7901
7902 int
7903 md_estimate_size_before_relax (fragS *fragP, segT segment)
7904 {
7905 /* We've already got fragP->fr_subtype right; all we have to do is
7906 check for un-relaxable symbols. On an ELF system, we can't relax
7907 an externally visible symbol, because it may be overridden by a
7908 shared library. */
7909 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7910 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7911 || (IS_ELF
7912 && (S_IS_EXTERNAL (fragP->fr_symbol)
7913 || S_IS_WEAK (fragP->fr_symbol)
7914 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7915 & BSF_GNU_INDIRECT_FUNCTION))))
7916 #endif
7917 #if defined (OBJ_COFF) && defined (TE_PE)
7918 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7919 && S_IS_WEAK (fragP->fr_symbol))
7920 #endif
7921 )
7922 {
7923 /* Symbol is undefined in this segment, or we need to keep a
7924 reloc so that weak symbols can be overridden. */
7925 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7926 enum bfd_reloc_code_real reloc_type;
7927 unsigned char *opcode;
7928 int old_fr_fix;
7929
7930 if (fragP->fr_var != NO_RELOC)
7931 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7932 else if (size == 2)
7933 reloc_type = BFD_RELOC_16_PCREL;
7934 else
7935 reloc_type = BFD_RELOC_32_PCREL;
7936
7937 old_fr_fix = fragP->fr_fix;
7938 opcode = (unsigned char *) fragP->fr_opcode;
7939
7940 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7941 {
7942 case UNCOND_JUMP:
7943 /* Make jmp (0xeb) a (d)word displacement jump. */
7944 opcode[0] = 0xe9;
7945 fragP->fr_fix += size;
7946 fix_new (fragP, old_fr_fix, size,
7947 fragP->fr_symbol,
7948 fragP->fr_offset, 1,
7949 reloc_type);
7950 break;
7951
7952 case COND_JUMP86:
7953 if (size == 2
7954 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7955 {
7956 /* Negate the condition, and branch past an
7957 unconditional jump. */
7958 opcode[0] ^= 1;
7959 opcode[1] = 3;
7960 /* Insert an unconditional jump. */
7961 opcode[2] = 0xe9;
7962 /* We added two extra opcode bytes, and have a two byte
7963 offset. */
7964 fragP->fr_fix += 2 + 2;
7965 fix_new (fragP, old_fr_fix + 2, 2,
7966 fragP->fr_symbol,
7967 fragP->fr_offset, 1,
7968 reloc_type);
7969 break;
7970 }
7971 /* Fall through. */
7972
7973 case COND_JUMP:
7974 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7975 {
7976 fixS *fixP;
7977
7978 fragP->fr_fix += 1;
7979 fixP = fix_new (fragP, old_fr_fix, 1,
7980 fragP->fr_symbol,
7981 fragP->fr_offset, 1,
7982 BFD_RELOC_8_PCREL);
7983 fixP->fx_signed = 1;
7984 break;
7985 }
7986
7987 /* This changes the byte-displacement jump 0x7N
7988 to the (d)word-displacement jump 0x0f,0x8N. */
7989 opcode[1] = opcode[0] + 0x10;
7990 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7991 /* We've added an opcode byte. */
7992 fragP->fr_fix += 1 + size;
7993 fix_new (fragP, old_fr_fix + 1, size,
7994 fragP->fr_symbol,
7995 fragP->fr_offset, 1,
7996 reloc_type);
7997 break;
7998
7999 default:
8000 BAD_CASE (fragP->fr_subtype);
8001 break;
8002 }
8003 frag_wane (fragP);
8004 return fragP->fr_fix - old_fr_fix;
8005 }
8006
8007 /* Guess size depending on current relax state. Initially the relax
8008 state will correspond to a short jump and we return 1, because
8009 the variable part of the frag (the branch offset) is one byte
8010 long. However, we can relax a section more than once and in that
8011 case we must either set fr_subtype back to the unrelaxed state,
8012 or return the value for the appropriate branch. */
8013 return md_relax_table[fragP->fr_subtype].rlx_length;
8014 }
8015
8016 /* Called after relax() is finished.
8017
8018 In: Address of frag.
8019 fr_type == rs_machine_dependent.
8020 fr_subtype is what the address relaxed to.
8021
8022 Out: Any fixSs and constants are set up.
8023 Caller will turn frag into a ".space 0". */
8024
8025 void
8026 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8027 fragS *fragP)
8028 {
8029 unsigned char *opcode;
8030 unsigned char *where_to_put_displacement = NULL;
8031 offsetT target_address;
8032 offsetT opcode_address;
8033 unsigned int extension = 0;
8034 offsetT displacement_from_opcode_start;
8035
8036 opcode = (unsigned char *) fragP->fr_opcode;
8037
8038 /* Address we want to reach in file space. */
8039 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8040
8041 /* Address opcode resides at in file space. */
8042 opcode_address = fragP->fr_address + fragP->fr_fix;
8043
8044 /* Displacement from opcode start to fill into instruction. */
8045 displacement_from_opcode_start = target_address - opcode_address;
8046
8047 if ((fragP->fr_subtype & BIG) == 0)
8048 {
8049 /* Don't have to change opcode. */
8050 extension = 1; /* 1 opcode + 1 displacement */
8051 where_to_put_displacement = &opcode[1];
8052 }
8053 else
8054 {
8055 if (no_cond_jump_promotion
8056 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8057 as_warn_where (fragP->fr_file, fragP->fr_line,
8058 _("long jump required"));
8059
8060 switch (fragP->fr_subtype)
8061 {
8062 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8063 extension = 4; /* 1 opcode + 4 displacement */
8064 opcode[0] = 0xe9;
8065 where_to_put_displacement = &opcode[1];
8066 break;
8067
8068 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8069 extension = 2; /* 1 opcode + 2 displacement */
8070 opcode[0] = 0xe9;
8071 where_to_put_displacement = &opcode[1];
8072 break;
8073
8074 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8075 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8076 extension = 5; /* 2 opcode + 4 displacement */
8077 opcode[1] = opcode[0] + 0x10;
8078 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8079 where_to_put_displacement = &opcode[2];
8080 break;
8081
8082 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8083 extension = 3; /* 2 opcode + 2 displacement */
8084 opcode[1] = opcode[0] + 0x10;
8085 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8086 where_to_put_displacement = &opcode[2];
8087 break;
8088
8089 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
8090 extension = 4;
8091 opcode[0] ^= 1;
8092 opcode[1] = 3;
8093 opcode[2] = 0xe9;
8094 where_to_put_displacement = &opcode[3];
8095 break;
8096
8097 default:
8098 BAD_CASE (fragP->fr_subtype);
8099 break;
8100 }
8101 }
8102
8103   /* If the size is less than four we are sure that the operand fits,
 8104      but if it's 4, then it could be that the displacement is larger
 8105      than -/+ 2GB.  */
8106 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8107 && object_64bit
8108 && ((addressT) (displacement_from_opcode_start - extension
8109 + ((addressT) 1 << 31))
8110 > (((addressT) 2 << 31) - 1)))
8111 {
8112 as_bad_where (fragP->fr_file, fragP->fr_line,
8113 _("jump target out of range"));
8114 /* Make us emit 0. */
8115 displacement_from_opcode_start = extension;
8116 }
8117 /* Now put displacement after opcode. */
8118 md_number_to_chars ((char *) where_to_put_displacement,
8119 (valueT) (displacement_from_opcode_start - extension),
8120 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8121 fragP->fr_fix += extension;
8122 }
8123 \f
8124 /* Apply a fixup (fixP) to segment data, once it has been determined
8125 by our caller that we have all the info we need to fix it up.
8126
8127 Parameter valP is the pointer to the value of the bits.
8128
8129 On the 386, immediates, displacements, and data pointers are all in
8130 the same (little-endian) format, so we don't need to care about which
8131 we are handling. */
8132
8133 void
8134 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8135 {
8136 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8137 valueT value = *valP;
8138
8139 #if !defined (TE_Mach)
8140 if (fixP->fx_pcrel)
8141 {
8142 switch (fixP->fx_r_type)
8143 {
8144 default:
8145 break;
8146
8147 case BFD_RELOC_64:
8148 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8149 break;
8150 case BFD_RELOC_32:
8151 case BFD_RELOC_X86_64_32S:
8152 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8153 break;
8154 case BFD_RELOC_16:
8155 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8156 break;
8157 case BFD_RELOC_8:
8158 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8159 break;
8160 }
8161 }
8162
8163 if (fixP->fx_addsy != NULL
8164 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8165 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8166 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8167 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8168 && !use_rela_relocations)
8169 {
8170 /* This is a hack. There should be a better way to handle this.
8171 This covers for the fact that bfd_install_relocation will
8172 subtract the current location (for partial_inplace, PC relative
8173 relocations); see more below. */
8174 #ifndef OBJ_AOUT
8175 if (IS_ELF
8176 #ifdef TE_PE
8177 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8178 #endif
8179 )
8180 value += fixP->fx_where + fixP->fx_frag->fr_address;
8181 #endif
8182 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8183 if (IS_ELF)
8184 {
8185 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8186
8187 if ((sym_seg == seg
8188 || (symbol_section_p (fixP->fx_addsy)
8189 && sym_seg != absolute_section))
8190 && !generic_force_reloc (fixP))
8191 {
8192 /* Yes, we add the values in twice. This is because
8193 bfd_install_relocation subtracts them out again. I think
8194 bfd_install_relocation is broken, but I don't dare change
8195 it. FIXME. */
8196 value += fixP->fx_where + fixP->fx_frag->fr_address;
8197 }
8198 }
8199 #endif
8200 #if defined (OBJ_COFF) && defined (TE_PE)
8201 /* For some reason, the PE format does not store a
8202 section address offset for a PC relative symbol. */
8203 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8204 || S_IS_WEAK (fixP->fx_addsy))
8205 value += md_pcrel_from (fixP);
8206 #endif
8207 }
8208 #if defined (OBJ_COFF) && defined (TE_PE)
8209 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8210 {
8211 value -= S_GET_VALUE (fixP->fx_addsy);
8212 }
8213 #endif
8214
8215 /* Fix a few things - the dynamic linker expects certain values here,
8216 and we must not disappoint it. */
8217 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8218 if (IS_ELF && fixP->fx_addsy)
8219 switch (fixP->fx_r_type)
8220 {
8221 case BFD_RELOC_386_PLT32:
8222 case BFD_RELOC_X86_64_PLT32:
8223 /* Make the jump instruction point to the address of the operand. At
8224 runtime we merely add the offset to the actual PLT entry. */
8225 value = -4;
8226 break;
8227
8228 case BFD_RELOC_386_TLS_GD:
8229 case BFD_RELOC_386_TLS_LDM:
8230 case BFD_RELOC_386_TLS_IE_32:
8231 case BFD_RELOC_386_TLS_IE:
8232 case BFD_RELOC_386_TLS_GOTIE:
8233 case BFD_RELOC_386_TLS_GOTDESC:
8234 case BFD_RELOC_X86_64_TLSGD:
8235 case BFD_RELOC_X86_64_TLSLD:
8236 case BFD_RELOC_X86_64_GOTTPOFF:
8237 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8238 value = 0; /* Fully resolved at runtime. No addend. */
8239 /* Fallthrough */
8240 case BFD_RELOC_386_TLS_LE:
8241 case BFD_RELOC_386_TLS_LDO_32:
8242 case BFD_RELOC_386_TLS_LE_32:
8243 case BFD_RELOC_X86_64_DTPOFF32:
8244 case BFD_RELOC_X86_64_DTPOFF64:
8245 case BFD_RELOC_X86_64_TPOFF32:
8246 case BFD_RELOC_X86_64_TPOFF64:
8247 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8248 break;
8249
8250 case BFD_RELOC_386_TLS_DESC_CALL:
8251 case BFD_RELOC_X86_64_TLSDESC_CALL:
8252 value = 0; /* Fully resolved at runtime. No addend. */
8253 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8254 fixP->fx_done = 0;
8255 return;
8256
8257 case BFD_RELOC_386_GOT32:
8258 case BFD_RELOC_X86_64_GOT32:
8259 value = 0; /* Fully resolved at runtime. No addend. */
8260 break;
8261
8262 case BFD_RELOC_VTABLE_INHERIT:
8263 case BFD_RELOC_VTABLE_ENTRY:
8264 fixP->fx_done = 0;
8265 return;
8266
8267 default:
8268 break;
8269 }
8270 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8271 *valP = value;
8272 #endif /* !defined (TE_Mach) */
8273
8274 /* Are we finished with this relocation now? */
8275 if (fixP->fx_addsy == NULL)
8276 fixP->fx_done = 1;
8277 #if defined (OBJ_COFF) && defined (TE_PE)
8278 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8279 {
8280 fixP->fx_done = 0;
8281 /* Remember value for tc_gen_reloc. */
8282 fixP->fx_addnumber = value;
8283 /* Clear out the frag for now. */
8284 value = 0;
8285 }
8286 #endif
8287 else if (use_rela_relocations)
8288 {
8289 fixP->fx_no_overflow = 1;
8290 /* Remember value for tc_gen_reloc. */
8291 fixP->fx_addnumber = value;
8292 value = 0;
8293 }
8294
8295 md_number_to_chars (p, value, fixP->fx_size);
8296 }
8297 \f
8298 char *
8299 md_atof (int type, char *litP, int *sizeP)
8300 {
8301   /* This outputs the LITTLENUMs in REVERSE order;
 8302      in accord with the little-endian 386.  */
8303 return ieee_md_atof (type, litP, sizeP, FALSE);
8304 }
8305 \f
8306 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8307
8308 static char *
8309 output_invalid (int c)
8310 {
8311 if (ISPRINT (c))
8312 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8313 "'%c'", c);
8314 else
8315 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8316 "(0x%x)", (unsigned char) c);
8317 return output_invalid_buf;
8318 }
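/* The buffer above feeds diagnostics such as the one issued by the
   operand parser earlier in this file: a printable offending byte is
   reported quoted, e.g. "invalid char 'x' beginning operand 1 `...'",
   while a non-printable one is shown in hex, e.g. "(0x1)".  (Example
   wording reconstructed from the format strings, for illustration.)  */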
8319
8320 /* REG_STRING starts *before* REGISTER_PREFIX. */
8321
8322 static const reg_entry *
8323 parse_real_register (char *reg_string, char **end_op)
8324 {
8325 char *s = reg_string;
8326 char *p;
8327 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8328 const reg_entry *r;
8329
8330 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8331 if (*s == REGISTER_PREFIX)
8332 ++s;
8333
8334 if (is_space_char (*s))
8335 ++s;
8336
8337 p = reg_name_given;
8338 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8339 {
8340 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8341 return (const reg_entry *) NULL;
8342 s++;
8343 }
8344
8345 /* For naked regs, make sure that we are not dealing with an identifier.
8346 This prevents confusing an identifier like `eax_var' with register
8347 `eax'. */
8348 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8349 return (const reg_entry *) NULL;
8350
8351 *end_op = s;
8352
8353 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8354
8355 /* Handle floating point regs, allowing spaces in the (i) part. */
8356 if (r == i386_regtab /* %st is first entry of table */)
8357 {
8358 if (is_space_char (*s))
8359 ++s;
8360 if (*s == '(')
8361 {
8362 ++s;
8363 if (is_space_char (*s))
8364 ++s;
8365 if (*s >= '0' && *s <= '7')
8366 {
8367 int fpr = *s - '0';
8368 ++s;
8369 if (is_space_char (*s))
8370 ++s;
8371 if (*s == ')')
8372 {
8373 *end_op = s + 1;
8374 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8375 know (r);
8376 return r + fpr;
8377 }
8378 }
8379 /* We have "%st(" then garbage. */
8380 return (const reg_entry *) NULL;
8381 }
8382 }
8383
8384 if (r == NULL || allow_pseudo_reg)
8385 return r;
8386
8387 if (operand_type_all_zero (&r->reg_type))
8388 return (const reg_entry *) NULL;
8389
8390 if ((r->reg_type.bitfield.reg32
8391 || r->reg_type.bitfield.sreg3
8392 || r->reg_type.bitfield.control
8393 || r->reg_type.bitfield.debug
8394 || r->reg_type.bitfield.test)
8395 && !cpu_arch_flags.bitfield.cpui386)
8396 return (const reg_entry *) NULL;
8397
8398 if (r->reg_type.bitfield.floatreg
8399 && !cpu_arch_flags.bitfield.cpu8087
8400 && !cpu_arch_flags.bitfield.cpu287
8401 && !cpu_arch_flags.bitfield.cpu387)
8402 return (const reg_entry *) NULL;
8403
8404 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8405 return (const reg_entry *) NULL;
8406
8407 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8408 return (const reg_entry *) NULL;
8409
8410 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8411 return (const reg_entry *) NULL;
8412
8413   /* Don't allow fake index registers unless allow_index_reg is nonzero.  */
8414 if (!allow_index_reg
8415 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8416 return (const reg_entry *) NULL;
8417
8418 if (((r->reg_flags & (RegRex64 | RegRex))
8419 || r->reg_type.bitfield.reg64)
8420 && (!cpu_arch_flags.bitfield.cpulm
8421 || !operand_type_equal (&r->reg_type, &control))
8422 && flag_code != CODE_64BIT)
8423 return (const reg_entry *) NULL;
8424
8425 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8426 return (const reg_entry *) NULL;
8427
8428 return r;
8429 }
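/* Examples of spellings accepted above (illustrative): "%eax" and
   "% eax" (whitespace after the register prefix is skipped), "%st",
   "%st(3)" and "%st ( 3 )" (blanks inside the (i) part are allowed),
   and, with -mnaked-reg, a bare "eax" (see parse_register below).  An
   identifier such as "eax_var" is deliberately not taken as a
   register.  */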
8430
8431 /* REG_STRING starts *before* REGISTER_PREFIX. */
8432
8433 static const reg_entry *
8434 parse_register (char *reg_string, char **end_op)
8435 {
8436 const reg_entry *r;
8437
8438 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8439 r = parse_real_register (reg_string, end_op);
8440 else
8441 r = NULL;
8442 if (!r)
8443 {
8444 char *save = input_line_pointer;
8445 char c;
8446 symbolS *symbolP;
8447
8448 input_line_pointer = reg_string;
8449 c = get_symbol_end ();
8450 symbolP = symbol_find (reg_string);
8451 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8452 {
8453 const expressionS *e = symbol_get_value_expression (symbolP);
8454
8455 know (e->X_op == O_register);
8456 know (e->X_add_number >= 0
8457 && (valueT) e->X_add_number < i386_regtab_size);
8458 r = i386_regtab + e->X_add_number;
8459 *end_op = input_line_pointer;
8460 }
8461 *input_line_pointer = c;
8462 input_line_pointer = save;
8463 }
8464 return r;
8465 }
8466
8467 int
8468 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8469 {
8470 const reg_entry *r;
8471 char *end = input_line_pointer;
8472
8473 *end = *nextcharP;
8474 r = parse_register (name, &input_line_pointer);
8475 if (r && end <= input_line_pointer)
8476 {
8477 *nextcharP = *input_line_pointer;
8478 *input_line_pointer = 0;
8479 e->X_op = O_register;
8480 e->X_add_number = r - i386_regtab;
8481 return 1;
8482 }
8483 input_line_pointer = end;
8484 *end = 0;
8485 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8486 }
8487
8488 void
8489 md_operand (expressionS *e)
8490 {
8491 char *end;
8492 const reg_entry *r;
8493
8494 switch (*input_line_pointer)
8495 {
8496 case REGISTER_PREFIX:
8497 r = parse_real_register (input_line_pointer, &end);
8498 if (r)
8499 {
8500 e->X_op = O_register;
8501 e->X_add_number = r - i386_regtab;
8502 input_line_pointer = end;
8503 }
8504 break;
8505
8506 case '[':
8507 gas_assert (intel_syntax);
8508 end = input_line_pointer++;
8509 expression (e);
8510 if (*input_line_pointer == ']')
8511 {
8512 ++input_line_pointer;
8513 e->X_op_symbol = make_expr_symbol (e);
8514 e->X_add_symbol = NULL;
8515 e->X_add_number = 0;
8516 e->X_op = O_index;
8517 }
8518 else
8519 {
8520 e->X_op = O_absent;
8521 input_line_pointer = end;
8522 }
8523 break;
8524 }
8525 }
8526
8527 \f
8528 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8529 const char *md_shortopts = "kVQ:sqn";
8530 #else
8531 const char *md_shortopts = "qn";
8532 #endif
8533
8534 #define OPTION_32 (OPTION_MD_BASE + 0)
8535 #define OPTION_64 (OPTION_MD_BASE + 1)
8536 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8537 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8538 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8539 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8540 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8541 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8542 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8543 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8544 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8545 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8546 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8547 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8548 #define OPTION_X32 (OPTION_MD_BASE + 14)
8549
8550 struct option md_longopts[] =
8551 {
8552 {"32", no_argument, NULL, OPTION_32},
8553 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8554 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8555 {"64", no_argument, NULL, OPTION_64},
8556 #endif
8557 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8558 {"x32", no_argument, NULL, OPTION_X32},
8559 #endif
8560 {"divide", no_argument, NULL, OPTION_DIVIDE},
8561 {"march", required_argument, NULL, OPTION_MARCH},
8562 {"mtune", required_argument, NULL, OPTION_MTUNE},
8563 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8564 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8565 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8566 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8567 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8568 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8569 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8570 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8571 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8572 {NULL, no_argument, NULL, 0}
8573 };
8574 size_t md_longopts_size = sizeof (md_longopts);
8575
8576 int
8577 md_parse_option (int c, char *arg)
8578 {
8579 unsigned int j;
8580 char *arch, *next;
8581
8582 switch (c)
8583 {
8584 case 'n':
8585 optimize_align_code = 0;
8586 break;
8587
8588 case 'q':
8589 quiet_warnings = 1;
8590 break;
8591
8592 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8593 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8594 should be emitted or not. FIXME: Not implemented. */
8595 case 'Q':
8596 break;
8597
8598 /* -V: SVR4 argument to print version ID. */
8599 case 'V':
8600 print_version_id ();
8601 break;
8602
8603 /* -k: Ignore for FreeBSD compatibility. */
8604 case 'k':
8605 break;
8606
8607 case 's':
8608 /* -s: On i386 Solaris, this tells the native assembler to use
8609 .stab instead of .stab.excl. We always use .stab anyhow. */
8610 break;
8611 #endif
8612 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8613 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8614 case OPTION_64:
8615 {
8616 const char **list, **l;
8617
8618 list = bfd_target_list ();
8619 for (l = list; *l != NULL; l++)
8620 if (CONST_STRNEQ (*l, "elf64-x86-64")
8621 || strcmp (*l, "coff-x86-64") == 0
8622 || strcmp (*l, "pe-x86-64") == 0
8623 || strcmp (*l, "pei-x86-64") == 0
8624 || strcmp (*l, "mach-o-x86-64") == 0)
8625 {
8626 default_arch = "x86_64";
8627 break;
8628 }
8629 if (*l == NULL)
8630 as_fatal (_("no compiled in support for x86_64"));
8631 free (list);
8632 }
8633 break;
8634 #endif
8635
8636 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8637 case OPTION_X32:
8638 if (IS_ELF)
8639 {
8640 const char **list, **l;
8641
8642 list = bfd_target_list ();
8643 for (l = list; *l != NULL; l++)
8644 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8645 {
8646 default_arch = "x86_64:32";
8647 break;
8648 }
8649 if (*l == NULL)
8650 as_fatal (_("no compiled in support for 32bit x86_64"));
8651 free (list);
8652 }
8653 else
8654 as_fatal (_("32bit x86_64 is only supported for ELF"));
8655 break;
8656 #endif
8657
8658 case OPTION_32:
8659 default_arch = "i386";
8660 break;
8661
8662 case OPTION_DIVIDE:
8663 #ifdef SVR4_COMMENT_CHARS
8664 {
8665 char *n, *t;
8666 const char *s;
8667
8668 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8669 t = n;
8670 for (s = i386_comment_chars; *s != '\0'; s++)
8671 if (*s != '/')
8672 *t++ = *s;
8673 *t = '\0';
8674 i386_comment_chars = n;
8675 }
8676 #endif
8677 break;
8678
8679 case OPTION_MARCH:
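      /* Illustrative example (both names assumed present in cpu_arch):
	 "-march=core2+avx" is split at '+'; "core2" selects a processor
	 entry and replaces cpu_arch_flags, while "avx" matches the
	 ".avx" extension entry and is OR-ed into (or, for negated
	 entries, masked out of) the current flags.  */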
8680 arch = xstrdup (arg);
8681 do
8682 {
8683 if (*arch == '.')
8684 as_fatal (_("invalid -march= option: `%s'"), arg);
8685 next = strchr (arch, '+');
8686 if (next)
8687 *next++ = '\0';
8688 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8689 {
8690 if (strcmp (arch, cpu_arch [j].name) == 0)
8691 {
8692 /* Processor. */
8693 if (! cpu_arch[j].flags.bitfield.cpui386)
8694 continue;
8695
8696 cpu_arch_name = cpu_arch[j].name;
8697 cpu_sub_arch_name = NULL;
8698 cpu_arch_flags = cpu_arch[j].flags;
8699 cpu_arch_isa = cpu_arch[j].type;
8700 cpu_arch_isa_flags = cpu_arch[j].flags;
8701 if (!cpu_arch_tune_set)
8702 {
8703 cpu_arch_tune = cpu_arch_isa;
8704 cpu_arch_tune_flags = cpu_arch_isa_flags;
8705 }
8706 break;
8707 }
8708 else if (*cpu_arch [j].name == '.'
8709 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8710 {
8711 		  /* ISA extension.  */
8712 i386_cpu_flags flags;
8713
8714 if (!cpu_arch[j].negated)
8715 flags = cpu_flags_or (cpu_arch_flags,
8716 cpu_arch[j].flags);
8717 else
8718 flags = cpu_flags_and_not (cpu_arch_flags,
8719 cpu_arch[j].flags);
8720 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8721 {
8722 if (cpu_sub_arch_name)
8723 {
8724 char *name = cpu_sub_arch_name;
8725 cpu_sub_arch_name = concat (name,
8726 cpu_arch[j].name,
8727 (const char *) NULL);
8728 free (name);
8729 }
8730 else
8731 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8732 cpu_arch_flags = flags;
8733 cpu_arch_isa_flags = flags;
8734 }
8735 break;
8736 }
8737 }
8738
8739 if (j >= ARRAY_SIZE (cpu_arch))
8740 as_fatal (_("invalid -march= option: `%s'"), arg);
8741
8742 arch = next;
8743 }
8744     while (next != NULL);
8745 break;
8746
8747 case OPTION_MTUNE:
8748 if (*arg == '.')
8749 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8750 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8751 {
8752 if (strcmp (arg, cpu_arch [j].name) == 0)
8753 {
8754 cpu_arch_tune_set = 1;
8755 cpu_arch_tune = cpu_arch [j].type;
8756 cpu_arch_tune_flags = cpu_arch[j].flags;
8757 break;
8758 }
8759 }
8760 if (j >= ARRAY_SIZE (cpu_arch))
8761 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8762 break;
8763
8764 case OPTION_MMNEMONIC:
8765 if (strcasecmp (arg, "att") == 0)
8766 intel_mnemonic = 0;
8767 else if (strcasecmp (arg, "intel") == 0)
8768 intel_mnemonic = 1;
8769 else
8770 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8771 break;
8772
8773 case OPTION_MSYNTAX:
8774 if (strcasecmp (arg, "att") == 0)
8775 intel_syntax = 0;
8776 else if (strcasecmp (arg, "intel") == 0)
8777 intel_syntax = 1;
8778 else
8779 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8780 break;
8781
8782 case OPTION_MINDEX_REG:
8783 allow_index_reg = 1;
8784 break;
8785
8786 case OPTION_MNAKED_REG:
8787 allow_naked_reg = 1;
8788 break;
8789
8790 case OPTION_MOLD_GCC:
8791 old_gcc = 1;
8792 break;
8793
8794 case OPTION_MSSE2AVX:
8795 sse2avx = 1;
8796 break;
8797
8798 case OPTION_MSSE_CHECK:
8799 if (strcasecmp (arg, "error") == 0)
8800 sse_check = check_error;
8801 else if (strcasecmp (arg, "warning") == 0)
8802 sse_check = check_warning;
8803 else if (strcasecmp (arg, "none") == 0)
8804 sse_check = check_none;
8805 else
8806 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8807 break;
8808
8809 case OPTION_MOPERAND_CHECK:
8810 if (strcasecmp (arg, "error") == 0)
8811 operand_check = check_error;
8812 else if (strcasecmp (arg, "warning") == 0)
8813 operand_check = check_warning;
8814 else if (strcasecmp (arg, "none") == 0)
8815 operand_check = check_none;
8816 else
8817 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8818 break;
8819
8820 case OPTION_MAVXSCALAR:
8821 if (strcasecmp (arg, "128") == 0)
8822 avxscalar = vex128;
8823 else if (strcasecmp (arg, "256") == 0)
8824 avxscalar = vex256;
8825 else
8826 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8827 break;
8828
8829 default:
8830 return 0;
8831 }
8832 return 1;
8833 }
8834
8835 #define MESSAGE_TEMPLATE \
8836 " "
8837
8838 static void
8839 show_arch (FILE *stream, int ext, int check)
8840 {
8841 static char message[] = MESSAGE_TEMPLATE;
8842 char *start = message + 27;
8843 char *p;
8844 int size = sizeof (MESSAGE_TEMPLATE);
8845 int left;
8846 const char *name;
8847 int len;
8848 unsigned int j;
8849
8850 p = start;
8851 left = size - (start - message);
8852 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8853 {
8854 /* Should it be skipped? */
8855 if (cpu_arch [j].skip)
8856 continue;
8857
8858 name = cpu_arch [j].name;
8859 len = cpu_arch [j].len;
8860 if (*name == '.')
8861 {
8862 /* It is an extension. Skip if we aren't asked to show it. */
8863 if (ext)
8864 {
8865 name++;
8866 len--;
8867 }
8868 else
8869 continue;
8870 }
8871 else if (ext)
8872 {
8873 	  /* It is a processor.  Skip if we only show extensions.  */
8874 continue;
8875 }
8876 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8877 {
8878 /* It is an impossible processor - skip. */
8879 continue;
8880 }
8881
8882 /* Reserve 2 spaces for ", " or ",\0" */
8883 left -= len + 2;
8884
8885 /* Check if there is any room. */
8886 if (left >= 0)
8887 {
8888 if (p != start)
8889 {
8890 *p++ = ',';
8891 *p++ = ' ';
8892 }
8893 p = mempcpy (p, name, len);
8894 }
8895 else
8896 {
8897 /* Output the current message now and start a new one. */
8898 *p++ = ',';
8899 *p = '\0';
8900 fprintf (stream, "%s\n", message);
8901 p = start;
8902 left = size - (start - message) - len - 2;
8903
8904 gas_assert (left >= 0);
8905
8906 p = mempcpy (p, name, len);
8907 }
8908 }
8909
8910 *p = '\0';
8911 fprintf (stream, "%s\n", message);
8912 }
8913
8914 void
8915 md_show_usage (FILE *stream)
8916 {
8917 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8918 fprintf (stream, _("\
8919 -Q ignored\n\
8920 -V print assembler version number\n\
8921 -k ignored\n"));
8922 #endif
8923 fprintf (stream, _("\
8924 -n Do not optimize code alignment\n\
8925 -q quieten some warnings\n"));
8926 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8927 fprintf (stream, _("\
8928 -s ignored\n"));
8929 #endif
8930 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8931 || defined (TE_PE) || defined (TE_PEP))
8932 fprintf (stream, _("\
8933 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8934 #endif
8935 #ifdef SVR4_COMMENT_CHARS
8936 fprintf (stream, _("\
8937 --divide do not treat `/' as a comment character\n"));
8938 #else
8939 fprintf (stream, _("\
8940 --divide ignored\n"));
8941 #endif
8942 fprintf (stream, _("\
8943 -march=CPU[,+EXTENSION...]\n\
8944 generate code for CPU and EXTENSION, CPU is one of:\n"));
8945 show_arch (stream, 0, 1);
8946 fprintf (stream, _("\
8947 EXTENSION is combination of:\n"));
8948 show_arch (stream, 1, 0);
8949 fprintf (stream, _("\
8950 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8951 show_arch (stream, 0, 0);
8952 fprintf (stream, _("\
8953 -msse2avx encode SSE instructions with VEX prefix\n"));
8954 fprintf (stream, _("\
8955 -msse-check=[none|error|warning]\n\
8956 check SSE instructions\n"));
8957 fprintf (stream, _("\
8958 -moperand-check=[none|error|warning]\n\
8959 check operand combinations for validity\n"));
8960 fprintf (stream, _("\
8961 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8962 length\n"));
8963 fprintf (stream, _("\
8964 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8965 fprintf (stream, _("\
8966 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8967 fprintf (stream, _("\
8968 -mindex-reg support pseudo index registers\n"));
8969 fprintf (stream, _("\
8970 -mnaked-reg don't require `%%' prefix for registers\n"));
8971 fprintf (stream, _("\
8972 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8973 }
8974
8975 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8976 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8977 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8978
8979 /* Pick the target format to use. */
8980
8981 const char *
8982 i386_target_format (void)
8983 {
8984 if (!strncmp (default_arch, "x86_64", 6))
8985 {
8986 update_code_flag (CODE_64BIT, 1);
8987 if (default_arch[6] == '\0')
8988 x86_elf_abi = X86_64_ABI;
8989 else
8990 x86_elf_abi = X86_64_X32_ABI;
8991 }
8992 else if (!strcmp (default_arch, "i386"))
8993 update_code_flag (CODE_32BIT, 1);
8994 else
8995 as_fatal (_("unknown architecture"));
8996
8997 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8998 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8999 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
9000 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9001
9002 switch (OUTPUT_FLAVOR)
9003 {
9004 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9005 case bfd_target_aout_flavour:
9006 return AOUT_TARGET_FORMAT;
9007 #endif
9008 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9009 # if defined (TE_PE) || defined (TE_PEP)
9010 case bfd_target_coff_flavour:
9011 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9012 # elif defined (TE_GO32)
9013 case bfd_target_coff_flavour:
9014 return "coff-go32";
9015 # else
9016 case bfd_target_coff_flavour:
9017 return "coff-i386";
9018 # endif
9019 #endif
9020 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9021 case bfd_target_elf_flavour:
9022 {
9023 const char *format;
9024
9025 switch (x86_elf_abi)
9026 {
9027 default:
9028 format = ELF_TARGET_FORMAT;
9029 break;
9030 case X86_64_ABI:
9031 use_rela_relocations = 1;
9032 object_64bit = 1;
9033 format = ELF_TARGET_FORMAT64;
9034 break;
9035 case X86_64_X32_ABI:
9036 use_rela_relocations = 1;
9037 object_64bit = 1;
9038 disallow_64bit_reloc = 1;
9039 format = ELF_TARGET_FORMAT32;
9040 break;
9041 }
9042 if (cpu_arch_isa == PROCESSOR_L1OM)
9043 {
9044 if (x86_elf_abi != X86_64_ABI)
9045 as_fatal (_("Intel L1OM is 64bit only"));
9046 return ELF_TARGET_L1OM_FORMAT;
9047 }
9048 if (cpu_arch_isa == PROCESSOR_K1OM)
9049 {
9050 if (x86_elf_abi != X86_64_ABI)
9051 as_fatal (_("Intel K1OM is 64bit only"));
9052 return ELF_TARGET_K1OM_FORMAT;
9053 }
9054 else
9055 return format;
9056 }
9057 #endif
9058 #if defined (OBJ_MACH_O)
9059 case bfd_target_mach_o_flavour:
9060 if (flag_code == CODE_64BIT)
9061 {
9062 use_rela_relocations = 1;
9063 object_64bit = 1;
9064 return "mach-o-x86-64";
9065 }
9066 else
9067 return "mach-o-i386";
9068 #endif
9069 default:
9070 abort ();
9071 return NULL;
9072 }
9073 }
9074
9075 #endif /* OBJ_MAYBE_ more than one */
9076
9077 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
9078 void
9079 i386_elf_emit_arch_note (void)
9080 {
9081 if (IS_ELF && cpu_arch_name != NULL)
9082 {
9083 char *p;
9084 asection *seg = now_seg;
9085 subsegT subseg = now_subseg;
9086 Elf_Internal_Note i_note;
9087 Elf_External_Note e_note;
9088 asection *note_secp;
9089 int len;
9090
9091 /* Create the .note section. */
9092 note_secp = subseg_new (".note", 0);
9093 bfd_set_section_flags (stdoutput,
9094 note_secp,
9095 SEC_HAS_CONTENTS | SEC_READONLY);
9096
9097 /* Process the arch string. */
9098 len = strlen (cpu_arch_name);
9099
9100 i_note.namesz = len + 1;
9101 i_note.descsz = 0;
9102 i_note.type = NT_ARCH;
9103 p = frag_more (sizeof (e_note.namesz));
9104 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9105 p = frag_more (sizeof (e_note.descsz));
9106 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9107 p = frag_more (sizeof (e_note.type));
9108 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9109 p = frag_more (len + 1);
9110 strcpy (p, cpu_arch_name);
9111
9112 frag_align (2, 0, 0);
9113
9114 subseg_set (seg, subseg);
9115 }
9116 }
9117 #endif
9118 \f
9119 symbolS *
9120 md_undefined_symbol (char *name)
9121 {
9122 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9123 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9124 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9125 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9126 {
9127 if (!GOT_symbol)
9128 {
9129 if (symbol_find (name))
9130 as_bad (_("GOT already in symbol table"));
9131 GOT_symbol = symbol_new (name, undefined_section,
9132 (valueT) 0, &zero_address_frag);
9133 	}
9134 return GOT_symbol;
9135 }
9136 return 0;
9137 }
9138
9139 /* Round up a section size to the appropriate boundary. */
9140
9141 valueT
9142 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9143 {
9144 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9145 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9146 {
9147 /* For a.out, force the section size to be aligned. If we don't do
9148 this, BFD will align it for us, but it will not write out the
9149 final bytes of the section. This may be a bug in BFD, but it is
9150 easier to fix it here since that is how the other a.out targets
9151 work. */
9152 int align;
9153
9154 align = bfd_get_section_alignment (stdoutput, segment);
9155 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9156 }
9157 #endif
9158
9159 return size;
9160 }
9161
9162 /* On the i386, PC-relative offsets are relative to the start of the
9163 next instruction. That is, the address of the offset, plus its
9164 size, since the offset is always the last part of the insn. */
9165
9166 long
9167 md_pcrel_from (fixS *fixP)
9168 {
9169 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9170 }
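/* Worked example (illustrative): a 5 byte "call foo" (E8 rel32)
   assembled at address 0x100 has its fixup at file address 0x101 with
   fx_size == 4, so this returns 0x105 -- the address of the next
   instruction, which is what the CPU measures rel32 from.  */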
9171
9172 #ifndef I386COFF
9173
9174 static void
9175 s_bss (int ignore ATTRIBUTE_UNUSED)
9176 {
9177 int temp;
9178
9179 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9180 if (IS_ELF)
9181 obj_elf_section_change_hook ();
9182 #endif
9183 temp = get_absolute_expression ();
9184 subseg_set (bss_section, (subsegT) temp);
9185 demand_empty_rest_of_line ();
9186 }
9187
9188 #endif
9189
9190 void
9191 i386_validate_fix (fixS *fixp)
9192 {
9193 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9194 {
9195 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9196 {
9197 if (!object_64bit)
9198 abort ();
9199 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9200 }
9201 else
9202 {
9203 if (!object_64bit)
9204 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9205 else
9206 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9207 }
9208 fixp->fx_subsy = 0;
9209 }
9210 }
9211
9212 arelent *
9213 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9214 {
9215 arelent *rel;
9216 bfd_reloc_code_real_type code;
9217
9218 switch (fixp->fx_r_type)
9219 {
9220 case BFD_RELOC_X86_64_PLT32:
9221 case BFD_RELOC_X86_64_GOT32:
9222 case BFD_RELOC_X86_64_GOTPCREL:
9223 case BFD_RELOC_386_PLT32:
9224 case BFD_RELOC_386_GOT32:
9225 case BFD_RELOC_386_GOTOFF:
9226 case BFD_RELOC_386_GOTPC:
9227 case BFD_RELOC_386_TLS_GD:
9228 case BFD_RELOC_386_TLS_LDM:
9229 case BFD_RELOC_386_TLS_LDO_32:
9230 case BFD_RELOC_386_TLS_IE_32:
9231 case BFD_RELOC_386_TLS_IE:
9232 case BFD_RELOC_386_TLS_GOTIE:
9233 case BFD_RELOC_386_TLS_LE_32:
9234 case BFD_RELOC_386_TLS_LE:
9235 case BFD_RELOC_386_TLS_GOTDESC:
9236 case BFD_RELOC_386_TLS_DESC_CALL:
9237 case BFD_RELOC_X86_64_TLSGD:
9238 case BFD_RELOC_X86_64_TLSLD:
9239 case BFD_RELOC_X86_64_DTPOFF32:
9240 case BFD_RELOC_X86_64_DTPOFF64:
9241 case BFD_RELOC_X86_64_GOTTPOFF:
9242 case BFD_RELOC_X86_64_TPOFF32:
9243 case BFD_RELOC_X86_64_TPOFF64:
9244 case BFD_RELOC_X86_64_GOTOFF64:
9245 case BFD_RELOC_X86_64_GOTPC32:
9246 case BFD_RELOC_X86_64_GOT64:
9247 case BFD_RELOC_X86_64_GOTPCREL64:
9248 case BFD_RELOC_X86_64_GOTPC64:
9249 case BFD_RELOC_X86_64_GOTPLT64:
9250 case BFD_RELOC_X86_64_PLTOFF64:
9251 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9252 case BFD_RELOC_X86_64_TLSDESC_CALL:
9253 case BFD_RELOC_RVA:
9254 case BFD_RELOC_VTABLE_ENTRY:
9255 case BFD_RELOC_VTABLE_INHERIT:
9256 #ifdef TE_PE
9257 case BFD_RELOC_32_SECREL:
9258 #endif
9259 code = fixp->fx_r_type;
9260 break;
9261 case BFD_RELOC_X86_64_32S:
9262 if (!fixp->fx_pcrel)
9263 {
9264 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9265 code = fixp->fx_r_type;
9266 break;
9267 }
9268 default:
9269 if (fixp->fx_pcrel)
9270 {
9271 switch (fixp->fx_size)
9272 {
9273 default:
9274 as_bad_where (fixp->fx_file, fixp->fx_line,
9275 _("can not do %d byte pc-relative relocation"),
9276 fixp->fx_size);
9277 code = BFD_RELOC_32_PCREL;
9278 break;
9279 case 1: code = BFD_RELOC_8_PCREL; break;
9280 case 2: code = BFD_RELOC_16_PCREL; break;
9281 case 4: code = BFD_RELOC_32_PCREL; break;
9282 #ifdef BFD64
9283 case 8: code = BFD_RELOC_64_PCREL; break;
9284 #endif
9285 }
9286 }
9287 else
9288 {
9289 switch (fixp->fx_size)
9290 {
9291 default:
9292 as_bad_where (fixp->fx_file, fixp->fx_line,
9293 _("can not do %d byte relocation"),
9294 fixp->fx_size);
9295 code = BFD_RELOC_32;
9296 break;
9297 case 1: code = BFD_RELOC_8; break;
9298 case 2: code = BFD_RELOC_16; break;
9299 case 4: code = BFD_RELOC_32; break;
9300 #ifdef BFD64
9301 case 8: code = BFD_RELOC_64; break;
9302 #endif
9303 }
9304 }
9305 break;
9306 }
9307
9308 if ((code == BFD_RELOC_32
9309 || code == BFD_RELOC_32_PCREL
9310 || code == BFD_RELOC_X86_64_32S)
9311 && GOT_symbol
9312 && fixp->fx_addsy == GOT_symbol)
9313 {
9314 if (!object_64bit)
9315 code = BFD_RELOC_386_GOTPC;
9316 else
9317 code = BFD_RELOC_X86_64_GOTPC32;
9318 }
9319 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9320 && GOT_symbol
9321 && fixp->fx_addsy == GOT_symbol)
9322 {
9323 code = BFD_RELOC_X86_64_GOTPC64;
9324 }
9325
9326 rel = (arelent *) xmalloc (sizeof (arelent));
9327 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9328 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9329
9330 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9331
9332 if (!use_rela_relocations)
9333 {
9334 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9335 vtable entry to be used in the relocation's section offset. */
9336 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9337 rel->address = fixp->fx_offset;
9338 #if defined (OBJ_COFF) && defined (TE_PE)
9339 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9340 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9341 else
9342 #endif
9343 rel->addend = 0;
9344 }
9345 /* Use the rela in 64bit mode. */
9346 else
9347 {
9348 if (disallow_64bit_reloc)
9349 switch (code)
9350 {
9351 case BFD_RELOC_X86_64_DTPOFF64:
9352 case BFD_RELOC_X86_64_TPOFF64:
9353 case BFD_RELOC_64_PCREL:
9354 case BFD_RELOC_X86_64_GOTOFF64:
9355 case BFD_RELOC_X86_64_GOT64:
9356 case BFD_RELOC_X86_64_GOTPCREL64:
9357 case BFD_RELOC_X86_64_GOTPC64:
9358 case BFD_RELOC_X86_64_GOTPLT64:
9359 case BFD_RELOC_X86_64_PLTOFF64:
9360 as_bad_where (fixp->fx_file, fixp->fx_line,
9361 _("cannot represent relocation type %s in x32 mode"),
9362 bfd_get_reloc_code_name (code));
9363 break;
9364 default:
9365 break;
9366 }
9367
9368 if (!fixp->fx_pcrel)
9369 rel->addend = fixp->fx_offset;
9370 else
9371 switch (code)
9372 {
9373 case BFD_RELOC_X86_64_PLT32:
9374 case BFD_RELOC_X86_64_GOT32:
9375 case BFD_RELOC_X86_64_GOTPCREL:
9376 case BFD_RELOC_X86_64_TLSGD:
9377 case BFD_RELOC_X86_64_TLSLD:
9378 case BFD_RELOC_X86_64_GOTTPOFF:
9379 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9380 case BFD_RELOC_X86_64_TLSDESC_CALL:
9381 rel->addend = fixp->fx_offset - fixp->fx_size;
9382 break;
9383 default:
9384 rel->addend = (section->vma
9385 - fixp->fx_size
9386 + fixp->fx_addnumber
9387 + md_pcrel_from (fixp));
9388 break;
9389 }
9390 }
9391
9392 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9393 if (rel->howto == NULL)
9394 {
9395 as_bad_where (fixp->fx_file, fixp->fx_line,
9396 _("cannot represent relocation type %s"),
9397 bfd_get_reloc_code_name (code));
9398 /* Set howto to a garbage value so that we can keep going. */
9399 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9400 gas_assert (rel->howto != NULL);
9401 }
9402
9403 return rel;
9404 }
9405
9406 #include "tc-i386-intel.c"
9407
9408 void
9409 tc_x86_parse_to_dw2regnum (expressionS *exp)
9410 {
9411 int saved_naked_reg;
9412 char saved_register_dot;
9413
9414 saved_naked_reg = allow_naked_reg;
9415 allow_naked_reg = 1;
9416 saved_register_dot = register_chars['.'];
9417 register_chars['.'] = '.';
9418 allow_pseudo_reg = 1;
9419 expression_and_evaluate (exp);
9420 allow_pseudo_reg = 0;
9421 register_chars['.'] = saved_register_dot;
9422 allow_naked_reg = saved_naked_reg;
9423
9424 if (exp->X_op == O_register && exp->X_add_number >= 0)
9425 {
9426 if ((addressT) exp->X_add_number < i386_regtab_size)
9427 {
9428 exp->X_op = O_constant;
9429 exp->X_add_number = i386_regtab[exp->X_add_number]
9430 .dw2_regnum[flag_code >> 1];
9431 }
9432 else
9433 exp->X_op = O_illegal;
9434 }
9435 }
9436
9437 void
9438 tc_x86_frame_initial_instructions (void)
9439 {
9440 static unsigned int sp_regno[2];
9441
9442 if (!sp_regno[flag_code >> 1])
9443 {
9444 char *saved_input = input_line_pointer;
9445 char sp[][4] = {"esp", "rsp"};
9446 expressionS exp;
9447
9448 input_line_pointer = sp[flag_code >> 1];
9449 tc_x86_parse_to_dw2regnum (&exp);
9450 gas_assert (exp.X_op == O_constant);
9451 sp_regno[flag_code >> 1] = exp.X_add_number;
9452 input_line_pointer = saved_input;
9453 }
9454
9455 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9456 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9457 }
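/* For example (DWARF register numbers come from the psABIs, not from
   this file): "rsp" parses to DWARF register 7 for 64-bit code and
   "esp" to register 4 for 32-bit code, so the initial CFA becomes that
   register plus the word size (minus x86_cie_data_alignment).  */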
9458
9459 int
9460 x86_dwarf2_addr_size (void)
9461 {
9462 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9463 if (x86_elf_abi == X86_64_X32_ABI)
9464 return 4;
9465 #endif
9466 return bfd_arch_bits_per_address (stdoutput) / 8;
9467 }
9468
9469 int
9470 i386_elf_section_type (const char *str, size_t len)
9471 {
9472 if (flag_code == CODE_64BIT
9473 && len == sizeof ("unwind") - 1
9474 && strncmp (str, "unwind", 6) == 0)
9475 return SHT_X86_64_UNWIND;
9476
9477 return -1;
9478 }
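/* Illustrative use (the section name is hypothetical): in 64-bit mode

	.section .my_unwind,"a",@unwind

   picks up SHT_X86_64_UNWIND via this hook; in 32-bit mode -1 is
   returned and the string is not recognized here.  */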
9479
9480 #ifdef TE_SOLARIS
9481 void
9482 i386_solaris_fix_up_eh_frame (segT sec)
9483 {
9484 if (flag_code == CODE_64BIT)
9485 elf_section_type (sec) = SHT_X86_64_UNWIND;
9486 }
9487 #endif
9488
9489 #ifdef TE_PE
9490 void
9491 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9492 {
9493 expressionS exp;
9494
9495 exp.X_op = O_secrel;
9496 exp.X_add_symbol = symbol;
9497 exp.X_add_number = 0;
9498 emit_expr (&exp, size);
9499 }
9500 #endif
9501
9502 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9503 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9504
9505 bfd_vma
9506 x86_64_section_letter (int letter, char **ptr_msg)
9507 {
9508 if (flag_code == CODE_64BIT)
9509 {
9510 if (letter == 'l')
9511 return SHF_X86_64_LARGE;
9512
9513 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9514 }
9515 else
9516 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9517 return -1;
9518 }
9519
9520 bfd_vma
9521 x86_64_section_word (char *str, size_t len)
9522 {
9523 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9524 return SHF_X86_64_LARGE;
9525
9526 return -1;
9527 }
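/* Illustrative use of the section letter handled above (64-bit only;
   the section name is chosen for the example):

	.section .ldata,"awl",@progbits

   sets SHF_X86_64_LARGE.  The keyword spelling "large" is likewise
   recognized by the word form of the directive.  */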
9528
9529 static void
9530 handle_large_common (int small ATTRIBUTE_UNUSED)
9531 {
9532 if (flag_code != CODE_64BIT)
9533 {
9534 s_comm_internal (0, elf_common_parse);
9535 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9536 }
9537 else
9538 {
9539 static segT lbss_section;
9540 asection *saved_com_section_ptr = elf_com_section_ptr;
9541 asection *saved_bss_section = bss_section;
9542
9543 if (lbss_section == NULL)
9544 {
9545 flagword applicable;
9546 segT seg = now_seg;
9547 subsegT subseg = now_subseg;
9548
9549 /* The .lbss section is for local .largecomm symbols. */
9550 lbss_section = subseg_new (".lbss", 0);
9551 applicable = bfd_applicable_section_flags (stdoutput);
9552 bfd_set_section_flags (stdoutput, lbss_section,
9553 applicable & SEC_ALLOC);
9554 seg_info (lbss_section)->bss = 1;
9555
9556 subseg_set (seg, subseg);
9557 }
9558
9559 elf_com_section_ptr = &_bfd_elf_large_com_section;
9560 bss_section = lbss_section;
9561
9562 s_comm_internal (0, elf_common_parse);
9563
9564 elf_com_section_ptr = saved_com_section_ptr;
9565 bss_section = saved_bss_section;
9566 }
9567 }
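/* Illustrative use (operands follow the .comm syntax parsed by
   elf_common_parse):

	.largecomm	big_table, 0x200000, 32

   which, in 64-bit mode, allocates the symbol against the large
   common / .lbss machinery set up above; in 32-bit mode it degrades
   to .comm with a warning.  */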
9568 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */