Check R_X86_64_32 overflow and allow R_X86_64_64 for x32.
[deliverable/binutils-gdb.git] / gas / config / tc-i386.c
1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT is really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ascii letter since since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
/* 'templates' is for grouping together 'template' structures for opcodes
   of the same name.  This is only used for storing the insns in the grand
   ole hash table of insns.
   The templates themselves start at START and range up to (but not
   including) END.  */
typedef struct
{
  const insn_template *start;	/* First template for this mnemonic.  */
  const insn_template *end;	/* One past the last template.  */
}
templates;
105
/* 386 operand encoding bytes: see 386 book for details of this.
   Mirrors the hardware ModRM byte layout (mode/reg/regmem fields).  */
typedef struct
{
  unsigned int regmem;	/* codes register or memory operand */
  unsigned int reg;	/* codes register operand (or extended opcode) */
  unsigned int mode;	/* how to interpret regmem & reg */
}
modrm_byte;
114
/* x86-64 extension prefix (REX).  Stored as a plain int so it can also
   hold zero for "no REX prefix".  */
typedef int rex_byte;

/* 386 opcode byte to code indirect addressing (the SIB byte).  */
typedef struct
{
  unsigned base;	/* Base register field.  */
  unsigned index;	/* Index register field.  */
  unsigned scale;	/* Scale field; presumably log2 of the scale
			   factor (cf. log2_scale_factor below) — confirm.  */
}
sib_byte;
126
/* x86 arch names, types and features.  One entry per name accepted by
   the .arch directive / -march= option (see cpu_arch[] below).  */
typedef struct
{
  const char *name;		/* arch name */
  unsigned int len;		/* arch string length */
  enum processor_type type;	/* arch type */
  i386_cpu_flags flags;		/* cpu feature flags */
  unsigned int skip;		/* show_arch should skip this.  */
  unsigned int negated;		/* turn off indicated flags.  */
}
arch_entry;
138
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
147 #ifdef TE_PE
148 static void pe_directive_secrel (int);
149 #endif
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
180 #ifndef I386COFF
181 static void s_bss (int);
182 #endif
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
185 #endif
186
187 static const char *default_arch = DEFAULT_ARCH;
188
/* VEX prefix.  */
typedef struct
{
  /* VEX prefix is either 2 byte or 3 byte.  Only the first LENGTH
     entries of BYTES are valid.  */
  unsigned char bytes[3];
  unsigned int length;
  /* Destination or source register specifier.  */
  const reg_entry *register_specifier;
} vex_prefix;
198
199 /* 'md_assemble ()' gathers together information and puts it into a
200 i386_insn. */
201
/* One parsed operand.  Which member is active is determined by the
   corresponding entry in i386_insn.types[].  */
union i386_op
{
  expressionS *disps;		/* Displacement expression.  */
  expressionS *imms;		/* Immediate expression.  */
  const reg_entry *regs;	/* Register operand.  */
};
208
/* Reasons an insn may fail to assemble; stored in i386_insn.error
   (see the error field of struct _i386_insn below) so a specific
   diagnostic can be issued.  */
enum i386_error
  {
    operand_size_mismatch,
    operand_type_mismatch,
    register_type_mismatch,
    number_of_operands_mismatch,
    invalid_instruction_suffix,
    bad_imm4,
    old_gcc_only,
    unsupported_with_intel_mnemonic,
    unsupported_syntax,
    unsupported,
    invalid_vsib_address,
    unsupported_vector_index_register
  };
224
/* The fully parsed and matched form of one instruction, built up by
   md_assemble () and consumed by the output routines.  */
struct _i386_insn
{
  /* TM holds the template for the insn we're currently assembling.  */
  insn_template tm;

  /* SUFFIX holds the instruction size suffix for byte, word, dword
     or qword, if given.  */
  char suffix;

  /* OPERANDS gives the number of given operands.  */
  unsigned int operands;

  /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
     of given register, displacement, memory operands and immediate
     operands.  */
  unsigned int reg_operands, disp_operands, mem_operands, imm_operands;

  /* TYPES [i] is the type (see above #defines) which tells us how to
     use OP[i] for the corresponding operand.  */
  i386_operand_type types[MAX_OPERANDS];

  /* Displacement expression, immediate expression, or register for each
     operand.  */
  union i386_op op[MAX_OPERANDS];

  /* Flags for operands.  */
  unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1

  /* Relocation type for operand.  */
  enum bfd_reloc_code_real reloc[MAX_OPERANDS];

  /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
     the base index byte below.  */
  const reg_entry *base_reg;
  const reg_entry *index_reg;
  unsigned int log2_scale_factor;

  /* SEG gives the seg_entries of this insn.  They are zero unless
     explicit segment overrides are given.  */
  const seg_entry *seg[2];

  /* PREFIX holds all the given prefix opcodes (usually null).
     PREFIXES is the number of prefix opcodes.  */
  unsigned int prefixes;
  unsigned char prefix[MAX_PREFIXES];

  /* RM and SIB are the modrm byte and the sib byte where the
     addressing modes of this insn are encoded.  */
  modrm_byte rm;
  rex_byte rex;
  sib_byte sib;
  vex_prefix vex;

  /* Swap operand in encoding.  */
  unsigned int swap_operand;

  /* Force 32bit displacement in encoding.  */
  unsigned int disp32_encoding;

  /* Error message.  */
  enum i386_error error;
};

typedef struct _i386_insn i386_insn;
290
291 /* List of chars besides those in app.c:symbol_chars that can start an
292 operand. Used to prevent the scrubber eating vital white-space. */
293 const char extra_symbol_chars[] = "*%-(["
294 #ifdef LEX_AT
295 "@"
296 #endif
297 #ifdef LEX_QM
298 "?"
299 #endif
300 ;
301
302 #if (defined (TE_I386AIX) \
303 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
304 && !defined (TE_GNU) \
305 && !defined (TE_LINUX) \
306 && !defined (TE_NETWARE) \
307 && !defined (TE_FreeBSD) \
308 && !defined (TE_DragonFly) \
309 && !defined (TE_NetBSD)))
310 /* This array holds the chars that always start a comment. If the
311 pre-processor is disabled, these aren't very useful. The option
312 --divide will remove '/' from this list. */
313 const char *i386_comment_chars = "#/";
314 #define SVR4_COMMENT_CHARS 1
315 #define PREFIX_SEPARATOR '\\'
316
317 #else
318 const char *i386_comment_chars = "#";
319 #define PREFIX_SEPARATOR '/'
320 #endif
321
322 /* This array holds the chars that only start a comment at the beginning of
323 a line. If the line seems to have the form '# 123 filename'
324 .line and .file directives will appear in the pre-processed output.
325 Note that input_file.c hand checks for '#' at the beginning of the
326 first line of the input file. This is because the compiler outputs
327 #NO_APP at the beginning of its output.
328 Also note that comments started like this one will always work if
329 '/' isn't otherwise defined. */
330 const char line_comment_chars[] = "#/";
331
332 const char line_separator_chars[] = ";";
333
334 /* Chars that can be used to separate mant from exp in floating point
335 nums. */
336 const char EXP_CHARS[] = "eE";
337
338 /* Chars that mean this number is a floating point constant
339 As in 0f12.456
340 or 0d1.2345e12. */
341 const char FLT_CHARS[] = "fFdDxX";
342
343 /* Tables for lexical analysis. */
344 static char mnemonic_chars[256];
345 static char register_chars[256];
346 static char operand_chars[256];
347 static char identifier_chars[256];
348 static char digit_chars[256];
349
350 /* Lexical macros. */
351 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
352 #define is_operand_char(x) (operand_chars[(unsigned char) x])
353 #define is_register_char(x) (register_chars[(unsigned char) x])
354 #define is_space_char(x) ((x) == ' ')
355 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
356 #define is_digit_char(x) (digit_chars[(unsigned char) x])
357
358 /* All non-digit non-letter characters that may occur in an operand. */
359 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
360
361 /* md_assemble() always leaves the strings it's passed unaltered. To
362 effect this we maintain a stack of saved characters that we've smashed
363 with '\0's (indicating end of strings for various sub-fields of the
364 assembler instruction). */
365 static char save_stack[32];
366 static char *save_stack_p;
367 #define END_STRING_AND_SAVE(s) \
368 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
369 #define RESTORE_END_STRING(s) \
370 do { *(s) = *--save_stack_p; } while (0)
371
372 /* The instruction we're assembling. */
373 static i386_insn i;
374
375 /* Possible templates for current insn. */
376 static const templates *current_templates;
377
378 /* Per instruction expressionS buffers: max displacements & immediates. */
379 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
380 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
381
382 /* Current operand we are working on. */
383 static int this_operand = -1;
384
/* We support four different modes.  FLAG_CODE variable is used to
   distinguish these.  NOTE(review): only three enumerators appear here;
   the fourth mode is presumably 16-bit code with 32-bit gcc stack frames
   (see .code16gcc / stackop_size below) — confirm.  */

enum flag_code {
	CODE_32BIT,
	CODE_16BIT,
	CODE_64BIT };

static enum flag_code flag_code;
/* Non-zero when producing a 64-bit object.  */
static unsigned int object_64bit;
/* Non-zero to reject 64-bit relocations; presumably set for the x32
   ILP32 ABI — confirm against i386_target_format.  */
static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;
397
398 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
399 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
400 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
401
/* The ELF ABI to use: plain i386, 64-bit x86-64, or the x32 variant
   of x86-64.  */
enum x86_elf_abi
{
  I386_ABI,
  X86_64_ABI,
  X86_64_X32_ABI
};

static enum x86_elf_abi x86_elf_abi = I386_ABI;
411 #endif
412
413 /* The names used to print error messages. */
414 static const char *flag_code_names[] =
415 {
416 "32",
417 "16",
418 "64"
419 };
420
421 /* 1 for intel syntax,
422 0 if att syntax. */
423 static int intel_syntax = 0;
424
425 /* 1 for intel mnemonic,
426 0 if att mnemonic. */
427 static int intel_mnemonic = !SYSV386_COMPAT;
428
429 /* 1 if support old (<= 2.8.1) versions of gcc. */
430 static int old_gcc = OLDGCC_COMPAT;
431
432 /* 1 if pseudo registers are permitted. */
433 static int allow_pseudo_reg = 0;
434
435 /* 1 if register prefix % not required. */
436 static int allow_naked_reg = 0;
437
438 /* 1 if pseudo index register, eiz/riz, is allowed . */
439 static int allow_index_reg = 0;
440
/* How strictly to diagnose SSE instructions; controlled by the
   .sse_check directive (see set_sse_check): silent, warning or error.  */
static enum
  {
    sse_check_none = 0,
    sse_check_warning,
    sse_check_error
  }
sse_check;
448
449 /* Register prefix used for error message. */
450 static const char *register_prefix = "%";
451
452 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
453 leave, push, and pop instructions so that gcc has the same stack
454 frame as in 32 bit mode. */
455 static char stackop_size = '\0';
456
457 /* Non-zero to optimize code alignment. */
458 int optimize_align_code = 1;
459
460 /* Non-zero to quieten some warnings. */
461 static int quiet_warnings = 0;
462
463 /* CPU name. */
464 static const char *cpu_arch_name = NULL;
465 static char *cpu_sub_arch_name = NULL;
466
467 /* CPU feature flags. */
468 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
469
470 /* If we have selected a cpu we are generating instructions for. */
471 static int cpu_arch_tune_set = 0;
472
473 /* Cpu we are generating instructions for. */
474 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
475
476 /* CPU feature flags of cpu we are generating instructions for. */
477 static i386_cpu_flags cpu_arch_tune_flags;
478
479 /* CPU instruction set architecture used. */
480 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
481
482 /* CPU feature flags of instruction set architecture used. */
483 i386_cpu_flags cpu_arch_isa_flags;
484
485 /* If set, conditional jumps are not automatically promoted to handle
486 larger than a byte offset. */
487 static unsigned int no_cond_jump_promotion = 0;
488
489 /* Encode SSE instructions with VEX prefix. */
490 static unsigned int sse2avx;
491
492 /* Encode scalar AVX instructions with specific vector length. */
493 static enum
494 {
495 vex128 = 0,
496 vex256
497 } avxscalar;
498
499 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
500 static symbolS *GOT_symbol;
501
502 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
503 unsigned int x86_dwarf2_return_column;
504
505 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
506 int x86_cie_data_alignment;
507
508 /* Interface to relax_segment.
509 There are 3 major relax states for 386 jump insns because the
510 different types of jumps add different sizes to frags when we're
511 figuring out what sort of jump to choose to reach a given label. */
512
/* Types.  */
#define UNCOND_JUMP 0
#define COND_JUMP 1
#define COND_JUMP86 2

/* Sizes.  */
#define CODE16	1
#define SMALL	0
#define SMALL16 (SMALL | CODE16)
#define BIG	2
#define BIG16	(BIG | CODE16)

/* NOTE(review): INLINE is already defined near the top of this file;
   this second guard is redundant but harmless.  */
#ifndef INLINE
#ifdef __GNUC__
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

/* A relax substate packs the jump TYPE in the upper bits and the
   displacement SIZE in the low two bits.  */
#define ENCODE_RELAX_STATE(type, size) \
  ((relax_substateT) (((type) << 2) | (size)))
#define TYPE_FROM_RELAX_STATE(s) \
  ((s) >> 2)
#define DISP_SIZE_FROM_RELAX_STATE(s) \
    ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
539
540 /* This table is used by relax_frag to promote short jumps to long
541 ones where necessary. SMALL (short) jumps may be promoted to BIG
542 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
543 don't allow a short jump in a 32 bit code segment to be promoted to
544 a 16 bit offset jump because it's slower (requires data size
545 prefix), and doesn't work, unless the destination is in the bottom
546 64k of the code segment (The top 16 bits of eip are zeroed). */
547
/* Indexed by ENCODE_RELAX_STATE (type, size).  */
const relax_typeS md_relax_table[] =
{
  /* The fields are:
     1) most positive reach of this state,
     2) most negative reach of this state,
     3) how many bytes this mode will have in the variable part of the frag
     4) which index into the table to try if we can't fit into this one.
     NOTE(review): the "+ 1" on the byte reaches presumably accounts for
     the displacement being measured from the end of the 1-byte field —
     confirm against relax_frag.  */

  /* UNCOND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
  /* dword jmp adds 4 bytes to frag:
     0 extra opcode bytes, 4 displacement bytes.  */
  {0, 0, 4, 0},
  /* word jmp adds 2 bytes to frag:
     0 extra opcode bytes, 2 displacement bytes.  */
  {0, 0, 2, 0},

  /* COND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
  /* dword conditionals adds 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 3 bytes to frag:
     1 extra opcode byte, 2 displacement bytes.  */
  {0, 0, 3, 0},

  /* COND_JUMP86 states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
  /* dword conditionals adds 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 4 bytes to frag:
     1 displacement byte and a 3 byte long branch insn.  */
  {0, 0, 4, 0}
};
586
/* Names accepted by the .arch directive and -march= option.  Entries
   beginning with '.' are ISA extensions rather than full architectures;
   entries with NEGATED set turn the indicated feature flags off.  */
static const arch_entry cpu_arch[] =
{
  /* Do not replace the first two entries - i386_target_format()
     relies on them being there in this order.  */
  { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
    CPU_GENERIC32_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
    CPU_GENERIC64_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
    CPU_NONE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
    CPU_I186_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
    CPU_I286_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
    CPU_I386_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
    CPU_I486_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
    CPU_I686_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
    CPU_PENTIUMPRO_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
    CPU_P2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
    CPU_P3_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
    CPU_P4_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
    CPU_CORE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
    CPU_NOCONA_FLAGS, 0, 0 },
  /* SKIP is set on "yonah": alias of "core", hidden from show_arch.  */
  { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 1, 0 },
  { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 1, 0 },
  { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
    CPU_COREI7_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
    CPU_L1OM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
    CPU_K1OM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
    CPU_K6_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
    CPU_K6_2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
    CPU_ATHLON_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
    CPU_K8_FLAGS, 1, 0 },
  { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
    CPU_AMDFAM10_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
    CPU_BDVER1_FLAGS, 0, 0 },
  { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
    CPU_BDVER2_FLAGS, 0, 0 },
  /* Feature extensions start here.  */
  { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
    CPU_8087_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
    CPU_287_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
    CPU_387_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
    CPU_ANY87_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
    CPU_MMX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
    CPU_3DNOWA_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
    CPU_SSE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
    CPU_SSE2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
    CPU_SSE3_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
    CPU_SSSE3_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
    CPU_SSE4_1_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
    CPU_ANY_SSE_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
    CPU_AVX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
    CPU_AVX2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
    CPU_ANY_AVX_FLAGS, 0, 1 },
  { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
    CPU_VMX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
    CPU_SMX_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
    CPU_XSAVE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
    CPU_XSAVEOPT_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
    CPU_AES_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 1, 0 },
  { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
    CPU_FSGSBASE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
    CPU_RDRND_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
    CPU_F16C_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
    CPU_BMI2_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
    CPU_FMA_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
    CPU_FMA4_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
    CPU_XOP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
    CPU_LWP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
    CPU_MOVBE_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
    CPU_EPT_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
    CPU_LZCNT_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
    CPU_INVPCID_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
    CPU_CLFLUSH_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
    CPU_NOP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
    CPU_SYSCALL_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
    CPU_RDTSCP_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
    CPU_3DNOW_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
    CPU_3DNOWA_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
    CPU_PADLOCK_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 1, 0 },
  { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
    CPU_SSE4A_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
    CPU_ABM_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
    CPU_BMI_FLAGS, 0, 0 },
  { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
    CPU_TBM_FLAGS, 0, 0 },
};
754
755 #ifdef I386COFF
756 /* Like s_lcomm_internal in gas/read.c but the alignment string
757 is allowed to be optional. */
758
759 static symbolS *
760 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
761 {
762 addressT align = 0;
763
764 SKIP_WHITESPACE ();
765
766 if (needs_align
767 && *input_line_pointer == ',')
768 {
769 align = parse_align (needs_align - 1);
770
771 if (align == (addressT) -1)
772 return NULL;
773 }
774 else
775 {
776 if (size >= 8)
777 align = 3;
778 else if (size >= 4)
779 align = 2;
780 else if (size >= 2)
781 align = 1;
782 else
783 align = 0;
784 }
785
786 bss_alloc (symbolP, size, align);
787 return symbolP;
788 }
789
790 static void
791 pe_lcomm (int needs_align)
792 {
793 s_comm_internal (needs_align * 2, pe_lcomm_internal);
794 }
795 #endif
796
/* i386-specific pseudo-ops, merged with the generic set by the
   top-level gas machinery.  Fields: name, handler, handler argument.  */
const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  /* On these targets the .align argument is a power of two.  */
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
  {"code64", set_code_flag, CODE_64BIT},
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_sse_check, 0},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", (void (*) (int)) dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};
840
841 /* For interface with expression (). */
842 extern char *input_line_pointer;
843
844 /* Hash table for instruction mnemonic lookup. */
845 static struct hash_control *op_hash;
846
847 /* Hash table for register lookup. */
848 static struct hash_control *reg_hash;
849 \f
850 void
851 i386_align_code (fragS *fragP, int count)
852 {
853 /* Various efficient no-op patterns for aligning code labels.
854 Note: Don't try to assemble the instructions in the comments.
855 0L and 0w are not legal. */
856 static const char f32_1[] =
857 {0x90}; /* nop */
858 static const char f32_2[] =
859 {0x66,0x90}; /* xchg %ax,%ax */
860 static const char f32_3[] =
861 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
862 static const char f32_4[] =
863 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
864 static const char f32_5[] =
865 {0x90, /* nop */
866 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
867 static const char f32_6[] =
868 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
869 static const char f32_7[] =
870 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
871 static const char f32_8[] =
872 {0x90, /* nop */
873 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
874 static const char f32_9[] =
875 {0x89,0xf6, /* movl %esi,%esi */
876 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
877 static const char f32_10[] =
878 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
879 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
880 static const char f32_11[] =
881 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
882 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
883 static const char f32_12[] =
884 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
885 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
886 static const char f32_13[] =
887 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
888 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
889 static const char f32_14[] =
890 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
891 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
892 static const char f16_3[] =
893 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
894 static const char f16_4[] =
895 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
896 static const char f16_5[] =
897 {0x90, /* nop */
898 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
899 static const char f16_6[] =
900 {0x89,0xf6, /* mov %si,%si */
901 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
902 static const char f16_7[] =
903 {0x8d,0x74,0x00, /* lea 0(%si),%si */
904 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
905 static const char f16_8[] =
906 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
907 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
908 static const char jump_31[] =
909 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
910 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
911 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
912 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
913 static const char *const f32_patt[] = {
914 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
915 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
916 };
917 static const char *const f16_patt[] = {
918 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
919 };
920 /* nopl (%[re]ax) */
921 static const char alt_3[] =
922 {0x0f,0x1f,0x00};
923 /* nopl 0(%[re]ax) */
924 static const char alt_4[] =
925 {0x0f,0x1f,0x40,0x00};
926 /* nopl 0(%[re]ax,%[re]ax,1) */
927 static const char alt_5[] =
928 {0x0f,0x1f,0x44,0x00,0x00};
929 /* nopw 0(%[re]ax,%[re]ax,1) */
930 static const char alt_6[] =
931 {0x66,0x0f,0x1f,0x44,0x00,0x00};
932 /* nopl 0L(%[re]ax) */
933 static const char alt_7[] =
934 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
935 /* nopl 0L(%[re]ax,%[re]ax,1) */
936 static const char alt_8[] =
937 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
938 /* nopw 0L(%[re]ax,%[re]ax,1) */
939 static const char alt_9[] =
940 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
941 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
942 static const char alt_10[] =
943 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
944 /* data16
945 nopw %cs:0L(%[re]ax,%[re]ax,1) */
946 static const char alt_long_11[] =
947 {0x66,
948 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
949 /* data16
950 data16
951 nopw %cs:0L(%[re]ax,%[re]ax,1) */
952 static const char alt_long_12[] =
953 {0x66,
954 0x66,
955 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
956 /* data16
957 data16
958 data16
959 nopw %cs:0L(%[re]ax,%[re]ax,1) */
960 static const char alt_long_13[] =
961 {0x66,
962 0x66,
963 0x66,
964 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
965 /* data16
966 data16
967 data16
968 data16
969 nopw %cs:0L(%[re]ax,%[re]ax,1) */
970 static const char alt_long_14[] =
971 {0x66,
972 0x66,
973 0x66,
974 0x66,
975 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
976 /* data16
977 data16
978 data16
979 data16
980 data16
981 nopw %cs:0L(%[re]ax,%[re]ax,1) */
982 static const char alt_long_15[] =
983 {0x66,
984 0x66,
985 0x66,
986 0x66,
987 0x66,
988 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
989 /* nopl 0(%[re]ax,%[re]ax,1)
990 nopw 0(%[re]ax,%[re]ax,1) */
991 static const char alt_short_11[] =
992 {0x0f,0x1f,0x44,0x00,0x00,
993 0x66,0x0f,0x1f,0x44,0x00,0x00};
994 /* nopw 0(%[re]ax,%[re]ax,1)
995 nopw 0(%[re]ax,%[re]ax,1) */
996 static const char alt_short_12[] =
997 {0x66,0x0f,0x1f,0x44,0x00,0x00,
998 0x66,0x0f,0x1f,0x44,0x00,0x00};
999 /* nopw 0(%[re]ax,%[re]ax,1)
1000 nopl 0L(%[re]ax) */
1001 static const char alt_short_13[] =
1002 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1003 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1004 /* nopl 0L(%[re]ax)
1005 nopl 0L(%[re]ax) */
1006 static const char alt_short_14[] =
1007 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1008 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1009 /* nopl 0L(%[re]ax)
1010 nopl 0L(%[re]ax,%[re]ax,1) */
1011 static const char alt_short_15[] =
1012 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1013 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1014 static const char *const alt_short_patt[] = {
1015 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1016 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1017 alt_short_14, alt_short_15
1018 };
1019 static const char *const alt_long_patt[] = {
1020 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1021 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1022 alt_long_14, alt_long_15
1023 };
1024
1025 /* Only align for at least a positive non-zero boundary. */
1026 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1027 return;
1028
1029 /* We need to decide which NOP sequence to use for 32bit and
1030 64bit. When -mtune= is used:
1031
1032 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1033 PROCESSOR_GENERIC32, f32_patt will be used.
1034 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1035 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1036 PROCESSOR_GENERIC64, alt_long_patt will be used.
1037 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1038 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1039 will be used.
1040
1041 When -mtune= isn't used, alt_long_patt will be used if
1042 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1043 be used.
1044
1045 When -march= or .arch is used, we can't use anything beyond
1046 cpu_arch_isa_flags. */
1047
1048 if (flag_code == CODE_16BIT)
1049 {
1050 if (count > 8)
1051 {
1052 memcpy (fragP->fr_literal + fragP->fr_fix,
1053 jump_31, count);
1054 /* Adjust jump offset. */
1055 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1056 }
1057 else
1058 memcpy (fragP->fr_literal + fragP->fr_fix,
1059 f16_patt[count - 1], count);
1060 }
1061 else
1062 {
1063 const char *const *patt = NULL;
1064
1065 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1066 {
1067 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1068 switch (cpu_arch_tune)
1069 {
1070 case PROCESSOR_UNKNOWN:
1071 /* We use cpu_arch_isa_flags to check if we SHOULD
1072 optimize with nops. */
1073 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1074 patt = alt_long_patt;
1075 else
1076 patt = f32_patt;
1077 break;
1078 case PROCESSOR_PENTIUM4:
1079 case PROCESSOR_NOCONA:
1080 case PROCESSOR_CORE:
1081 case PROCESSOR_CORE2:
1082 case PROCESSOR_COREI7:
1083 case PROCESSOR_L1OM:
1084 case PROCESSOR_K1OM:
1085 case PROCESSOR_GENERIC64:
1086 patt = alt_long_patt;
1087 break;
1088 case PROCESSOR_K6:
1089 case PROCESSOR_ATHLON:
1090 case PROCESSOR_K8:
1091 case PROCESSOR_AMDFAM10:
1092 case PROCESSOR_BD:
1093 patt = alt_short_patt;
1094 break;
1095 case PROCESSOR_I386:
1096 case PROCESSOR_I486:
1097 case PROCESSOR_PENTIUM:
1098 case PROCESSOR_PENTIUMPRO:
1099 case PROCESSOR_GENERIC32:
1100 patt = f32_patt;
1101 break;
1102 }
1103 }
1104 else
1105 {
1106 switch (fragP->tc_frag_data.tune)
1107 {
1108 case PROCESSOR_UNKNOWN:
1109 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1110 PROCESSOR_UNKNOWN. */
1111 abort ();
1112 break;
1113
1114 case PROCESSOR_I386:
1115 case PROCESSOR_I486:
1116 case PROCESSOR_PENTIUM:
1117 case PROCESSOR_K6:
1118 case PROCESSOR_ATHLON:
1119 case PROCESSOR_K8:
1120 case PROCESSOR_AMDFAM10:
1121 case PROCESSOR_BD:
1122 case PROCESSOR_GENERIC32:
1123 /* We use cpu_arch_isa_flags to check if we CAN optimize
1124 with nops. */
1125 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1126 patt = alt_short_patt;
1127 else
1128 patt = f32_patt;
1129 break;
1130 case PROCESSOR_PENTIUMPRO:
1131 case PROCESSOR_PENTIUM4:
1132 case PROCESSOR_NOCONA:
1133 case PROCESSOR_CORE:
1134 case PROCESSOR_CORE2:
1135 case PROCESSOR_COREI7:
1136 case PROCESSOR_L1OM:
1137 case PROCESSOR_K1OM:
1138 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1139 patt = alt_long_patt;
1140 else
1141 patt = f32_patt;
1142 break;
1143 case PROCESSOR_GENERIC64:
1144 patt = alt_long_patt;
1145 break;
1146 }
1147 }
1148
1149 if (patt == f32_patt)
1150 {
1151 /* If the padding is less than 15 bytes, we use the normal
1152 ones. Otherwise, we use a jump instruction and adjust
1153 its offset. */
1154 int limit;
1155
1156 /* For 64bit, the limit is 3 bytes. */
1157 if (flag_code == CODE_64BIT
1158 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1159 limit = 3;
1160 else
1161 limit = 15;
1162 if (count < limit)
1163 memcpy (fragP->fr_literal + fragP->fr_fix,
1164 patt[count - 1], count);
1165 else
1166 {
1167 memcpy (fragP->fr_literal + fragP->fr_fix,
1168 jump_31, count);
1169 /* Adjust jump offset. */
1170 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1171 }
1172 }
1173 else
1174 {
1175 /* Maximum length of an instruction is 15 byte. If the
1176 padding is greater than 15 bytes and we don't use jump,
1177 we have to break it into smaller pieces. */
1178 int padding = count;
1179 while (padding > 15)
1180 {
1181 padding -= 15;
1182 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1183 patt [14], 15);
1184 }
1185
1186 if (padding)
1187 memcpy (fragP->fr_literal + fragP->fr_fix,
1188 patt [padding - 1], padding);
1189 }
1190 }
1191 fragP->fr_var = count;
1192 }
1193
/* Return 1 iff every element of operand type *X is zero.  The switch
   dispatches on the compile-time element count of the union's array
   and deliberately falls through so all elements are checked.  */
static INLINE int
operand_type_all_zero (const union i386_operand_type *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1211
/* Set every element of operand type *X to V (normally 0 to clear the
   whole type).  Cases fall through to cover all array elements.  */
static INLINE void
operand_type_set (union i386_operand_type *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1228
/* Return 1 iff operand types *X and *Y are element-wise equal.
   Cases fall through so every element is compared.  */
static INLINE int
operand_type_equal (const union i386_operand_type *x,
		    const union i386_operand_type *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;
    default:
      abort ();
    }
}
1248
/* Return 1 iff every element of CPU flags *X is zero.  Cases fall
   through so all array elements are inspected.  */
static INLINE int
cpu_flags_all_zero (const union i386_cpu_flags *x)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return !x->array[0];
    default:
      abort ();
    }
}
1266
/* Set every element of CPU flags *X to V.  Cases fall through to
   cover all array elements.  */
static INLINE void
cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      x->array[2] = v;
      /* Fall through.  */
    case 2:
      x->array[1] = v;
      /* Fall through.  */
    case 1:
      x->array[0] = v;
      break;
    default:
      abort ();
    }
}
1283
/* Return 1 iff CPU flags *X and *Y are element-wise equal.  Cases
   fall through so every element is compared.  */
static INLINE int
cpu_flags_equal (const union i386_cpu_flags *x,
		 const union i386_cpu_flags *y)
{
  switch (ARRAY_SIZE(x->array))
    {
    case 3:
      if (x->array[2] != y->array[2])
	return 0;
      /* Fall through.  */
    case 2:
      if (x->array[1] != y->array[1])
	return 0;
      /* Fall through.  */
    case 1:
      return x->array[0] == y->array[0];
      break;
    default:
      abort ();
    }
}
1303
/* Return 1 if instruction CPU flags F are compatible with the current
   code size: a CpuNo64 insn is rejected in 64-bit mode, and a Cpu64
   insn is rejected outside 64-bit mode.  */
static INLINE int
cpu_flags_check_cpu64 (i386_cpu_flags f)
{
  return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
	   || (flag_code != CODE_64BIT && f.bitfield.cpu64));
}
1310
/* Return the element-wise AND of CPU flags X and Y.  X is modified
   in place (pass-by-value copy) and returned; cases fall through.  */
static INLINE i386_cpu_flags
cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1328
/* Return the element-wise OR of CPU flags X and Y.  Cases fall
   through to cover all array elements.  */
static INLINE i386_cpu_flags
cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1346
/* Return X with every flag that is set in Y cleared (X & ~Y),
   element-wise.  Cases fall through to cover all array elements.  */
static INLINE i386_cpu_flags
cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= ~y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= ~y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= ~y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1364
1365 #define CPU_FLAGS_ARCH_MATCH 0x1
1366 #define CPU_FLAGS_64BIT_MATCH 0x2
1367 #define CPU_FLAGS_AES_MATCH 0x4
1368 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1369 #define CPU_FLAGS_AVX_MATCH 0x10
1370
1371 #define CPU_FLAGS_32BIT_MATCH \
1372 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1373 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1374 #define CPU_FLAGS_PERFECT_MATCH \
1375 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1376
1377 /* Return CPU flags match bits. */
1378
/* Return the CPU_FLAGS_*_MATCH bits that template T earns against the
   currently enabled architecture flags (cpu_arch_flags).  64-bit
   compatibility is judged separately from the ISA feature bits.  */
static int
cpu_flags_match (const insn_template *t)
{
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* 64-bit-ness was decided above; drop it for the feature check.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
    {
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
    }
  else
    {
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      /* Keep only the required features that are enabled.  */
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	{
	  if (x.bitfield.cpuavx)
	    {
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		{
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		    {
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      /* Check AES.  */
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      /* Check PCLMUL.  */
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
		    }
		}
	      else
		match |= CPU_FLAGS_ARCH_MATCH;
	    }
	  else
	    match |= CPU_FLAGS_32BIT_MATCH;
	}
    }
  return match;
}
1431
/* Return the element-wise AND of operand types X and Y.  Cases fall
   through to cover all array elements.  */
static INLINE i386_operand_type
operand_type_and (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] &= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] &= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] &= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1449
/* Return the element-wise OR of operand types X and Y.  Cases fall
   through to cover all array elements.  */
static INLINE i386_operand_type
operand_type_or (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] |= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] |= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] |= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1467
/* Return the element-wise XOR of operand types X and Y.  Cases fall
   through to cover all array elements.  */
static INLINE i386_operand_type
operand_type_xor (i386_operand_type x, i386_operand_type y)
{
  switch (ARRAY_SIZE (x.array))
    {
    case 3:
      x.array [2] ^= y.array [2];
      /* Fall through.  */
    case 2:
      x.array [1] ^= y.array [1];
      /* Fall through.  */
    case 1:
      x.array [0] ^= y.array [0];
      break;
    default:
      abort ();
    }
  return x;
}
1485
/* Frequently used operand type values, built from the generated
   OPERAND_TYPE_* initializers (opcodes/i386-init.h).  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1511
/* Operand classes tested by operand_type_check.  */
enum operand_type
{
  reg,		/* any general register */
  imm,		/* any immediate */
  disp,		/* any displacement */
  anymem	/* anything that can form a memory operand */
};
1519
/* Return nonzero iff operand type T contains any bit of class C:
   a register of any size, an immediate of any width, a displacement
   of any width, or (for anymem) any displacement or base/index
   combination that can form a memory operand.  */
static INLINE int
operand_type_check (i386_operand_type t, enum operand_type c)
{
  switch (c)
    {
    case reg:
      return (t.bitfield.reg8
	      || t.bitfield.reg16
	      || t.bitfield.reg32
	      || t.bitfield.reg64);

    case imm:
      return (t.bitfield.imm8
	      || t.bitfield.imm8s
	      || t.bitfield.imm16
	      || t.bitfield.imm32
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);

    case disp:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);

    case anymem:
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);

    default:
      abort ();
    }

  /* Not reached; pacifies compilers that do not see abort as noreturn.  */
  return 0;
}
1560
1561 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1562 operand J for instruction template T. */
1563
/* Return 1 if the byte/word/dword/qword size bits of parsed operand J
   (global `i') do not conflict with operand J of template T: every
   size bit set in the parsed operand must also be allowed by the
   template.  */
static INLINE int
match_reg_size (const insn_template *t, unsigned int j)
{
  return !((i.types[j].bitfield.byte
	    && !t->operand_types[j].bitfield.byte)
	   || (i.types[j].bitfield.word
	       && !t->operand_types[j].bitfield.word)
	   || (i.types[j].bitfield.dword
	       && !t->operand_types[j].bitfield.dword)
	   || (i.types[j].bitfield.qword
	       && !t->operand_types[j].bitfield.qword));
}
1576
1577 /* Return 1 if there is no conflict in any size on operand J for
1578 instruction template T. */
1579
/* Return 1 if no memory operand size (including fword, tbyte, xmmword,
   ymmword and "unspecified") conflicts between parsed operand J and
   operand J of template T.  Builds on match_reg_size for the basic
   integer sizes.  */
static INLINE int
match_mem_size (const insn_template *t, unsigned int j)
{
  return (match_reg_size (t, j)
	  && !((i.types[j].bitfield.unspecified
		&& !t->operand_types[j].bitfield.unspecified)
	       || (i.types[j].bitfield.fword
		   && !t->operand_types[j].bitfield.fword)
	       || (i.types[j].bitfield.tbyte
		   && !t->operand_types[j].bitfield.tbyte)
	       || (i.types[j].bitfield.xmmword
		   && !t->operand_types[j].bitfield.xmmword)
	       || (i.types[j].bitfield.ymmword
		   && !t->operand_types[j].bitfield.ymmword)));
}
1595
1596 /* Return 1 if there is no size conflict on any operands for
1597 instruction template T. */
1598
/* Return 1 if no operand of the parsed instruction (global `i') has a
   size conflict with template T, trying the operands in reverse order
   as well when the template allows direction swapping (D/FloatD).
   Sets i.error on mismatch.  */
static INLINE int
operand_size_match (const insn_template *t)
{
  unsigned int j;
  int match = 1;

  /* Don't check jump instructions.  */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)
    return match;

  /* Check memory and accumulator operand size.  */
  for (j = 0; j < i.operands; j++)
    {
      /* AnySize templates accept any operand size.  */
      if (t->operand_types[j].bitfield.anysize)
	continue;

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
	{
	  match = 0;
	  break;
	}

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
	{
	  match = 0;
	  break;
	}
    }

  if (match)
    return match;
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
    {
      /* No reversed form to try.  */
mismatch:
      i.error = operand_size_mismatch;
      return 0;
    }

  /* Check reverse.  Only two-operand templates can be reversed.  */
  gas_assert (i.operands == 2);

  match = 1;
  for (j = 0; j < 2; j++)
    {
      /* Compare operand J against the template's other operand.  */
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))
	goto mismatch;

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
	goto mismatch;
    }

  return match;
}
1657
/* Return 1 if OVERLAP (the AND of a parsed operand type and a template
   operand type) is a usable match for the parsed type GIVEN: the
   overlap must retain at least one non-size bit, and baseindex and
   jumpabsolute must agree between overlap and GIVEN.  Sets i.error on
   mismatch.  */
static INLINE int
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
{
  i386_operand_type temp = overlap;

  /* Size bits alone do not constitute a match; mask them off before
     testing whether anything meaningful overlaps.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  if (operand_type_all_zero (&temp))
    goto mismatch;

  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
    return 1;

mismatch:
  i.error = operand_type_mismatch;
  return 0;
}
1685
1686 /* If given types g0 and g1 are registers they must be of the same type
1687 unless the expected operand type register overlap is null.
1688 Note that Acc in a template matches every size of reg. */
1689
/* If given types g0 and g1 are registers they must be of the same type
   unless the expected operand type register overlap is null.
   Note that Acc in a template matches every size of reg.
   M0/M1 are the matched (overlap) types, G0/G1 the parsed operand
   types, T0/T1 the template operand types.  Sets i.error on
   mismatch.  */
static INLINE int
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
{
  /* Non-register operands never conflict here.  */
  if (!operand_type_check (g0, reg))
    return 1;

  if (!operand_type_check (g1, reg))
    return 1;

  /* Same register size on both operands: fine.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)
    return 1;

  /* An Acc match widens the template to every register size.  */
  if (m0.bitfield.acc)
    {
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;
    }

  if (m1.bitfield.acc)
    {
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;
    }

  /* If the templates share no register size, the mixed-size operands
     cannot actually conflict.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))
    return 1;

  i.error = register_type_mismatch;

  return 0;
}
1736
1737 static INLINE unsigned int
1738 mode_from_disp_size (i386_operand_type t)
1739 {
1740 if (t.bitfield.disp8)
1741 return 1;
1742 else if (t.bitfield.disp16
1743 || t.bitfield.disp32
1744 || t.bitfield.disp32s)
1745 return 2;
1746 else
1747 return 0;
1748 }
1749
1750 static INLINE int
1751 fits_in_signed_byte (offsetT num)
1752 {
1753 return (num >= -128) && (num <= 127);
1754 }
1755
1756 static INLINE int
1757 fits_in_unsigned_byte (offsetT num)
1758 {
1759 return (num & 0xff) == num;
1760 }
1761
1762 static INLINE int
1763 fits_in_unsigned_word (offsetT num)
1764 {
1765 return (num & 0xffff) == num;
1766 }
1767
1768 static INLINE int
1769 fits_in_signed_word (offsetT num)
1770 {
1771 return (-32768 <= num) && (num <= 32767);
1772 }
1773
1774 static INLINE int
1775 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1776 {
1777 #ifndef BFD64
1778 return 1;
1779 #else
1780 return (!(((offsetT) -1 << 31) & num)
1781 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1782 #endif
1783 } /* fits_in_signed_long() */
1784
1785 static INLINE int
1786 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1787 {
1788 #ifndef BFD64
1789 return 1;
1790 #else
1791 return (num & (((offsetT) 2 << 31) - 1)) == num;
1792 #endif
1793 } /* fits_in_unsigned_long() */
1794
1795 static INLINE int
1796 fits_in_imm4 (offsetT num)
1797 {
1798 return (num & 0xf) == num;
1799 }
1800
/* Return the set of immediate operand types that can represent NUM,
   from the narrowest usable encoding upward.  Imm64 is always set;
   narrower bits are added according to NUM's range.  */
static i386_operand_type
smallest_imm_type (offsetT num)
{
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
    {
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
	 use that form.  */
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_byte (num))
    {
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
    {
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_signed_long (num))
    {
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
    }
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;

  return t;
}
1854
/* Truncate VAL to SIZE bytes (1, 2, 4 or 8), warning when significant
   bits are lost.  In 32-bit address contexts VAL is first
   sign-extended from 32 bits so that e.g. 0xffffffff and -1 compare
   equal under a 64-bit offsetT.  */
static offsetT
offset_in_range (offsetT val, int size)
{
  addressT mask;

  switch (size)
    {
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    case 4: mask = ((addressT) 2 << 31) - 1; break;
#ifdef BFD64
    case 8: mask = ((addressT) 2 << 63) - 1; break;
#endif
    default: abort ();
    }

#ifdef BFD64
  /* If BFD64, sign extend val for 32bit address mode.  */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
#endif

  /* Warn unless the dropped bits are pure sign extension.  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
    {
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
    }
  return val & mask;
}
1889
/* Classification of the prefix just added; returned by add_prefix.  */
enum PREFIX_GROUP
{
  PREFIX_EXIST = 0,	/* a prefix of the same class was already present */
  PREFIX_LOCK,		/* lock prefix added */
  PREFIX_REP,		/* rep/repne prefix added */
  PREFIX_OTHER		/* any other prefix added */
};
1897
1898 /* Returns
1899 a. PREFIX_EXIST if attempting to add a prefix where one from the
1900 same class already exists.
1901 b. PREFIX_LOCK if lock prefix is added.
1902 c. PREFIX_REP if rep/repne prefix is added.
1903 d. PREFIX_OTHER if other prefix is added.
1904 */
1905
/* Record PREFIX in the per-insn prefix slots (global `i'), diagnosing
   a duplicate prefix of the same class.  Returns the PREFIX_GROUP
   classification described above; PREFIX_EXIST means the prefix was
   rejected.  */
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
{
  enum PREFIX_GROUP ret = PREFIX_OTHER;
  unsigned int q;

  /* REX prefixes merge bit-wise; only conflicting REX bits or a
     duplicate REX.W count as already existing.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
    {
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	ret = PREFIX_EXIST;
      q = REX_PREFIX;
    }
  else
    {
      switch (prefix)
	{
	default:
	  abort ();

	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	  q = SEG_PREFIX;
	  break;

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	  q = REP_PREFIX;
	  ret = PREFIX_REP;
	  break;

	case LOCK_PREFIX_OPCODE:
	  q = LOCK_PREFIX;
	  ret = PREFIX_LOCK;
	  break;

	case FWAIT_OPCODE:
	  q = WAIT_PREFIX;
	  break;

	case ADDR_PREFIX_OPCODE:
	  q = ADDR_PREFIX;
	  break;

	case DATA_PREFIX_OPCODE:
	  q = DATA_PREFIX;
	  break;
	}
      if (i.prefix[q] != 0)
	ret = PREFIX_EXIST;
    }

  /* Nonzero ret (any value but PREFIX_EXIST) means the prefix is
     accepted; store it and bump the count.  */
  if (ret)
    {
      if (!i.prefix[q])
	++i.prefixes;
      i.prefix[q] |= prefix;
    }
  else
    as_bad (_("same type of prefix used twice"));

  return ret;
}
1975
/* Switch the assembler to code size VALUE (CODE_16BIT/32BIT/64BIT),
   updating cpu_arch_flags to match.  When CHECK is nonzero an
   unsupported mode is fatal; otherwise it is a plain error.  */
static void
update_code_flag (int value, int check)
{
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
    {
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
    }
  else
    {
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
    }
  /* 64-bit mode requires an arch with long mode support.  */
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  /* 32-bit mode requires at least an i386.  */
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
    {
      if (check)
	as_error = as_fatal;
      else
	as_error = as_bad;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
    }
  stackop_size = '\0';
}
2012
/* Directive handler wrapper: switch code size without making an
   unsupported mode fatal.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2018
/* Handler for .code16gcc: 16-bit code, but 32-bit sized stack
   operations (gcc emits 32-bit push/pop in its 16-bit mode).  */
static void
set_16bit_gcc_code_flag (int new_code_flag)
{
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
    abort ();
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
}
2029
/* Handler for .intel_syntax / .att_syntax.  SYNTAX_FLAG selects Intel
   (nonzero) or AT&T (zero) syntax; an optional "prefix"/"noprefix"
   argument controls whether registers need the % prefix.  */
static void
set_intel_syntax (int syntax_flag)
{
  /* Find out if register prefixing is specified.  */
  int ask_naked_reg = 0;

  SKIP_WHITESPACE ();
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "prefix") == 0)
	ask_naked_reg = 1;
      else if (strcmp (string, "noprefix") == 0)
	ask_naked_reg = -1;
      else
	as_bad (_("bad argument to syntax directive."));
      *input_line_pointer = e;
    }
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  if (ask_naked_reg == 0)
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
  else
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  /* Adjust the lexer: '%'/'$' are identifier characters only under
     the matching syntax/naked-register settings.  */
  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
}
2066
/* Handler for .intel_mnemonic / .att_mnemonic.  */
static void
set_intel_mnemonic (int mnemonic_flag)
{
  intel_mnemonic = mnemonic_flag;
}
2072
/* Handler for .allow_index_reg / .disallow_index_reg.  */
static void
set_allow_index_reg (int flag)
{
  allow_index_reg = flag;
}
2078
/* Handler for .sse_check: set the SSE instruction checking level to
   "none", "warning" or "error".  */
static void
set_sse_check (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "none") == 0)
	sse_check = sse_check_none;
      else if (strcmp (string, "warning") == 0)
	sse_check = sse_check_warning;
      else if (strcmp (string, "error") == 0)
	sse_check = sse_check_error;
      else
	as_bad (_("bad argument to sse_check directive."));
      *input_line_pointer = e;
    }
  else
    as_bad (_("missing argument for sse_check directive"));

  demand_empty_rest_of_line ();
}
2104
/* Diagnose selecting arch NAME when its flags NEW_FLAG are not
   compatible with the L1OM/K1OM output target.  A no-op for non-ELF
   targets.  */
static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  static const char *arch;

  /* Intel LIOM is only supported on ELF.  */
  if (!IS_ELF)
    return;

  if (!arch)
    {
      /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
	 use default_arch.  */
      arch = cpu_arch_name;
      if (!arch)
	arch = default_arch;
    }

  /* If we are targeting Intel L1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)
    return;

  /* If we are targeting Intel K1OM, we must enable it.  */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)
    return;

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
#endif
}
2138
/* Handler for the .arch directive: select a base architecture (plain
   name) or toggle a sub-architecture feature (".feature" /
   ".nofeature" names), with an optional ",jumps"/",nojumps" modifier
   controlling conditional jump promotion.  */
static void
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
  SKIP_WHITESPACE ();

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
    {
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      unsigned int j;
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (string, cpu_arch[j].name) == 0)
	    {
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      /* Names not starting with '.' are full architectures;
		 reset all arch state to that entry.  */
	      if (*string != '.')
		{
		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  if (flag_code == CODE_64BIT)
		    {
		      cpu_arch_flags.bitfield.cpu64 = 1;
		      cpu_arch_flags.bitfield.cpuno64 = 0;
		    }
		  else
		    {
		      cpu_arch_flags.bitfield.cpu64 = 0;
		      cpu_arch_flags.bitfield.cpuno64 = 1;
		    }
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}

	      /* ".feature" entries add (or, when negated, remove)
		 individual feature flags.  */
	      if (!cpu_arch[j].negated)
		flags = cpu_flags_or (cpu_arch_flags,
				      cpu_arch[j].flags);
	      else
		flags = cpu_flags_and_not (cpu_arch_flags,
					   cpu_arch[j].flags);
	      if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		{
		  /* Record the sub-arch suffix in cpu_sub_arch_name,
		     accumulating across multiple directives.  */
		  if (cpu_sub_arch_name)
		    {
		      char *name = cpu_sub_arch_name;
		      cpu_sub_arch_name = concat (name,
						  cpu_arch[j].name,
						  (const char *) NULL);
		      free (name);
		    }
		  else
		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		  cpu_arch_flags = flags;
		  cpu_arch_isa_flags = flags;
		}
	      *input_line_pointer = e;
	      demand_empty_rest_of_line ();
	      return;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    }
  else
    as_bad (_("missing cpu architecture"));

  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
    {
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	;
      else
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;
    }

  demand_empty_rest_of_line ();
}
2235
2236 enum bfd_architecture
2237 i386_arch (void)
2238 {
2239 if (cpu_arch_isa == PROCESSOR_L1OM)
2240 {
2241 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2242 || flag_code != CODE_64BIT)
2243 as_fatal (_("Intel L1OM is 64bit ELF only"));
2244 return bfd_arch_l1om;
2245 }
2246 else if (cpu_arch_isa == PROCESSOR_K1OM)
2247 {
2248 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2249 || flag_code != CODE_64BIT)
2250 as_fatal (_("Intel K1OM is 64bit ELF only"));
2251 return bfd_arch_k1om;
2252 }
2253 else
2254 return bfd_arch_i386;
2255 }
2256
2257 unsigned long
2258 i386_mach (void)
2259 {
2260 if (!strncmp (default_arch, "x86_64", 6))
2261 {
2262 if (cpu_arch_isa == PROCESSOR_L1OM)
2263 {
2264 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2265 || default_arch[6] != '\0')
2266 as_fatal (_("Intel L1OM is 64bit ELF only"));
2267 return bfd_mach_l1om;
2268 }
2269 else if (cpu_arch_isa == PROCESSOR_K1OM)
2270 {
2271 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2272 || default_arch[6] != '\0')
2273 as_fatal (_("Intel K1OM is 64bit ELF only"));
2274 return bfd_mach_k1om;
2275 }
2276 else if (default_arch[6] == '\0')
2277 return bfd_mach_x86_64;
2278 else
2279 return bfd_mach_x64_32;
2280 }
2281 else if (!strcmp (default_arch, "i386"))
2282 return bfd_mach_i386_i386;
2283 else
2284 as_fatal (_("unknown architecture"));
2285 }
2286 \f
/* One-time target initialization run by GAS before assembling.  Builds
   the opcode (op_hash) and register (reg_hash) hash tables, fills the
   lexical classification tables used by the instruction scanner,
   records minimum section alignment for ELF, and sets the DWARF CFI
   parameters that depend on the code size.  */
void
md_begin (void)
{
  const char *hash_err;

  /* Initialize op_hash hash table. */
  op_hash = hash_new ();

  {
    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop. */
    optab = i386_optab;
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

    /* NOTE(review): assumes i386_optab keeps all templates sharing a
       mnemonic contiguous, so a name change marks the end of one
       mnemonic's template list.  */
    while (1)
      {
	++optab;
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	  {
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew. */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (optab - 1)->name,
				    (void *) core_optab);
	    if (hash_err)
	      {
		as_fatal (_("internal Error: Can't hash %s: %s"),
			  (optab - 1)->name,
			  hash_err);
	      }
	    if (optab->name == NULL)
	      break;
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;
	  }
      }
  }

  /* Initialize reg_hash hash table. */
  reg_hash = hash_new ();
  {
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
      {
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	if (hash_err)
	  as_fatal (_("internal Error: Can't hash %s: %s"),
		    regtab->reg_name,
		    hash_err);
      }
  }

  /* Fill in lexical tables: mnemonic_chars, operand_chars. */
  {
    int c;
    char *p;

    for (c = 0; c < 256; c++)
      {
	if (ISDIGIT (c))
	  {
	    digit_chars[c] = c;
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISLOWER (c))
	  {
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	  }
	else if (ISUPPER (c))
	  {
	    /* Mnemonics and register names match case-insensitively:
	       upper case folds to lower in these tables.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	  }

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	else if (c >= 128)
	  {
	    /* Accept non-ASCII bytes in identifiers and operands.  */
	    identifier_chars[c] = c;
	    operand_chars[c] = c;
	  }
      }

#ifdef LEX_AT
    identifier_chars['@'] = '@';
#endif
#ifdef LEX_QM
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
#endif
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;
  }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    {
      /* Record 4-byte (2^2) minimum alignment for the standard
	 sections.  */
      record_alignment (text_section, 2);
      record_alignment (data_section, 2);
      record_alignment (bss_section, 2);
    }
#endif

  if (flag_code == CODE_64BIT)
    {
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
				  ? 32 : 16);
#else
      x86_dwarf2_return_column = 16;
#endif
      x86_cie_data_alignment = -8;
    }
  else
    {
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
    }
}
2425
/* Emit usage statistics for the two hash tables built in md_begin
   (opcode and register tables) to FILE.  */
void
i386_print_statistics (FILE *file)
{
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
}
2432 \f
2433 #ifdef DEBUG386
2434
2435 /* Debugging routines for md_assemble. */
2436 static void pte (insn_template *);
2437 static void pt (i386_operand_type);
2438 static void pe (expressionS *);
2439 static void ps (symbolS *);
2440
2441 static void
2442 pi (char *line, i386_insn *x)
2443 {
2444 unsigned int j;
2445
2446 fprintf (stdout, "%s: template ", line);
2447 pte (&x->tm);
2448 fprintf (stdout, " address: base %s index %s scale %x\n",
2449 x->base_reg ? x->base_reg->reg_name : "none",
2450 x->index_reg ? x->index_reg->reg_name : "none",
2451 x->log2_scale_factor);
2452 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2453 x->rm.mode, x->rm.reg, x->rm.regmem);
2454 fprintf (stdout, " sib: base %x index %x scale %x\n",
2455 x->sib.base, x->sib.index, x->sib.scale);
2456 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2457 (x->rex & REX_W) != 0,
2458 (x->rex & REX_R) != 0,
2459 (x->rex & REX_X) != 0,
2460 (x->rex & REX_B) != 0);
2461 for (j = 0; j < x->operands; j++)
2462 {
2463 fprintf (stdout, " #%d: ", j + 1);
2464 pt (x->types[j]);
2465 fprintf (stdout, "\n");
2466 if (x->types[j].bitfield.reg8
2467 || x->types[j].bitfield.reg16
2468 || x->types[j].bitfield.reg32
2469 || x->types[j].bitfield.reg64
2470 || x->types[j].bitfield.regmmx
2471 || x->types[j].bitfield.regxmm
2472 || x->types[j].bitfield.regymm
2473 || x->types[j].bitfield.sreg2
2474 || x->types[j].bitfield.sreg3
2475 || x->types[j].bitfield.control
2476 || x->types[j].bitfield.debug
2477 || x->types[j].bitfield.test)
2478 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2479 if (operand_type_check (x->types[j], imm))
2480 pe (x->op[j].imms);
2481 if (operand_type_check (x->types[j], disp))
2482 pe (x->op[j].disps);
2483 }
2484 }
2485
2486 static void
2487 pte (insn_template *t)
2488 {
2489 unsigned int j;
2490 fprintf (stdout, " %d operands ", t->operands);
2491 fprintf (stdout, "opcode %x ", t->base_opcode);
2492 if (t->extension_opcode != None)
2493 fprintf (stdout, "ext %x ", t->extension_opcode);
2494 if (t->opcode_modifier.d)
2495 fprintf (stdout, "D");
2496 if (t->opcode_modifier.w)
2497 fprintf (stdout, "W");
2498 fprintf (stdout, "\n");
2499 for (j = 0; j < t->operands; j++)
2500 {
2501 fprintf (stdout, " #%d type ", j + 1);
2502 pt (t->operand_types[j]);
2503 fprintf (stdout, "\n");
2504 }
2505 }
2506
2507 static void
2508 pe (expressionS *e)
2509 {
2510 fprintf (stdout, " operation %d\n", e->X_op);
2511 fprintf (stdout, " add_number %ld (%lx)\n",
2512 (long) e->X_add_number, (long) e->X_add_number);
2513 if (e->X_add_symbol)
2514 {
2515 fprintf (stdout, " add_symbol ");
2516 ps (e->X_add_symbol);
2517 fprintf (stdout, "\n");
2518 }
2519 if (e->X_op_symbol)
2520 {
2521 fprintf (stdout, " op_symbol ");
2522 ps (e->X_op_symbol);
2523 fprintf (stdout, "\n");
2524 }
2525 }
2526
2527 static void
2528 ps (symbolS *s)
2529 {
2530 fprintf (stdout, "%s type %s%s",
2531 S_GET_NAME (s),
2532 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2533 segment_name (S_GET_SEGMENT (s)));
2534 }
2535
2536 static struct type_name
2537 {
2538 i386_operand_type mask;
2539 const char *name;
2540 }
2541 const type_names[] =
2542 {
2543 { OPERAND_TYPE_REG8, "r8" },
2544 { OPERAND_TYPE_REG16, "r16" },
2545 { OPERAND_TYPE_REG32, "r32" },
2546 { OPERAND_TYPE_REG64, "r64" },
2547 { OPERAND_TYPE_IMM8, "i8" },
2548 { OPERAND_TYPE_IMM8, "i8s" },
2549 { OPERAND_TYPE_IMM16, "i16" },
2550 { OPERAND_TYPE_IMM32, "i32" },
2551 { OPERAND_TYPE_IMM32S, "i32s" },
2552 { OPERAND_TYPE_IMM64, "i64" },
2553 { OPERAND_TYPE_IMM1, "i1" },
2554 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2555 { OPERAND_TYPE_DISP8, "d8" },
2556 { OPERAND_TYPE_DISP16, "d16" },
2557 { OPERAND_TYPE_DISP32, "d32" },
2558 { OPERAND_TYPE_DISP32S, "d32s" },
2559 { OPERAND_TYPE_DISP64, "d64" },
2560 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2561 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2562 { OPERAND_TYPE_CONTROL, "control reg" },
2563 { OPERAND_TYPE_TEST, "test reg" },
2564 { OPERAND_TYPE_DEBUG, "debug reg" },
2565 { OPERAND_TYPE_FLOATREG, "FReg" },
2566 { OPERAND_TYPE_FLOATACC, "FAcc" },
2567 { OPERAND_TYPE_SREG2, "SReg2" },
2568 { OPERAND_TYPE_SREG3, "SReg3" },
2569 { OPERAND_TYPE_ACC, "Acc" },
2570 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2571 { OPERAND_TYPE_REGMMX, "rMMX" },
2572 { OPERAND_TYPE_REGXMM, "rXMM" },
2573 { OPERAND_TYPE_REGYMM, "rYMM" },
2574 { OPERAND_TYPE_ESSEG, "es" },
2575 };
2576
2577 static void
2578 pt (i386_operand_type t)
2579 {
2580 unsigned int j;
2581 i386_operand_type a;
2582
2583 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2584 {
2585 a = operand_type_and (t, type_names[j].mask);
2586 if (!operand_type_all_zero (&a))
2587 fprintf (stdout, "%s, ", type_names[j].name);
2588 }
2589 fflush (stdout);
2590 }
2591
2592 #endif /* DEBUG386 */
2593 \f
/* Choose a BFD relocation code for a fixup of SIZE bytes.  PCREL is
   non-zero for a pc-relative field.  SIGN > 0 requests a signed field,
   0 an unsigned one, and < 0 means "don't care".  OTHER, when not
   NO_RELOC, is a caller-selected relocation: it is widened to its
   64-bit variant when SIZE is 8 and then validated against the field.
   Returns NO_RELOC after issuing a diagnostic for any mismatch.  */
static bfd_reloc_code_real_type
reloc (unsigned int size,
       int pcrel,
       int sign,
       bfd_reloc_code_real_type other)
{
  if (other != NO_RELOC)
    {
      reloc_howto_type *rel;

      if (size == 8)
	switch (other)
	  {
	  case BFD_RELOC_X86_64_GOT32:
	    /* NOTE: the GOT32 and PLTOFF64 cases return immediately,
	       bypassing the size/pcrel/sign validation below.  */
	    return BFD_RELOC_X86_64_GOT64;
	    break;
	  case BFD_RELOC_X86_64_PLTOFF64:
	    return BFD_RELOC_X86_64_PLTOFF64;
	    break;
	  case BFD_RELOC_X86_64_GOTPC32:
	    other = BFD_RELOC_X86_64_GOTPC64;
	    break;
	  case BFD_RELOC_X86_64_GOTPCREL:
	    other = BFD_RELOC_X86_64_GOTPCREL64;
	    break;
	  case BFD_RELOC_X86_64_TPOFF32:
	    other = BFD_RELOC_X86_64_TPOFF64;
	    break;
	  case BFD_RELOC_X86_64_DTPOFF32:
	    other = BFD_RELOC_X86_64_DTPOFF64;
	    break;
	  default:
	    break;
	  }

      /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
      if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
	sign = -1;	/* Don't care: skip the signedness check below.  */

      rel = bfd_reloc_type_lookup (stdoutput, other);
      if (!rel)
	as_bad (_("unknown relocation (%u)"), other);
      else if (size != bfd_get_reloc_size (rel))
	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
		bfd_get_reloc_size (rel),
		size);
      else if (pcrel && !rel->pc_relative)
	as_bad (_("non-pc-relative relocation for pc-relative field"));
      else if ((rel->complain_on_overflow == complain_overflow_signed
		&& !sign)
	       || (rel->complain_on_overflow == complain_overflow_unsigned
		   && sign > 0))
	as_bad (_("relocated field and relocation type differ in signedness"));
      else
	return other;
      return NO_RELOC;
    }

  /* No relocation supplied by the caller: synthesize a generic one
     from SIZE, PCREL, and SIGN.  */
  if (pcrel)
    {
      if (!sign)
	as_bad (_("there are no unsigned pc-relative relocations"));
      switch (size)
	{
	case 1: return BFD_RELOC_8_PCREL;
	case 2: return BFD_RELOC_16_PCREL;
	case 4: return BFD_RELOC_32_PCREL;
	case 8: return BFD_RELOC_64_PCREL;
	}
      as_bad (_("cannot do %u byte pc-relative relocation"), size);
    }
  else
    {
      if (sign > 0)
	switch (size)
	  {
	  case 4: return BFD_RELOC_X86_64_32S;
	  }
      else
	switch (size)
	  {
	  case 1: return BFD_RELOC_8;
	  case 2: return BFD_RELOC_16;
	  case 4: return BFD_RELOC_32;
	  case 8: return BFD_RELOC_64;
	  }
      as_bad (_("cannot do %s %u byte relocation"),
	      sign > 0 ? "signed" : "unsigned", size);
    }

  return NO_RELOC;
}
2686
2687 /* Here we decide which fixups can be adjusted to make them relative to
2688 the beginning of the section instead of the symbol. Basically we need
2689 to make sure that the dynamic relocations are done correctly, so in
2690 some cases we force the original symbol to be used. */
2691
2692 int
2693 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2694 {
2695 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2696 if (!IS_ELF)
2697 return 1;
2698
2699 /* Don't adjust pc-relative references to merge sections in 64-bit
2700 mode. */
2701 if (use_rela_relocations
2702 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2703 && fixP->fx_pcrel)
2704 return 0;
2705
2706 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2707 and changed later by validate_fix. */
2708 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2709 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2710 return 0;
2711
2712 /* adjust_reloc_syms doesn't know about the GOT. */
2713 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2714 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2715 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2716 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2717 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2718 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2719 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2720 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2721 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2722 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2723 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2724 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2725 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2726 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2727 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2728 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2729 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2730 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2731 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2732 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2733 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2734 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2735 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2736 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2737 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2738 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2739 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2740 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2741 return 0;
2742 #endif
2743 return 1;
2744 }
2745
/* Classify an x87 mnemonic: 0 = not a floating-point op (including
   fxsave/fxrstor), 2 = integer variant (fi*), 3 = control/state op
   (fldcw, fnstsw, fsave, ...), 1 = any other f* math op.  Note that
   the value returned is meaningful only for opcodes with (memory)
   operands, hence the code here is free to improperly handle opcodes
   that have no operands (for better performance and smaller code).  */
static int
intel_float_operand (const char *mnemonic)
{
  if (mnemonic[0] != 'f')
    return 0;			/* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here.  */
    case 'i':
      return 2;			/* integer op */

    case 'l':
      /* fldcw / fldenv load control state.  */
      return (mnemonic[2] == 'd'
	      && (mnemonic[3] == 'c' || mnemonic[3] == 'e')) ? 3 : 1;

    case 'n':
      /* Every non-waiting fn* control op except fnop.  */
      return mnemonic[2] != 'o' ? 3 : 1;

    case 'r':
      return mnemonic[2] == 's' ? 3 : 1;	/* frstor/frstpm */

    case 's':
      if (mnemonic[2] == 'a')
	return 3;		/* fsave */
      if (mnemonic[2] == 't')
	{
	  char fourth = mnemonic[3];

	  /* fstcw, fstdw, fstenv, fsts[gw].  */
	  if (fourth == 'c' || fourth == 'd'
	      || fourth == 'e' || fourth == 's')
	    return 3;
	}
      return 1;

    case 'x':
      /* fxsave/fxrstor are not really math ops.  */
      return (mnemonic[2] == 'r' || mnemonic[2] == 's') ? 0 : 1;
    }

  return 1;
}
2798
/* Build the VEX prefix for the current instruction in global I and
   store its bytes in i.vex.  T points at the matched template; when
   the operand-swap trick below fires, the encoding switches to the
   following template t[1].  */

static void
build_vex_prefix (const insn_template *t)
{
  unsigned int register_specifier;
  unsigned int implied_prefix;
  unsigned int vector_length;

  /* Check register specifier.  VEX.vvvv holds the one's complement of
     the (REX-extended) register number; 0xf encodes "no register".  */
  if (i.vex.register_specifier)
    {
      register_specifier = i.vex.register_specifier->reg_num;
      if ((i.vex.register_specifier->reg_flags & RegRex))
	register_specifier += 8;
      register_specifier = ~register_specifier & 0xf;
    }
  else
    register_specifier = 0xf;

  /* Use 2-byte VEX prefix by swapping destination and source
     operand.  The 2-byte form (checked below) requires REX.W/X/B all
     clear; when only REX.B is set and the operands may be swapped
     (opcode modifier S), the swap turns REX.B into REX.R, which the
     2-byte prefix can carry.  */
  if (!i.swap_operand
      && i.operands == i.reg_operands
      && i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.s
      && i.rex == REX_B)
    {
      unsigned int xchg = i.operands - 1;
      union i386_op temp_op;
      i386_operand_type temp_type;

      /* Exchange the first and last operands (types and values).  */
      temp_type = i.types[xchg];
      i.types[xchg] = i.types[0];
      i.types[0] = temp_type;
      temp_op = i.op[xchg];
      i.op[xchg] = i.op[0];
      i.op[0] = temp_op;

      gas_assert (i.rm.mode == 3);

      /* Mirror the swap in the ModRM byte and the REX state.  */
      i.rex = REX_R;
      xchg = i.rm.regmem;
      i.rm.regmem = i.rm.reg;
      i.rm.reg = xchg;

      /* Use the next insn.  */
      i.tm = t[1];
    }

  if (i.tm.opcode_modifier.vex == VEXScalar)
    vector_length = avxscalar;
  else
    vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;

  /* Derive the implied legacy prefix field (pp) from the second
     opcode byte: none/66/F3/F2 -> 0/1/2/3.  */
  switch ((i.tm.base_opcode >> 8) & 0xff)
    {
    case 0:
      implied_prefix = 0;
      break;
    case DATA_PREFIX_OPCODE:
      implied_prefix = 1;
      break;
    case REPE_PREFIX_OPCODE:
      implied_prefix = 2;
      break;
    case REPNE_PREFIX_OPCODE:
      implied_prefix = 3;
      break;
    default:
      abort ();
    }

  /* Use 2-byte VEX prefix if possible.  */
  if (i.tm.opcode_modifier.vexopcode == VEX0F
      && i.tm.opcode_modifier.vexw != VEXW1
      && (i.rex & (REX_W | REX_X | REX_B)) == 0)
    {
      /* 2-byte VEX prefix.  */
      unsigned int r;

      i.vex.length = 2;
      i.vex.bytes[0] = 0xc5;

      /* Check the REX.R bit.  The encoded field is inverted.  */
      r = (i.rex & REX_R) ? 0 : 1;
      i.vex.bytes[1] = (r << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
  else
    {
      /* 3-byte VEX prefix.  */
      unsigned int m, w;

      i.vex.length = 3;

      /* Select the opcode-map field M and the escape byte (0xc4 for
	 VEX maps, 0x8f for XOP maps).  */
      switch (i.tm.opcode_modifier.vexopcode)
	{
	case VEX0F:
	  m = 0x1;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F38:
	  m = 0x2;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case VEX0F3A:
	  m = 0x3;
	  i.vex.bytes[0] = 0xc4;
	  break;
	case XOP08:
	  m = 0x8;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP09:
	  m = 0x9;
	  i.vex.bytes[0] = 0x8f;
	  break;
	case XOP0A:
	  m = 0xa;
	  i.vex.bytes[0] = 0x8f;
	  break;
	default:
	  abort ();
	}

      /* The high 3 bits of the second VEX byte are 1's complement
	 of RXB bits from REX.  */
      i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;

      /* Check the REX.W bit.  A VexW modifier overrides it (and must
	 not coexist with an already-set W).  */
      w = (i.rex & REX_W) ? 1 : 0;
      if (i.tm.opcode_modifier.vexw)
	{
	  if (w)
	    abort ();

	  if (i.tm.opcode_modifier.vexw == VEXW1)
	    w = 1;
	}

      i.vex.bytes[2] = (w << 7
			| register_specifier << 3
			| vector_length << 2
			| implied_prefix);
    }
}
2948
2949 static void
2950 process_immext (void)
2951 {
2952 expressionS *exp;
2953
2954 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2955 {
2956 /* SSE3 Instructions have the fixed operands with an opcode
2957 suffix which is coded in the same place as an 8-bit immediate
2958 field would be. Here we check those operands and remove them
2959 afterwards. */
2960 unsigned int x;
2961
2962 for (x = 0; x < i.operands; x++)
2963 if (i.op[x].regs->reg_num != x)
2964 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2965 register_prefix, i.op[x].regs->reg_name, x + 1,
2966 i.tm.name);
2967
2968 i.operands = 0;
2969 }
2970
2971 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2972 which is coded in the same place as an 8-bit immediate field
2973 would be. Here we fake an 8-bit immediate operand from the
2974 opcode suffix stored in tm.extension_opcode.
2975
2976 AVX instructions also use this encoding, for some of
2977 3 argument instructions. */
2978
2979 gas_assert (i.imm_operands == 0
2980 && (i.operands <= 2
2981 || (i.tm.opcode_modifier.vex
2982 && i.operands <= 4)));
2983
2984 exp = &im_expressions[i.imm_operands++];
2985 i.op[i.operands].imms = exp;
2986 i.types[i.operands] = imm8;
2987 i.operands++;
2988 exp->X_op = O_constant;
2989 exp->X_add_number = i.tm.extension_opcode;
2990 i.tm.extension_opcode = None;
2991 }
2992
/* This is the guts of the machine-dependent assembler.  LINE points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  The pipeline is: reset global
   state, parse mnemonic and operands, optimize immediates and
   displacements, match a template, finalize sizes and prefixes, then
   emit via output_insn.  */

void
md_assemble (char *line)
{
  unsigned int j;
  char mnemonic[MAX_MNEM_SIZE];
  const insn_template *t;

  /* Initialize globals. */
  memset (&i, '\0', sizeof (i));
  for (j = 0; j < MAX_OPERANDS; j++)
    i.reloc[j] = NO_RELOC;
  memset (disp_expressions, '\0', sizeof (disp_expressions));
  memset (im_expressions, '\0', sizeof (im_expressions));
  save_stack_p = save_stack;

  /* First parse an instruction mnemonic & call i386_operand for the operands.
     We assume that the scrubber has arranged it so that line[0] is the valid
     start of a (possibly prefixed) mnemonic. */

  line = parse_insn (line, mnemonic);
  if (line == NULL)
    return;

  line = parse_operands (line, mnemonic);
  this_operand = -1;
  if (line == NULL)
    return;

  /* Now we've parsed the mnemonic into a set of templates, and have the
     operands at hand. */

  /* All intel opcodes have reversed operands except for "bound" and
     "enter".  We also don't reverse intersegment "jmp" and "call"
     instructions with 2 immediate operands so that the immediate segment
     precedes the offset, as it does when in AT&T mode. */
  if (intel_syntax
      && i.operands > 1
      && (strcmp (mnemonic, "bound") != 0)
      && (strcmp (mnemonic, "invlpga") != 0)
      && !(operand_type_check (i.types[0], imm)
	   && operand_type_check (i.types[1], imm)))
    swap_operands ();

  /* The order of the immediates should be reversed
     for 2 immediates extrq and insertq instructions */
  if (i.imm_operands == 2
      && (strcmp (mnemonic, "extrq") == 0
	  || strcmp (mnemonic, "insertq") == 0))
      swap_2_operands (0, 1);

  if (i.imm_operands)
    optimize_imm ();

  /* Don't optimize displacement for movabs since it only takes 64bit
     displacement. */
  if (i.disp_operands
      && !i.disp32_encoding
      && (flag_code != CODE_64BIT
	  || strcmp (mnemonic, "movabs") != 0))
    optimize_disp ();

  /* Next, we find a template that matches the given insn,
     making sure the overlap of the given operands types is consistent
     with the template operand types. */

  if (!(t = match_template ()))
    return;

  /* With -msse-check={warning,error}, diagnose use of pre-AVX SSE
     instructions (unless the template is marked noavx).  */
  if (sse_check != sse_check_none
      && !i.tm.opcode_modifier.noavx
      && (i.tm.cpu_flags.bitfield.cpusse
	  || i.tm.cpu_flags.bitfield.cpusse2
	  || i.tm.cpu_flags.bitfield.cpusse3
	  || i.tm.cpu_flags.bitfield.cpussse3
	  || i.tm.cpu_flags.bitfield.cpusse4_1
	  || i.tm.cpu_flags.bitfield.cpusse4_2))
    {
      (sse_check == sse_check_warning
       ? as_warn
       : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
    }

  /* Zap movzx and movsx suffix.  The suffix has been set from
     "word ptr" or "byte ptr" on the source operand in Intel syntax
     or extracted from mnemonic in AT&T syntax.  But we'll use
     the destination register to choose the suffix for encoding.
     (0x0fb6/b7/be/bf == movzx/movsx variants; & ~9 masks the width
     and sign bits.)  */
  if ((i.tm.base_opcode & ~9) == 0x0fb6)
    {
      /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
	 there is no suffix, the default will be byte extension. */
      if (i.reg_operands != 2
	  && !i.suffix
	  && intel_syntax)
	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);

      i.suffix = 0;
    }

  if (i.tm.opcode_modifier.fwait)
    if (!add_prefix (FWAIT_OPCODE))
      return;

  /* Check for lock without a lockable instruction.  Destination operand
     must be memory unless it is xchg (0x86). */
  if (i.prefix[LOCK_PREFIX]
      && (!i.tm.opcode_modifier.islockable
	  || i.mem_operands == 0
	  || (i.tm.base_opcode != 0x86
	      && !operand_type_check (i.types[i.operands - 1], anymem))))
    {
      as_bad (_("expecting lockable instruction after `lock'"));
      return;
    }

  /* Check string instruction segment overrides. */
  if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
    {
      if (!check_string ())
	return;
      i.disp_operands = 0;
    }

  if (!process_suffix ())
    return;

  /* Update operand types. */
  for (j = 0; j < i.operands; j++)
    i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);

  /* Make still unresolved immediate matches conform to size of immediate
     given in i.suffix. */
  if (!finalize_imm ())
    return;

  if (i.types[0].bitfield.imm1)
    i.imm_operands = 0;	/* kludge for shift insns. */

  /* We only need to check those implicit registers for instructions
     with 3 operands or less. */
  if (i.operands <= 3)
    for (j = 0; j < i.operands; j++)
      if (i.types[j].bitfield.inoutportreg
	  || i.types[j].bitfield.shiftcount
	  || i.types[j].bitfield.acc
	  || i.types[j].bitfield.floatacc)
	i.reg_operands--;

  /* ImmExt should be processed after SSE2AVX. */
  if (!i.tm.opcode_modifier.sse2avx
      && i.tm.opcode_modifier.immext)
    process_immext ();

  /* For insns with operands there are more diddles to do to the opcode. */
  if (i.operands)
    {
      if (!process_operands ())
	return;
    }
  else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
    {
      /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
      as_warn (_("translating to `%sp'"), i.tm.name);
    }

  if (i.tm.opcode_modifier.vex)
    build_vex_prefix (t);

  /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
     instructions may define INT_OPCODE as well, so avoid this corner
     case for those instructions that use MODRM. */
  if (i.tm.base_opcode == INT_OPCODE
      && !i.tm.opcode_modifier.modrm
      && i.op[0].imms->X_add_number == 3)
    {
      i.tm.base_opcode = INT3_OPCODE;
      i.imm_operands = 0;
    }

  if ((i.tm.opcode_modifier.jump
       || i.tm.opcode_modifier.jumpbyte
       || i.tm.opcode_modifier.jumpdword)
      && i.op[0].disps->X_op == O_constant)
    {
      /* Convert "jmp constant" (and "call constant") to a jump (call) to
	 the absolute address given by the constant.  Since ix86 jumps and
	 calls are pc relative, we need to generate a reloc. */
      i.op[0].disps->X_add_symbol = &abs_symbol;
      i.op[0].disps->X_op = O_symbol;
    }

  if (i.tm.opcode_modifier.rex64)
    i.rex |= REX_W;

  /* For 8 bit registers we need an empty rex prefix.  Also if the
     instruction already has a prefix, we need to convert old
     registers to new ones. */

  if ((i.types[0].bitfield.reg8
       && (i.op[0].regs->reg_flags & RegRex64) != 0)
      || (i.types[1].bitfield.reg8
	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
      || ((i.types[0].bitfield.reg8
	   || i.types[1].bitfield.reg8)
	  && i.rex != 0))
    {
      int x;

      i.rex |= REX_OPCODE;
      for (x = 0; x < 2; x++)
	{
	  /* Look for 8 bit operand that uses old registers. */
	  if (i.types[x].bitfield.reg8
	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
	    {
	      /* In case it is "hi" register, give up. */
	      if (i.op[x].regs->reg_num > 3)
		as_bad (_("can't encode register '%s%s' in an "
			  "instruction requiring REX prefix."),
			register_prefix, i.op[x].regs->reg_name);

	      /* Otherwise it is equivalent to the extended register.
		 Since the encoding doesn't change this is merely
		 cosmetic cleanup for debug output. */

	      i.op[x].regs = i.op[x].regs + 8;
	    }
	}
    }

  if (i.rex != 0)
    add_prefix (REX_OPCODE | i.rex);

  /* We are ready to output the insn. */
  output_insn ();
}
3232
3233 static char *
3234 parse_insn (char *line, char *mnemonic)
3235 {
3236 char *l = line;
3237 char *token_start = l;
3238 char *mnem_p;
3239 int supported;
3240 const insn_template *t;
3241 char *dot_p = NULL;
3242
3243 /* Non-zero if we found a prefix only acceptable with string insns. */
3244 const char *expecting_string_instruction = NULL;
3245
3246 while (1)
3247 {
3248 mnem_p = mnemonic;
3249 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3250 {
3251 if (*mnem_p == '.')
3252 dot_p = mnem_p;
3253 mnem_p++;
3254 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3255 {
3256 as_bad (_("no such instruction: `%s'"), token_start);
3257 return NULL;
3258 }
3259 l++;
3260 }
3261 if (!is_space_char (*l)
3262 && *l != END_OF_INSN
3263 && (intel_syntax
3264 || (*l != PREFIX_SEPARATOR
3265 && *l != ',')))
3266 {
3267 as_bad (_("invalid character %s in mnemonic"),
3268 output_invalid (*l));
3269 return NULL;
3270 }
3271 if (token_start == l)
3272 {
3273 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3274 as_bad (_("expecting prefix; got nothing"));
3275 else
3276 as_bad (_("expecting mnemonic; got nothing"));
3277 return NULL;
3278 }
3279
3280 /* Look up instruction (or prefix) via hash table. */
3281 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3282
3283 if (*l != END_OF_INSN
3284 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3285 && current_templates
3286 && current_templates->start->opcode_modifier.isprefix)
3287 {
3288 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3289 {
3290 as_bad ((flag_code != CODE_64BIT
3291 ? _("`%s' is only supported in 64-bit mode")
3292 : _("`%s' is not supported in 64-bit mode")),
3293 current_templates->start->name);
3294 return NULL;
3295 }
3296 /* If we are in 16-bit mode, do not allow addr16 or data16.
3297 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3298 if ((current_templates->start->opcode_modifier.size16
3299 || current_templates->start->opcode_modifier.size32)
3300 && flag_code != CODE_64BIT
3301 && (current_templates->start->opcode_modifier.size32
3302 ^ (flag_code == CODE_16BIT)))
3303 {
3304 as_bad (_("redundant %s prefix"),
3305 current_templates->start->name);
3306 return NULL;
3307 }
3308 /* Add prefix, checking for repeated prefixes. */
3309 switch (add_prefix (current_templates->start->base_opcode))
3310 {
3311 case PREFIX_EXIST:
3312 return NULL;
3313 case PREFIX_REP:
3314 expecting_string_instruction = current_templates->start->name;
3315 break;
3316 default:
3317 break;
3318 }
3319 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3320 token_start = ++l;
3321 }
3322 else
3323 break;
3324 }
3325
3326 if (!current_templates)
3327 {
3328 /* Check if we should swap operand or force 32bit displacement in
3329 encoding. */
3330 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3331 i.swap_operand = 1;
3332 else if (mnem_p - 4 == dot_p
3333 && dot_p[1] == 'd'
3334 && dot_p[2] == '3'
3335 && dot_p[3] == '2')
3336 i.disp32_encoding = 1;
3337 else
3338 goto check_suffix;
3339 mnem_p = dot_p;
3340 *dot_p = '\0';
3341 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3342 }
3343
3344 if (!current_templates)
3345 {
3346 check_suffix:
3347 /* See if we can get a match by trimming off a suffix. */
3348 switch (mnem_p[-1])
3349 {
3350 case WORD_MNEM_SUFFIX:
3351 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3352 i.suffix = SHORT_MNEM_SUFFIX;
3353 else
3354 case BYTE_MNEM_SUFFIX:
3355 case QWORD_MNEM_SUFFIX:
3356 i.suffix = mnem_p[-1];
3357 mnem_p[-1] = '\0';
3358 current_templates = (const templates *) hash_find (op_hash,
3359 mnemonic);
3360 break;
3361 case SHORT_MNEM_SUFFIX:
3362 case LONG_MNEM_SUFFIX:
3363 if (!intel_syntax)
3364 {
3365 i.suffix = mnem_p[-1];
3366 mnem_p[-1] = '\0';
3367 current_templates = (const templates *) hash_find (op_hash,
3368 mnemonic);
3369 }
3370 break;
3371
3372 /* Intel Syntax. */
3373 case 'd':
3374 if (intel_syntax)
3375 {
3376 if (intel_float_operand (mnemonic) == 1)
3377 i.suffix = SHORT_MNEM_SUFFIX;
3378 else
3379 i.suffix = LONG_MNEM_SUFFIX;
3380 mnem_p[-1] = '\0';
3381 current_templates = (const templates *) hash_find (op_hash,
3382 mnemonic);
3383 }
3384 break;
3385 }
3386 if (!current_templates)
3387 {
3388 as_bad (_("no such instruction: `%s'"), token_start);
3389 return NULL;
3390 }
3391 }
3392
3393 if (current_templates->start->opcode_modifier.jump
3394 || current_templates->start->opcode_modifier.jumpbyte)
3395 {
3396 /* Check for a branch hint. We allow ",pt" and ",pn" for
3397 predict taken and predict not taken respectively.
3398 I'm not sure that branch hints actually do anything on loop
3399 and jcxz insns (JumpByte) for current Pentium4 chips. They
3400 may work in the future and it doesn't hurt to accept them
3401 now. */
3402 if (l[0] == ',' && l[1] == 'p')
3403 {
3404 if (l[2] == 't')
3405 {
3406 if (!add_prefix (DS_PREFIX_OPCODE))
3407 return NULL;
3408 l += 3;
3409 }
3410 else if (l[2] == 'n')
3411 {
3412 if (!add_prefix (CS_PREFIX_OPCODE))
3413 return NULL;
3414 l += 3;
3415 }
3416 }
3417 }
3418 /* Any other comma loses. */
3419 if (*l == ',')
3420 {
3421 as_bad (_("invalid character %s in mnemonic"),
3422 output_invalid (*l));
3423 return NULL;
3424 }
3425
3426 /* Check if instruction is supported on specified architecture. */
3427 supported = 0;
3428 for (t = current_templates->start; t < current_templates->end; ++t)
3429 {
3430 supported |= cpu_flags_match (t);
3431 if (supported == CPU_FLAGS_PERFECT_MATCH)
3432 goto skip;
3433 }
3434
3435 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3436 {
3437 as_bad (flag_code == CODE_64BIT
3438 ? _("`%s' is not supported in 64-bit mode")
3439 : _("`%s' is only supported in 64-bit mode"),
3440 current_templates->start->name);
3441 return NULL;
3442 }
3443 if (supported != CPU_FLAGS_PERFECT_MATCH)
3444 {
3445 as_bad (_("`%s' is not supported on `%s%s'"),
3446 current_templates->start->name,
3447 cpu_arch_name ? cpu_arch_name : default_arch,
3448 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3449 return NULL;
3450 }
3451
3452 skip:
3453 if (!cpu_arch_flags.bitfield.cpui386
3454 && (flag_code != CODE_16BIT))
3455 {
3456 as_warn (_("use .code16 to ensure correct addressing mode"));
3457 }
3458
3459 /* Check for rep/repne without a string instruction. */
3460 if (expecting_string_instruction)
3461 {
3462 static templates override;
3463
3464 for (t = current_templates->start; t < current_templates->end; ++t)
3465 if (t->opcode_modifier.isstring)
3466 break;
3467 if (t >= current_templates->end)
3468 {
3469 as_bad (_("expecting string instruction after `%s'"),
3470 expecting_string_instruction);
3471 return NULL;
3472 }
3473 for (override.start = t; t < current_templates->end; ++t)
3474 if (!t->opcode_modifier.isstring)
3475 break;
3476 override.end = t;
3477 current_templates = &override;
3478 }
3479
3480 return l;
3481 }
3482
/* Parse the comma-separated operand list starting at L for the
   instruction MNEMONIC, recording each operand into the global
   instruction descriptor `i' (i.operands, i.types[], i.op[], ...).
   Returns a pointer just past the parsed operands, or NULL after
   issuing a diagnostic on malformed input.  */
static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;

  /* 1 if operand is pending after ','.  */
  unsigned int expecting_operand = 0;

  /* Non-zero if operand parens not balanced.  */
  unsigned int paren_not_balanced;

  while (*l != END_OF_INSN)
    {
      /* Skip optional white space before operand.  */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %s before operand %d"),
		  output_invalid (*l),
		  i.operands + 1);
	  return NULL;
	}
      token_start = l;	/* after white space */
      paren_not_balanced = 0;
      /* Scan forward to the ',' (or end of insn) terminating this
	 operand.  A ',' inside unbalanced parens/brackets is part of
	 the operand (e.g. memory operands like "(%eax,%ebx,2)").  */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  if (!intel_syntax)
		    as_bad (_("unbalanced parenthesis in operand %d."),
			    i.operands + 1);
		  else
		    as_bad (_("unbalanced brackets in operand %d."),
			    i.operands + 1);
		  return NULL;
		}
	      else
		break;	/* we are done */
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %s in operand %d"),
		      output_invalid (*l),
		      i.operands + 1);
	      return NULL;
	    }
	  /* AT&T syntax groups with parentheses, Intel with brackets.  */
	  if (!intel_syntax)
	    {
	      if (*l == '(')
		++paren_not_balanced;
	      if (*l == ')')
		--paren_not_balanced;
	    }
	  else
	    {
	      if (*l == '[')
		++paren_not_balanced;
	      if (*l == ']')
		--paren_not_balanced;
	    }
	  l++;
	}
      if (l != token_start)
	{			/* Yes, we've read in another operand.  */
	  unsigned int operand_ok;
	  this_operand = i.operands++;
	  i.types[this_operand].bitfield.unspecified = 1;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Now parse operand adding info to 'i' as we go along.
	     END_STRING_AND_SAVE temporarily NUL-terminates the operand
	     text so the operand parsers see just this token; the byte
	     is restored below.  */
	  END_STRING_AND_SAVE (l);

	  if (intel_syntax)
	    operand_ok =
	      i386_intel_operand (token_start,
				  intel_float_operand (mnemonic));
	  else
	    operand_ok = i386_att_operand (token_start);

	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	  if (*l == ',')
	    {
	      as_bad (_("expecting operand before ','; got nothing"));
	      return NULL;
	    }
	}

      /* Now *l must be either ',' or END_OF_INSN.  */
      if (*l == ',')
	{
	  if (*++l == END_OF_INSN)
	    {
	      /* Just skip it, if it's \n complain.  */
	      goto expecting_operand_after_comma;
	    }
	  expecting_operand = 1;
	}
    }
  return l;
}
3601
3602 static void
3603 swap_2_operands (int xchg1, int xchg2)
3604 {
3605 union i386_op temp_op;
3606 i386_operand_type temp_type;
3607 enum bfd_reloc_code_real temp_reloc;
3608
3609 temp_type = i.types[xchg2];
3610 i.types[xchg2] = i.types[xchg1];
3611 i.types[xchg1] = temp_type;
3612 temp_op = i.op[xchg2];
3613 i.op[xchg2] = i.op[xchg1];
3614 i.op[xchg1] = temp_op;
3615 temp_reloc = i.reloc[xchg2];
3616 i.reloc[xchg2] = i.reloc[xchg1];
3617 i.reloc[xchg1] = temp_reloc;
3618 }
3619
3620 static void
3621 swap_operands (void)
3622 {
3623 switch (i.operands)
3624 {
3625 case 5:
3626 case 4:
3627 swap_2_operands (1, i.operands - 2);
3628 case 3:
3629 case 2:
3630 swap_2_operands (0, i.operands - 1);
3631 break;
3632 default:
3633 abort ();
3634 }
3635
3636 if (i.mem_operands == 2)
3637 {
3638 const seg_entry *temp_seg;
3639 temp_seg = i.seg[0];
3640 i.seg[0] = i.seg[1];
3641 i.seg[1] = temp_seg;
3642 }
3643 }
3644
/* Try to ensure constant immediates are represented in the smallest
   opcode possible.  Widens/narrows the Imm* bits of each immediate
   operand in `i' according to a guessed operand size, and sign-folds
   constants so e.g. $0xffe0 with a 'w' suffix matches Imm8S.  */
static void
optimize_imm (void)
{
  char guess_suffix = 0;
  int op;

  if (i.suffix)
    guess_suffix = i.suffix;
  else if (i.reg_operands)
    {
      /* Figure out a suffix from the last register operand specified.
	 We can't do this properly yet, ie. excluding InOutPortReg,
	 but the following works for instructions with immediates.
	 In any case, we can't set i.suffix yet.  */
      for (op = i.operands; --op >= 0;)
	if (i.types[op].bitfield.reg8)
	  {
	    guess_suffix = BYTE_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg16)
	  {
	    guess_suffix = WORD_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg32)
	  {
	    guess_suffix = LONG_MNEM_SUFFIX;
	    break;
	  }
	else if (i.types[op].bitfield.reg64)
	  {
	    guess_suffix = QWORD_MNEM_SUFFIX;
	    break;
	  }
    }
  else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
    /* No suffix and no registers: the effective operand size is word
       when either in 16-bit mode or a data-size prefix (but not both,
       which would cancel) is present.  */
    guess_suffix = WORD_MNEM_SUFFIX;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], imm))
      {
	switch (i.op[op].imms->X_op)
	  {
	  case O_constant:
	    /* If a suffix is given, this operand may be shortened.  */
	    switch (guess_suffix)
	      {
	      case LONG_MNEM_SUFFIX:
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case WORD_MNEM_SUFFIX:
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      case BYTE_MNEM_SUFFIX:
		i.types[op].bitfield.imm8 = 1;
		i.types[op].bitfield.imm8s = 1;
		i.types[op].bitfield.imm16 = 1;
		i.types[op].bitfield.imm32 = 1;
		i.types[op].bitfield.imm32s = 1;
		i.types[op].bitfield.imm64 = 1;
		break;
	      }

	    /* If this operand is at most 16 bits, convert it
	       to a signed 16 bit number before trying to see
	       whether it will fit in an even smaller size.
	       This allows a 16-bit operand such as $0xffe0 to
	       be recognised as within Imm8S range.  */
	    if ((i.types[op].bitfield.imm16)
		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
	      {
		/* (x ^ 0x8000) - 0x8000 sign-extends the low 16 bits.  */
		i.op[op].imms->X_add_number =
		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
	      }
	    if ((i.types[op].bitfield.imm32)
		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
		    == 0))
	      {
		/* Likewise sign-extend from 32 bits.  */
		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
						^ ((offsetT) 1 << 31))
					       - ((offsetT) 1 << 31));
	      }
	    i.types[op]
	      = operand_type_or (i.types[op],
				 smallest_imm_type (i.op[op].imms->X_add_number));

	    /* We must avoid matching of Imm32 templates when 64bit
	       only immediate is available.  */
	    if (guess_suffix == QWORD_MNEM_SUFFIX)
	      i.types[op].bitfield.imm32 = 0;
	    break;

	  case O_absent:
	  case O_register:
	    abort ();

	    /* Symbols and expressions.  */
	  default:
	    /* Convert symbolic operand to proper sizes for matching, but don't
	       prevent matching a set of insns that only supports sizes other
	       than those matching the insn suffix.  */
	    {
	      i386_operand_type mask, allowed;
	      const insn_template *t;

	      operand_type_set (&mask, 0);
	      operand_type_set (&allowed, 0);

	      /* Collect every immediate size any candidate template
		 accepts for this operand position.  */
	      for (t = current_templates->start;
		   t < current_templates->end;
		   ++t)
		allowed = operand_type_or (allowed,
					   t->operand_types[op]);
	      switch (guess_suffix)
		{
		case QWORD_MNEM_SUFFIX:
		  mask.bitfield.imm64 = 1;
		  mask.bitfield.imm32s = 1;
		  break;
		case LONG_MNEM_SUFFIX:
		  mask.bitfield.imm32 = 1;
		  break;
		case WORD_MNEM_SUFFIX:
		  mask.bitfield.imm16 = 1;
		  break;
		case BYTE_MNEM_SUFFIX:
		  mask.bitfield.imm8 = 1;
		  break;
		default:
		  break;
		}
	      /* Only narrow to the suffix's sizes if some template
		 actually accepts one of them.  */
	      allowed = operand_type_and (mask, allowed);
	      if (!operand_type_all_zero (&allowed))
		i.types[op] = operand_type_and (i.types[op], mask);
	    }
	    break;
	  }
      }
}
3791
/* Try to use the smallest displacement type too.  Narrows the Disp*
   bits for constant displacements, drops zero displacements when a
   base/index is present, and emits the fixup immediately for TLS
   descriptor call relocations.  */
static void
optimize_disp (void)
{
  int op;

  for (op = i.operands; --op >= 0;)
    if (operand_type_check (i.types[op], disp))
      {
	if (i.op[op].disps->X_op == O_constant)
	  {
	    offsetT op_disp = i.op[op].disps->X_add_number;

	    if (i.types[op].bitfield.disp16
		&& (op_disp & ~(offsetT) 0xffff) == 0)
	      {
		/* If this operand is at most 16 bits, convert
		   to a signed 16 bit number and don't use 64bit
		   displacement.  */
		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (i.types[op].bitfield.disp32
		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
	      {
		/* If this operand is at most 32 bits, convert
		   to a signed 32 bit number and don't use 64bit
		   displacement.  */
		op_disp &= (((offsetT) 2 << 31) - 1);
		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
		i.types[op].bitfield.disp64 = 0;
	      }
	    if (!op_disp && i.types[op].bitfield.baseindex)
	      {
		/* A zero displacement with a base/index register can be
		   omitted entirely (except for (%ebp)-style encodings,
		   handled later); clear all Disp bits and forget the
		   expression.  */
		i.types[op].bitfield.disp8 = 0;
		i.types[op].bitfield.disp16 = 0;
		i.types[op].bitfield.disp32 = 0;
		i.types[op].bitfield.disp32s = 0;
		i.types[op].bitfield.disp64 = 0;
		i.op[op].disps = 0;
		i.disp_operands--;
	      }
	    else if (flag_code == CODE_64BIT)
	      {
		if (fits_in_signed_long (op_disp))
		  {
		    i.types[op].bitfield.disp64 = 0;
		    i.types[op].bitfield.disp32s = 1;
		  }
		/* With an address-size prefix the displacement is an
		   unsigned 32-bit quantity.  */
		if (i.prefix[ADDR_PREFIX]
		    && fits_in_unsigned_long (op_disp))
		  i.types[op].bitfield.disp32 = 1;
	      }
	    if ((i.types[op].bitfield.disp32
		 || i.types[op].bitfield.disp32s
		 || i.types[op].bitfield.disp16)
		&& fits_in_signed_byte (op_disp))
	      i.types[op].bitfield.disp8 = 1;
	  }
	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
	  {
	    /* A TLS descriptor call always takes no displacement in the
	       instruction itself; create the relocation fixup now, at
	       the current frag position, and clear all Disp bits.  */
	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
			 i.op[op].disps, 0, i.reloc[op]);
	    i.types[op].bitfield.disp8 = 0;
	    i.types[op].bitfield.disp16 = 0;
	    i.types[op].bitfield.disp32 = 0;
	    i.types[op].bitfield.disp32s = 0;
	    i.types[op].bitfield.disp64 = 0;
	  }
	else
	  /* We only support 64bit displacement on constants.  */
	  i.types[op].bitfield.disp64 = 0;
      }
}
3867
3868 /* Check if operands are valid for the instruction. */
3869
3870 static int
3871 check_VecOperands (const insn_template *t)
3872 {
3873 /* Without VSIB byte, we can't have a vector register for index. */
3874 if (!t->opcode_modifier.vecsib
3875 && i.index_reg
3876 && (i.index_reg->reg_type.bitfield.regxmm
3877 || i.index_reg->reg_type.bitfield.regymm))
3878 {
3879 i.error = unsupported_vector_index_register;
3880 return 1;
3881 }
3882
3883 /* For VSIB byte, we need a vector register for index and no PC
3884 relative addressing is allowed. */
3885 if (t->opcode_modifier.vecsib
3886 && (!i.index_reg
3887 || !((t->opcode_modifier.vecsib == VecSIB128
3888 && i.index_reg->reg_type.bitfield.regxmm)
3889 || (t->opcode_modifier.vecsib == VecSIB256
3890 && i.index_reg->reg_type.bitfield.regymm))
3891 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3892 {
3893 i.error = invalid_vsib_address;
3894 return 1;
3895 }
3896
3897 return 0;
3898 }
3899
3900 /* Check if operands are valid for the instruction. Update VEX
3901 operand types. */
3902
3903 static int
3904 VEX_check_operands (const insn_template *t)
3905 {
3906 if (!t->opcode_modifier.vex)
3907 return 0;
3908
3909 /* Only check VEX_Imm4, which must be the first operand. */
3910 if (t->operand_types[0].bitfield.vec_imm4)
3911 {
3912 if (i.op[0].imms->X_op != O_constant
3913 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3914 {
3915 i.error = bad_imm4;
3916 return 1;
3917 }
3918
3919 /* Turn off Imm8 so that update_imm won't complain. */
3920 i.types[0] = vec_imm4;
3921 }
3922
3923 return 0;
3924 }
3925
3926 static const insn_template *
3927 match_template (void)
3928 {
3929 /* Points to template once we've found it. */
3930 const insn_template *t;
3931 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3932 i386_operand_type overlap4;
3933 unsigned int found_reverse_match;
3934 i386_opcode_modifier suffix_check;
3935 i386_operand_type operand_types [MAX_OPERANDS];
3936 int addr_prefix_disp;
3937 unsigned int j;
3938 unsigned int found_cpu_match;
3939 unsigned int check_register;
3940
3941 #if MAX_OPERANDS != 5
3942 # error "MAX_OPERANDS must be 5."
3943 #endif
3944
3945 found_reverse_match = 0;
3946 addr_prefix_disp = -1;
3947
3948 memset (&suffix_check, 0, sizeof (suffix_check));
3949 if (i.suffix == BYTE_MNEM_SUFFIX)
3950 suffix_check.no_bsuf = 1;
3951 else if (i.suffix == WORD_MNEM_SUFFIX)
3952 suffix_check.no_wsuf = 1;
3953 else if (i.suffix == SHORT_MNEM_SUFFIX)
3954 suffix_check.no_ssuf = 1;
3955 else if (i.suffix == LONG_MNEM_SUFFIX)
3956 suffix_check.no_lsuf = 1;
3957 else if (i.suffix == QWORD_MNEM_SUFFIX)
3958 suffix_check.no_qsuf = 1;
3959 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3960 suffix_check.no_ldsuf = 1;
3961
3962 /* Must have right number of operands. */
3963 i.error = number_of_operands_mismatch;
3964
3965 for (t = current_templates->start; t < current_templates->end; t++)
3966 {
3967 addr_prefix_disp = -1;
3968
3969 if (i.operands != t->operands)
3970 continue;
3971
3972 /* Check processor support. */
3973 i.error = unsupported;
3974 found_cpu_match = (cpu_flags_match (t)
3975 == CPU_FLAGS_PERFECT_MATCH);
3976 if (!found_cpu_match)
3977 continue;
3978
3979 /* Check old gcc support. */
3980 i.error = old_gcc_only;
3981 if (!old_gcc && t->opcode_modifier.oldgcc)
3982 continue;
3983
3984 /* Check AT&T mnemonic. */
3985 i.error = unsupported_with_intel_mnemonic;
3986 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3987 continue;
3988
3989 /* Check AT&T/Intel syntax. */
3990 i.error = unsupported_syntax;
3991 if ((intel_syntax && t->opcode_modifier.attsyntax)
3992 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3993 continue;
3994
3995 /* Check the suffix, except for some instructions in intel mode. */
3996 i.error = invalid_instruction_suffix;
3997 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3998 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3999 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4000 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4001 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4002 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4003 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4004 continue;
4005
4006 if (!operand_size_match (t))
4007 continue;
4008
4009 for (j = 0; j < MAX_OPERANDS; j++)
4010 operand_types[j] = t->operand_types[j];
4011
4012 /* In general, don't allow 64-bit operands in 32-bit mode. */
4013 if (i.suffix == QWORD_MNEM_SUFFIX
4014 && flag_code != CODE_64BIT
4015 && (intel_syntax
4016 ? (!t->opcode_modifier.ignoresize
4017 && !intel_float_operand (t->name))
4018 : intel_float_operand (t->name) != 2)
4019 && ((!operand_types[0].bitfield.regmmx
4020 && !operand_types[0].bitfield.regxmm
4021 && !operand_types[0].bitfield.regymm)
4022 || (!operand_types[t->operands > 1].bitfield.regmmx
4023 && !!operand_types[t->operands > 1].bitfield.regxmm
4024 && !!operand_types[t->operands > 1].bitfield.regymm))
4025 && (t->base_opcode != 0x0fc7
4026 || t->extension_opcode != 1 /* cmpxchg8b */))
4027 continue;
4028
4029 /* In general, don't allow 32-bit operands on pre-386. */
4030 else if (i.suffix == LONG_MNEM_SUFFIX
4031 && !cpu_arch_flags.bitfield.cpui386
4032 && (intel_syntax
4033 ? (!t->opcode_modifier.ignoresize
4034 && !intel_float_operand (t->name))
4035 : intel_float_operand (t->name) != 2)
4036 && ((!operand_types[0].bitfield.regmmx
4037 && !operand_types[0].bitfield.regxmm)
4038 || (!operand_types[t->operands > 1].bitfield.regmmx
4039 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4040 continue;
4041
4042 /* Do not verify operands when there are none. */
4043 else
4044 {
4045 if (!t->operands)
4046 /* We've found a match; break out of loop. */
4047 break;
4048 }
4049
4050 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4051 into Disp32/Disp16/Disp32 operand. */
4052 if (i.prefix[ADDR_PREFIX] != 0)
4053 {
4054 /* There should be only one Disp operand. */
4055 switch (flag_code)
4056 {
4057 case CODE_16BIT:
4058 for (j = 0; j < MAX_OPERANDS; j++)
4059 {
4060 if (operand_types[j].bitfield.disp16)
4061 {
4062 addr_prefix_disp = j;
4063 operand_types[j].bitfield.disp32 = 1;
4064 operand_types[j].bitfield.disp16 = 0;
4065 break;
4066 }
4067 }
4068 break;
4069 case CODE_32BIT:
4070 for (j = 0; j < MAX_OPERANDS; j++)
4071 {
4072 if (operand_types[j].bitfield.disp32)
4073 {
4074 addr_prefix_disp = j;
4075 operand_types[j].bitfield.disp32 = 0;
4076 operand_types[j].bitfield.disp16 = 1;
4077 break;
4078 }
4079 }
4080 break;
4081 case CODE_64BIT:
4082 for (j = 0; j < MAX_OPERANDS; j++)
4083 {
4084 if (operand_types[j].bitfield.disp64)
4085 {
4086 addr_prefix_disp = j;
4087 operand_types[j].bitfield.disp64 = 0;
4088 operand_types[j].bitfield.disp32 = 1;
4089 break;
4090 }
4091 }
4092 break;
4093 }
4094 }
4095
4096 /* We check register size if needed. */
4097 check_register = t->opcode_modifier.checkregsize;
4098 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4099 switch (t->operands)
4100 {
4101 case 1:
4102 if (!operand_type_match (overlap0, i.types[0]))
4103 continue;
4104 break;
4105 case 2:
4106 /* xchg %eax, %eax is a special case. It is an aliase for nop
4107 only in 32bit mode and we can use opcode 0x90. In 64bit
4108 mode, we can't use 0x90 for xchg %eax, %eax since it should
4109 zero-extend %eax to %rax. */
4110 if (flag_code == CODE_64BIT
4111 && t->base_opcode == 0x90
4112 && operand_type_equal (&i.types [0], &acc32)
4113 && operand_type_equal (&i.types [1], &acc32))
4114 continue;
4115 if (i.swap_operand)
4116 {
4117 /* If we swap operand in encoding, we either match
4118 the next one or reverse direction of operands. */
4119 if (t->opcode_modifier.s)
4120 continue;
4121 else if (t->opcode_modifier.d)
4122 goto check_reverse;
4123 }
4124
4125 case 3:
4126 /* If we swap operand in encoding, we match the next one. */
4127 if (i.swap_operand && t->opcode_modifier.s)
4128 continue;
4129 case 4:
4130 case 5:
4131 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4132 if (!operand_type_match (overlap0, i.types[0])
4133 || !operand_type_match (overlap1, i.types[1])
4134 || (check_register
4135 && !operand_type_register_match (overlap0, i.types[0],
4136 operand_types[0],
4137 overlap1, i.types[1],
4138 operand_types[1])))
4139 {
4140 /* Check if other direction is valid ... */
4141 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4142 continue;
4143
4144 check_reverse:
4145 /* Try reversing direction of operands. */
4146 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4147 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4148 if (!operand_type_match (overlap0, i.types[0])
4149 || !operand_type_match (overlap1, i.types[1])
4150 || (check_register
4151 && !operand_type_register_match (overlap0,
4152 i.types[0],
4153 operand_types[1],
4154 overlap1,
4155 i.types[1],
4156 operand_types[0])))
4157 {
4158 /* Does not match either direction. */
4159 continue;
4160 }
4161 /* found_reverse_match holds which of D or FloatDR
4162 we've found. */
4163 if (t->opcode_modifier.d)
4164 found_reverse_match = Opcode_D;
4165 else if (t->opcode_modifier.floatd)
4166 found_reverse_match = Opcode_FloatD;
4167 else
4168 found_reverse_match = 0;
4169 if (t->opcode_modifier.floatr)
4170 found_reverse_match |= Opcode_FloatR;
4171 }
4172 else
4173 {
4174 /* Found a forward 2 operand match here. */
4175 switch (t->operands)
4176 {
4177 case 5:
4178 overlap4 = operand_type_and (i.types[4],
4179 operand_types[4]);
4180 case 4:
4181 overlap3 = operand_type_and (i.types[3],
4182 operand_types[3]);
4183 case 3:
4184 overlap2 = operand_type_and (i.types[2],
4185 operand_types[2]);
4186 break;
4187 }
4188
4189 switch (t->operands)
4190 {
4191 case 5:
4192 if (!operand_type_match (overlap4, i.types[4])
4193 || !operand_type_register_match (overlap3,
4194 i.types[3],
4195 operand_types[3],
4196 overlap4,
4197 i.types[4],
4198 operand_types[4]))
4199 continue;
4200 case 4:
4201 if (!operand_type_match (overlap3, i.types[3])
4202 || (check_register
4203 && !operand_type_register_match (overlap2,
4204 i.types[2],
4205 operand_types[2],
4206 overlap3,
4207 i.types[3],
4208 operand_types[3])))
4209 continue;
4210 case 3:
4211 /* Here we make use of the fact that there are no
4212 reverse match 3 operand instructions, and all 3
4213 operand instructions only need to be checked for
4214 register consistency between operands 2 and 3. */
4215 if (!operand_type_match (overlap2, i.types[2])
4216 || (check_register
4217 && !operand_type_register_match (overlap1,
4218 i.types[1],
4219 operand_types[1],
4220 overlap2,
4221 i.types[2],
4222 operand_types[2])))
4223 continue;
4224 break;
4225 }
4226 }
4227 /* Found either forward/reverse 2, 3 or 4 operand match here:
4228 slip through to break. */
4229 }
4230 if (!found_cpu_match)
4231 {
4232 found_reverse_match = 0;
4233 continue;
4234 }
4235
4236 /* Check if vector operands are valid. */
4237 if (check_VecOperands (t))
4238 continue;
4239
4240 /* Check if VEX operands are valid. */
4241 if (VEX_check_operands (t))
4242 continue;
4243
4244 /* We've found a match; break out of loop. */
4245 break;
4246 }
4247
4248 if (t == current_templates->end)
4249 {
4250 /* We found no match. */
4251 const char *err_msg;
4252 switch (i.error)
4253 {
4254 default:
4255 abort ();
4256 case operand_size_mismatch:
4257 err_msg = _("operand size mismatch");
4258 break;
4259 case operand_type_mismatch:
4260 err_msg = _("operand type mismatch");
4261 break;
4262 case register_type_mismatch:
4263 err_msg = _("register type mismatch");
4264 break;
4265 case number_of_operands_mismatch:
4266 err_msg = _("number of operands mismatch");
4267 break;
4268 case invalid_instruction_suffix:
4269 err_msg = _("invalid instruction suffix");
4270 break;
4271 case bad_imm4:
4272 err_msg = _("Imm4 isn't the first operand");
4273 break;
4274 case old_gcc_only:
4275 err_msg = _("only supported with old gcc");
4276 break;
4277 case unsupported_with_intel_mnemonic:
4278 err_msg = _("unsupported with Intel mnemonic");
4279 break;
4280 case unsupported_syntax:
4281 err_msg = _("unsupported syntax");
4282 break;
4283 case unsupported:
4284 err_msg = _("unsupported");
4285 break;
4286 case invalid_vsib_address:
4287 err_msg = _("invalid VSIB address");
4288 break;
4289 case unsupported_vector_index_register:
4290 err_msg = _("unsupported vector index register");
4291 break;
4292 }
4293 as_bad (_("%s for `%s'"), err_msg,
4294 current_templates->start->name);
4295 return NULL;
4296 }
4297
4298 if (!quiet_warnings)
4299 {
4300 if (!intel_syntax
4301 && (i.types[0].bitfield.jumpabsolute
4302 != operand_types[0].bitfield.jumpabsolute))
4303 {
4304 as_warn (_("indirect %s without `*'"), t->name);
4305 }
4306
4307 if (t->opcode_modifier.isprefix
4308 && t->opcode_modifier.ignoresize)
4309 {
4310 /* Warn them that a data or address size prefix doesn't
4311 affect assembly of the next line of code. */
4312 as_warn (_("stand-alone `%s' prefix"), t->name);
4313 }
4314 }
4315
4316 /* Copy the template we found. */
4317 i.tm = *t;
4318
4319 if (addr_prefix_disp != -1)
4320 i.tm.operand_types[addr_prefix_disp]
4321 = operand_types[addr_prefix_disp];
4322
4323 if (found_reverse_match)
4324 {
4325 /* If we found a reverse match we must alter the opcode
4326 direction bit. found_reverse_match holds bits to change
4327 (different for int & float insns). */
4328
4329 i.tm.base_opcode ^= found_reverse_match;
4330
4331 i.tm.operand_types[0] = operand_types[1];
4332 i.tm.operand_types[1] = operand_types[0];
4333 }
4334
4335 return t;
4336 }
4337
4338 static int
4339 check_string (void)
4340 {
4341 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4342 if (i.tm.operand_types[mem_op].bitfield.esseg)
4343 {
4344 if (i.seg[0] != NULL && i.seg[0] != &es)
4345 {
4346 as_bad (_("`%s' operand %d must use `%ses' segment"),
4347 i.tm.name,
4348 mem_op + 1,
4349 register_prefix);
4350 return 0;
4351 }
4352 /* There's only ever one segment override allowed per instruction.
4353 This instruction possibly has a legal segment override on the
4354 second operand, so copy the segment to where non-string
4355 instructions store it, allowing common code. */
4356 i.seg[0] = i.seg[1];
4357 }
4358 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4359 {
4360 if (i.seg[1] != NULL && i.seg[1] != &es)
4361 {
4362 as_bad (_("`%s' operand %d must use `%ses' segment"),
4363 i.tm.name,
4364 mem_op + 2,
4365 register_prefix);
4366 return 0;
4367 }
4368 }
4369 return 1;
4370 }
4371
4372 static int
4373 process_suffix (void)
4374 {
4375 /* If matched instruction specifies an explicit instruction mnemonic
4376 suffix, use it. */
4377 if (i.tm.opcode_modifier.size16)
4378 i.suffix = WORD_MNEM_SUFFIX;
4379 else if (i.tm.opcode_modifier.size32)
4380 i.suffix = LONG_MNEM_SUFFIX;
4381 else if (i.tm.opcode_modifier.size64)
4382 i.suffix = QWORD_MNEM_SUFFIX;
4383 else if (i.reg_operands)
4384 {
4385 /* If there's no instruction mnemonic suffix we try to invent one
4386 based on register operands. */
4387 if (!i.suffix)
4388 {
4389 /* We take i.suffix from the last register operand specified,
4390 Destination register type is more significant than source
4391 register type. crc32 in SSE4.2 prefers source register
4392 type. */
4393 if (i.tm.base_opcode == 0xf20f38f1)
4394 {
4395 if (i.types[0].bitfield.reg16)
4396 i.suffix = WORD_MNEM_SUFFIX;
4397 else if (i.types[0].bitfield.reg32)
4398 i.suffix = LONG_MNEM_SUFFIX;
4399 else if (i.types[0].bitfield.reg64)
4400 i.suffix = QWORD_MNEM_SUFFIX;
4401 }
4402 else if (i.tm.base_opcode == 0xf20f38f0)
4403 {
4404 if (i.types[0].bitfield.reg8)
4405 i.suffix = BYTE_MNEM_SUFFIX;
4406 }
4407
4408 if (!i.suffix)
4409 {
4410 int op;
4411
4412 if (i.tm.base_opcode == 0xf20f38f1
4413 || i.tm.base_opcode == 0xf20f38f0)
4414 {
4415 /* We have to know the operand size for crc32. */
4416 as_bad (_("ambiguous memory operand size for `%s`"),
4417 i.tm.name);
4418 return 0;
4419 }
4420
4421 for (op = i.operands; --op >= 0;)
4422 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4423 {
4424 if (i.types[op].bitfield.reg8)
4425 {
4426 i.suffix = BYTE_MNEM_SUFFIX;
4427 break;
4428 }
4429 else if (i.types[op].bitfield.reg16)
4430 {
4431 i.suffix = WORD_MNEM_SUFFIX;
4432 break;
4433 }
4434 else if (i.types[op].bitfield.reg32)
4435 {
4436 i.suffix = LONG_MNEM_SUFFIX;
4437 break;
4438 }
4439 else if (i.types[op].bitfield.reg64)
4440 {
4441 i.suffix = QWORD_MNEM_SUFFIX;
4442 break;
4443 }
4444 }
4445 }
4446 }
4447 else if (i.suffix == BYTE_MNEM_SUFFIX)
4448 {
4449 if (intel_syntax
4450 && i.tm.opcode_modifier.ignoresize
4451 && i.tm.opcode_modifier.no_bsuf)
4452 i.suffix = 0;
4453 else if (!check_byte_reg ())
4454 return 0;
4455 }
4456 else if (i.suffix == LONG_MNEM_SUFFIX)
4457 {
4458 if (intel_syntax
4459 && i.tm.opcode_modifier.ignoresize
4460 && i.tm.opcode_modifier.no_lsuf)
4461 i.suffix = 0;
4462 else if (!check_long_reg ())
4463 return 0;
4464 }
4465 else if (i.suffix == QWORD_MNEM_SUFFIX)
4466 {
4467 if (intel_syntax
4468 && i.tm.opcode_modifier.ignoresize
4469 && i.tm.opcode_modifier.no_qsuf)
4470 i.suffix = 0;
4471 else if (!check_qword_reg ())
4472 return 0;
4473 }
4474 else if (i.suffix == WORD_MNEM_SUFFIX)
4475 {
4476 if (intel_syntax
4477 && i.tm.opcode_modifier.ignoresize
4478 && i.tm.opcode_modifier.no_wsuf)
4479 i.suffix = 0;
4480 else if (!check_word_reg ())
4481 return 0;
4482 }
4483 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4484 || i.suffix == YMMWORD_MNEM_SUFFIX)
4485 {
4486 /* Skip if the instruction has x/y suffix. match_template
4487 should check if it is a valid suffix. */
4488 }
4489 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4490 /* Do nothing if the instruction is going to ignore the prefix. */
4491 ;
4492 else
4493 abort ();
4494 }
4495 else if (i.tm.opcode_modifier.defaultsize
4496 && !i.suffix
4497 /* exclude fldenv/frstor/fsave/fstenv */
4498 && i.tm.opcode_modifier.no_ssuf)
4499 {
4500 i.suffix = stackop_size;
4501 }
4502 else if (intel_syntax
4503 && !i.suffix
4504 && (i.tm.operand_types[0].bitfield.jumpabsolute
4505 || i.tm.opcode_modifier.jumpbyte
4506 || i.tm.opcode_modifier.jumpintersegment
4507 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4508 && i.tm.extension_opcode <= 3)))
4509 {
4510 switch (flag_code)
4511 {
4512 case CODE_64BIT:
4513 if (!i.tm.opcode_modifier.no_qsuf)
4514 {
4515 i.suffix = QWORD_MNEM_SUFFIX;
4516 break;
4517 }
4518 case CODE_32BIT:
4519 if (!i.tm.opcode_modifier.no_lsuf)
4520 i.suffix = LONG_MNEM_SUFFIX;
4521 break;
4522 case CODE_16BIT:
4523 if (!i.tm.opcode_modifier.no_wsuf)
4524 i.suffix = WORD_MNEM_SUFFIX;
4525 break;
4526 }
4527 }
4528
4529 if (!i.suffix)
4530 {
4531 if (!intel_syntax)
4532 {
4533 if (i.tm.opcode_modifier.w)
4534 {
4535 as_bad (_("no instruction mnemonic suffix given and "
4536 "no register operands; can't size instruction"));
4537 return 0;
4538 }
4539 }
4540 else
4541 {
4542 unsigned int suffixes;
4543
4544 suffixes = !i.tm.opcode_modifier.no_bsuf;
4545 if (!i.tm.opcode_modifier.no_wsuf)
4546 suffixes |= 1 << 1;
4547 if (!i.tm.opcode_modifier.no_lsuf)
4548 suffixes |= 1 << 2;
4549 if (!i.tm.opcode_modifier.no_ldsuf)
4550 suffixes |= 1 << 3;
4551 if (!i.tm.opcode_modifier.no_ssuf)
4552 suffixes |= 1 << 4;
4553 if (!i.tm.opcode_modifier.no_qsuf)
4554 suffixes |= 1 << 5;
4555
4556 /* There are more than suffix matches. */
4557 if (i.tm.opcode_modifier.w
4558 || ((suffixes & (suffixes - 1))
4559 && !i.tm.opcode_modifier.defaultsize
4560 && !i.tm.opcode_modifier.ignoresize))
4561 {
4562 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4563 return 0;
4564 }
4565 }
4566 }
4567
4568 /* Change the opcode based on the operand size given by i.suffix;
4569 We don't need to change things for byte insns. */
4570
4571 if (i.suffix
4572 && i.suffix != BYTE_MNEM_SUFFIX
4573 && i.suffix != XMMWORD_MNEM_SUFFIX
4574 && i.suffix != YMMWORD_MNEM_SUFFIX)
4575 {
4576 /* It's not a byte, select word/dword operation. */
4577 if (i.tm.opcode_modifier.w)
4578 {
4579 if (i.tm.opcode_modifier.shortform)
4580 i.tm.base_opcode |= 8;
4581 else
4582 i.tm.base_opcode |= 1;
4583 }
4584
4585 /* Now select between word & dword operations via the operand
4586 size prefix, except for instructions that will ignore this
4587 prefix anyway. */
4588 if (i.tm.opcode_modifier.addrprefixop0)
4589 {
4590 /* The address size override prefix changes the size of the
4591 first operand. */
4592 if ((flag_code == CODE_32BIT
4593 && i.op->regs[0].reg_type.bitfield.reg16)
4594 || (flag_code != CODE_32BIT
4595 && i.op->regs[0].reg_type.bitfield.reg32))
4596 if (!add_prefix (ADDR_PREFIX_OPCODE))
4597 return 0;
4598 }
4599 else if (i.suffix != QWORD_MNEM_SUFFIX
4600 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4601 && !i.tm.opcode_modifier.ignoresize
4602 && !i.tm.opcode_modifier.floatmf
4603 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4604 || (flag_code == CODE_64BIT
4605 && i.tm.opcode_modifier.jumpbyte)))
4606 {
4607 unsigned int prefix = DATA_PREFIX_OPCODE;
4608
4609 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4610 prefix = ADDR_PREFIX_OPCODE;
4611
4612 if (!add_prefix (prefix))
4613 return 0;
4614 }
4615
4616 /* Set mode64 for an operand. */
4617 if (i.suffix == QWORD_MNEM_SUFFIX
4618 && flag_code == CODE_64BIT
4619 && !i.tm.opcode_modifier.norex64)
4620 {
4621 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4622 need rex64. cmpxchg8b is also a special case. */
4623 if (! (i.operands == 2
4624 && i.tm.base_opcode == 0x90
4625 && i.tm.extension_opcode == None
4626 && operand_type_equal (&i.types [0], &acc64)
4627 && operand_type_equal (&i.types [1], &acc64))
4628 && ! (i.operands == 1
4629 && i.tm.base_opcode == 0xfc7
4630 && i.tm.extension_opcode == 1
4631 && !operand_type_check (i.types [0], reg)
4632 && operand_type_check (i.types [0], anymem)))
4633 i.rex |= REX_W;
4634 }
4635
4636 /* Size floating point instruction. */
4637 if (i.suffix == LONG_MNEM_SUFFIX)
4638 if (i.tm.opcode_modifier.floatmf)
4639 i.tm.base_opcode ^= 4;
4640 }
4641
4642 return 1;
4643 }
4644
4645 static int
4646 check_byte_reg (void)
4647 {
4648 int op;
4649
4650 for (op = i.operands; --op >= 0;)
4651 {
4652 /* If this is an eight bit register, it's OK. If it's the 16 or
4653 32 bit version of an eight bit register, we will just use the
4654 low portion, and that's OK too. */
4655 if (i.types[op].bitfield.reg8)
4656 continue;
4657
4658 /* crc32 doesn't generate this warning. */
4659 if (i.tm.base_opcode == 0xf20f38f0)
4660 continue;
4661
4662 if ((i.types[op].bitfield.reg16
4663 || i.types[op].bitfield.reg32
4664 || i.types[op].bitfield.reg64)
4665 && i.op[op].regs->reg_num < 4)
4666 {
4667 /* Prohibit these changes in the 64bit mode, since the
4668 lowering is more complicated. */
4669 if (flag_code == CODE_64BIT
4670 && !i.tm.operand_types[op].bitfield.inoutportreg)
4671 {
4672 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4673 register_prefix, i.op[op].regs->reg_name,
4674 i.suffix);
4675 return 0;
4676 }
4677 #if REGISTER_WARNINGS
4678 if (!quiet_warnings
4679 && !i.tm.operand_types[op].bitfield.inoutportreg)
4680 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4681 register_prefix,
4682 (i.op[op].regs + (i.types[op].bitfield.reg16
4683 ? REGNAM_AL - REGNAM_AX
4684 : REGNAM_AL - REGNAM_EAX))->reg_name,
4685 register_prefix,
4686 i.op[op].regs->reg_name,
4687 i.suffix);
4688 #endif
4689 continue;
4690 }
4691 /* Any other register is bad. */
4692 if (i.types[op].bitfield.reg16
4693 || i.types[op].bitfield.reg32
4694 || i.types[op].bitfield.reg64
4695 || i.types[op].bitfield.regmmx
4696 || i.types[op].bitfield.regxmm
4697 || i.types[op].bitfield.regymm
4698 || i.types[op].bitfield.sreg2
4699 || i.types[op].bitfield.sreg3
4700 || i.types[op].bitfield.control
4701 || i.types[op].bitfield.debug
4702 || i.types[op].bitfield.test
4703 || i.types[op].bitfield.floatreg
4704 || i.types[op].bitfield.floatacc)
4705 {
4706 as_bad (_("`%s%s' not allowed with `%s%c'"),
4707 register_prefix,
4708 i.op[op].regs->reg_name,
4709 i.tm.name,
4710 i.suffix);
4711 return 0;
4712 }
4713 }
4714 return 1;
4715 }
4716
4717 static int
4718 check_long_reg (void)
4719 {
4720 int op;
4721
4722 for (op = i.operands; --op >= 0;)
4723 /* Reject eight bit registers, except where the template requires
4724 them. (eg. movzb) */
4725 if (i.types[op].bitfield.reg8
4726 && (i.tm.operand_types[op].bitfield.reg16
4727 || i.tm.operand_types[op].bitfield.reg32
4728 || i.tm.operand_types[op].bitfield.acc))
4729 {
4730 as_bad (_("`%s%s' not allowed with `%s%c'"),
4731 register_prefix,
4732 i.op[op].regs->reg_name,
4733 i.tm.name,
4734 i.suffix);
4735 return 0;
4736 }
4737 /* Warn if the e prefix on a general reg is missing. */
4738 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4739 && i.types[op].bitfield.reg16
4740 && (i.tm.operand_types[op].bitfield.reg32
4741 || i.tm.operand_types[op].bitfield.acc))
4742 {
4743 /* Prohibit these changes in the 64bit mode, since the
4744 lowering is more complicated. */
4745 if (flag_code == CODE_64BIT)
4746 {
4747 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4748 register_prefix, i.op[op].regs->reg_name,
4749 i.suffix);
4750 return 0;
4751 }
4752 #if REGISTER_WARNINGS
4753 else
4754 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4755 register_prefix,
4756 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4757 register_prefix,
4758 i.op[op].regs->reg_name,
4759 i.suffix);
4760 #endif
4761 }
4762 /* Warn if the r prefix on a general reg is missing. */
4763 else if (i.types[op].bitfield.reg64
4764 && (i.tm.operand_types[op].bitfield.reg32
4765 || i.tm.operand_types[op].bitfield.acc))
4766 {
4767 if (intel_syntax
4768 && i.tm.opcode_modifier.toqword
4769 && !i.types[0].bitfield.regxmm)
4770 {
4771 /* Convert to QWORD. We want REX byte. */
4772 i.suffix = QWORD_MNEM_SUFFIX;
4773 }
4774 else
4775 {
4776 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4777 register_prefix, i.op[op].regs->reg_name,
4778 i.suffix);
4779 return 0;
4780 }
4781 }
4782 return 1;
4783 }
4784
4785 static int
4786 check_qword_reg (void)
4787 {
4788 int op;
4789
4790 for (op = i.operands; --op >= 0; )
4791 /* Reject eight bit registers, except where the template requires
4792 them. (eg. movzb) */
4793 if (i.types[op].bitfield.reg8
4794 && (i.tm.operand_types[op].bitfield.reg16
4795 || i.tm.operand_types[op].bitfield.reg32
4796 || i.tm.operand_types[op].bitfield.acc))
4797 {
4798 as_bad (_("`%s%s' not allowed with `%s%c'"),
4799 register_prefix,
4800 i.op[op].regs->reg_name,
4801 i.tm.name,
4802 i.suffix);
4803 return 0;
4804 }
4805 /* Warn if the e prefix on a general reg is missing. */
4806 else if ((i.types[op].bitfield.reg16
4807 || i.types[op].bitfield.reg32)
4808 && (i.tm.operand_types[op].bitfield.reg32
4809 || i.tm.operand_types[op].bitfield.acc))
4810 {
4811 /* Prohibit these changes in the 64bit mode, since the
4812 lowering is more complicated. */
4813 if (intel_syntax
4814 && i.tm.opcode_modifier.todword
4815 && !i.types[0].bitfield.regxmm)
4816 {
4817 /* Convert to DWORD. We don't want REX byte. */
4818 i.suffix = LONG_MNEM_SUFFIX;
4819 }
4820 else
4821 {
4822 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4823 register_prefix, i.op[op].regs->reg_name,
4824 i.suffix);
4825 return 0;
4826 }
4827 }
4828 return 1;
4829 }
4830
4831 static int
4832 check_word_reg (void)
4833 {
4834 int op;
4835 for (op = i.operands; --op >= 0;)
4836 /* Reject eight bit registers, except where the template requires
4837 them. (eg. movzb) */
4838 if (i.types[op].bitfield.reg8
4839 && (i.tm.operand_types[op].bitfield.reg16
4840 || i.tm.operand_types[op].bitfield.reg32
4841 || i.tm.operand_types[op].bitfield.acc))
4842 {
4843 as_bad (_("`%s%s' not allowed with `%s%c'"),
4844 register_prefix,
4845 i.op[op].regs->reg_name,
4846 i.tm.name,
4847 i.suffix);
4848 return 0;
4849 }
4850 /* Warn if the e prefix on a general reg is present. */
4851 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4852 && i.types[op].bitfield.reg32
4853 && (i.tm.operand_types[op].bitfield.reg16
4854 || i.tm.operand_types[op].bitfield.acc))
4855 {
4856 /* Prohibit these changes in the 64bit mode, since the
4857 lowering is more complicated. */
4858 if (flag_code == CODE_64BIT)
4859 {
4860 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4861 register_prefix, i.op[op].regs->reg_name,
4862 i.suffix);
4863 return 0;
4864 }
4865 else
4866 #if REGISTER_WARNINGS
4867 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4868 register_prefix,
4869 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4870 register_prefix,
4871 i.op[op].regs->reg_name,
4872 i.suffix);
4873 #endif
4874 }
4875 return 1;
4876 }
4877
4878 static int
4879 update_imm (unsigned int j)
4880 {
4881 i386_operand_type overlap = i.types[j];
4882 if ((overlap.bitfield.imm8
4883 || overlap.bitfield.imm8s
4884 || overlap.bitfield.imm16
4885 || overlap.bitfield.imm32
4886 || overlap.bitfield.imm32s
4887 || overlap.bitfield.imm64)
4888 && !operand_type_equal (&overlap, &imm8)
4889 && !operand_type_equal (&overlap, &imm8s)
4890 && !operand_type_equal (&overlap, &imm16)
4891 && !operand_type_equal (&overlap, &imm32)
4892 && !operand_type_equal (&overlap, &imm32s)
4893 && !operand_type_equal (&overlap, &imm64))
4894 {
4895 if (i.suffix)
4896 {
4897 i386_operand_type temp;
4898
4899 operand_type_set (&temp, 0);
4900 if (i.suffix == BYTE_MNEM_SUFFIX)
4901 {
4902 temp.bitfield.imm8 = overlap.bitfield.imm8;
4903 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4904 }
4905 else if (i.suffix == WORD_MNEM_SUFFIX)
4906 temp.bitfield.imm16 = overlap.bitfield.imm16;
4907 else if (i.suffix == QWORD_MNEM_SUFFIX)
4908 {
4909 temp.bitfield.imm64 = overlap.bitfield.imm64;
4910 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4911 }
4912 else
4913 temp.bitfield.imm32 = overlap.bitfield.imm32;
4914 overlap = temp;
4915 }
4916 else if (operand_type_equal (&overlap, &imm16_32_32s)
4917 || operand_type_equal (&overlap, &imm16_32)
4918 || operand_type_equal (&overlap, &imm16_32s))
4919 {
4920 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4921 overlap = imm16;
4922 else
4923 overlap = imm32s;
4924 }
4925 if (!operand_type_equal (&overlap, &imm8)
4926 && !operand_type_equal (&overlap, &imm8s)
4927 && !operand_type_equal (&overlap, &imm16)
4928 && !operand_type_equal (&overlap, &imm32)
4929 && !operand_type_equal (&overlap, &imm32s)
4930 && !operand_type_equal (&overlap, &imm64))
4931 {
4932 as_bad (_("no instruction mnemonic suffix given; "
4933 "can't determine immediate size"));
4934 return 0;
4935 }
4936 }
4937 i.types[j] = overlap;
4938
4939 return 1;
4940 }
4941
4942 static int
4943 finalize_imm (void)
4944 {
4945 unsigned int j, n;
4946
4947 /* Update the first 2 immediate operands. */
4948 n = i.operands > 2 ? 2 : i.operands;
4949 if (n)
4950 {
4951 for (j = 0; j < n; j++)
4952 if (update_imm (j) == 0)
4953 return 0;
4954
4955 /* The 3rd operand can't be immediate operand. */
4956 gas_assert (operand_type_check (i.types[2], imm) == 0);
4957 }
4958
4959 return 1;
4960 }
4961
4962 static int
4963 bad_implicit_operand (int xmm)
4964 {
4965 const char *ireg = xmm ? "xmm0" : "ymm0";
4966
4967 if (intel_syntax)
4968 as_bad (_("the last operand of `%s' must be `%s%s'"),
4969 i.tm.name, register_prefix, ireg);
4970 else
4971 as_bad (_("the first operand of `%s' must be `%s%s'"),
4972 i.tm.name, register_prefix, ireg);
4973 return 0;
4974 }
4975
/* Post-process the instruction's operands: insert or drop implicit
   xmm0/ymm0 operands for SSE2AVX/VEX templates, duplicate the
   register operand for RegKludge templates, fold short-form register
   numbers into the opcode, build the ModRM byte when the template
   needs one, and add a segment-override prefix when the specified
   segment differs from the default.  Returns 1 on success, 0 after
   reporting an error via as_bad.  */
static int
process_operands (void)
{
  /* Default segment register this instruction will use for memory
     accesses.  0 means unknown.  This is only for optimizing out
     unnecessary segment overrides.  */
  const seg_entry *default_seg = 0;

  if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
    {
      /* dupl indexes the slot the destination will be copied into;
	 dest is the current last operand.  */
      unsigned int dupl = i.operands;
      unsigned int dest = dupl - 1;
      unsigned int j;

      /* The destination must be an xmm register.  */
      gas_assert (i.reg_operands
		  && MAX_OPERANDS > dupl
		  && operand_type_equal (&i.types[dest], &regxmm));

      if (i.tm.opcode_modifier.firstxmm0)
	{
	  /* The first operand is implicit and must be xmm0.  */
	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
	  if (i.op[0].regs->reg_num != 0)
	    return bad_implicit_operand (1);

	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
	    {
	      /* Keep xmm0 for instructions with VEX prefix and 3
		 sources.  */
	      goto duplicate;
	    }
	  else
	    {
	      /* We remove the first xmm0 and keep the number of
		 operands unchanged, which in fact duplicates the
		 destination.  */
	      for (j = 1; j < i.operands; j++)
		{
		  i.op[j - 1] = i.op[j];
		  i.types[j - 1] = i.types[j];
		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
		}
	    }
	}
      else if (i.tm.opcode_modifier.implicit1stxmm0)
	{
	  gas_assert ((MAX_OPERANDS - 1) > dupl
		      && (i.tm.opcode_modifier.vexsources
			  == VEX3SOURCES));

	  /* Add the implicit xmm0 for instructions with VEX prefix
	     and 3 sources.  Shift every operand up one slot to make
	     room at index 0.  */
	  for (j = i.operands; j > 0; j--)
	    {
	      i.op[j] = i.op[j - 1];
	      i.types[j] = i.types[j - 1];
	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
	    }
	  i.op[0].regs
	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
	  i.types[0] = regxmm;
	  i.tm.operand_types[0] = regxmm;

	  /* Two extra operands: the inserted xmm0 and the duplicated
	     destination added just below.  */
	  i.operands += 2;
	  i.reg_operands += 2;
	  i.tm.operands += 2;

	  dupl++;
	  dest++;
	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}
      else
	{
	duplicate:
	  /* Append a copy of the destination operand.  */
	  i.operands++;
	  i.reg_operands++;
	  i.tm.operands++;

	  i.op[dupl] = i.op[dest];
	  i.types[dupl] = i.types[dest];
	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
	}

       if (i.tm.opcode_modifier.immext)
	 process_immext ();
    }
  else if (i.tm.opcode_modifier.firstxmm0)
    {
      unsigned int j;

      /* The first operand is implicit and must be xmm0/ymm0.  */
      gas_assert (i.reg_operands
		  && (operand_type_equal (&i.types[0], &regxmm)
		      || operand_type_equal (&i.types[0], &regymm)));
      if (i.op[0].regs->reg_num != 0)
	return bad_implicit_operand (i.types[0].bitfield.regxmm);

      /* Drop the implicit first operand, shifting the rest down.  */
      for (j = 1; j < i.operands; j++)
	{
	  i.op[j - 1] = i.op[j];
	  i.types[j - 1] = i.types[j];

	  /* We need to adjust fields in i.tm since they are used by
	     build_modrm_byte.  */
	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
	}

      i.operands--;
      i.reg_operands--;
      i.tm.operands--;
    }
  else if (i.tm.opcode_modifier.regkludge)
    {
      /* The imul $imm, %reg instruction is converted into
	 imul $imm, %reg, %reg, and the clr %reg instruction
	 is converted into xor %reg, %reg.  */

      unsigned int first_reg_op;

      if (operand_type_check (i.types[0], reg))
	first_reg_op = 0;
      else
	first_reg_op = 1;
      /* Pretend we saw the extra register operand.  */
      gas_assert (i.reg_operands == 1
		  && i.op[first_reg_op + 1].regs == 0);
      i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
      i.types[first_reg_op + 1] = i.types[first_reg_op];
      i.operands++;
      i.reg_operands++;
    }

  if (i.tm.opcode_modifier.shortform)
    {
      if (i.types[0].bitfield.sreg2
	  || i.types[0].bitfield.sreg3)
	{
	  /* Segment register push/pop: the register number goes into
	     bits 3-5 of the opcode.  */
	  if (i.tm.base_opcode == POP_SEG_SHORT
	      && i.op[0].regs->reg_num == 1)
	    {
	      as_bad (_("you can't `pop %scs'"), register_prefix);
	      return 0;
	    }
	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	}
      else
	{
	  /* The register or float register operand is in operand
	     0 or 1.  */
	  unsigned int op;

	  if (i.types[0].bitfield.floatreg
	      || operand_type_check (i.types[0], reg))
	    op = 0;
	  else
	    op = 1;
	  /* Register goes in low 3 bits of opcode.  */
	  i.tm.base_opcode |= i.op[op].regs->reg_num;
	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
	    i.rex |= REX_B;
	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
	    {
	      /* Warn about some common errors, but press on regardless.
		 The first case can be generated by gcc (<= 2.8.1).  */
	      if (i.operands == 2)
		{
		  /* Reversed arguments on faddp, fsubp, etc.  */
		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
			   register_prefix, i.op[!intel_syntax].regs->reg_name,
			   register_prefix, i.op[intel_syntax].regs->reg_name);
		}
	      else
		{
		  /* Extraneous `l' suffix on fp insn.  */
		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
			   register_prefix, i.op[0].regs->reg_name);
		}
	    }
	}
    }
  else if (i.tm.opcode_modifier.modrm)
    {
      /* The opcode is completed (modulo i.tm.extension_opcode which
	 must be put into the modrm byte).  Now, we make the modrm and
	 index base bytes based on all the info we've collected.  */

      default_seg = build_modrm_byte ();
    }
  else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
    {
      default_seg = &ds;
    }
  else if (i.tm.opcode_modifier.isstring)
    {
      /* For the string instructions that allow a segment override
	 on one of their operands, the default segment is ds.  */
      default_seg = &ds;
    }

  if (i.tm.base_opcode == 0x8d /* lea */
      && i.seg[0]
      && !quiet_warnings)
    as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);

  /* If a segment was explicitly specified, and the specified segment
     is not the default, use an opcode prefix to select it.  If we
     never figured out what the default segment is, then default_seg
     will be zero at this point, and the specified segment prefix will
     always be used.  */
  if ((i.seg[0]) && (i.seg[0] != default_seg))
    {
      if (!add_prefix (i.seg[0]->seg_prefix))
	return 0;
    }
  return 1;
}
5197
5198 static const seg_entry *
5199 build_modrm_byte (void)
5200 {
5201 const seg_entry *default_seg = 0;
5202 unsigned int source, dest;
5203 int vex_3_sources;
5204
5205 /* The first operand of instructions with VEX prefix and 3 sources
5206 must be VEX_Imm4. */
5207 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5208 if (vex_3_sources)
5209 {
5210 unsigned int nds, reg_slot;
5211 expressionS *exp;
5212
5213 if (i.tm.opcode_modifier.veximmext
5214 && i.tm.opcode_modifier.immext)
5215 {
5216 dest = i.operands - 2;
5217 gas_assert (dest == 3);
5218 }
5219 else
5220 dest = i.operands - 1;
5221 nds = dest - 1;
5222
5223 /* There are 2 kinds of instructions:
5224 1. 5 operands: 4 register operands or 3 register operands
5225 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5226 VexW0 or VexW1. The destination must be either XMM or YMM
5227 register.
5228 2. 4 operands: 4 register operands or 3 register operands
5229 plus 1 memory operand, VexXDS, and VexImmExt */
5230 gas_assert ((i.reg_operands == 4
5231 || (i.reg_operands == 3 && i.mem_operands == 1))
5232 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5233 && (i.tm.opcode_modifier.veximmext
5234 || (i.imm_operands == 1
5235 && i.types[0].bitfield.vec_imm4
5236 && (i.tm.opcode_modifier.vexw == VEXW0
5237 || i.tm.opcode_modifier.vexw == VEXW1)
5238 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5239 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5240
5241 if (i.imm_operands == 0)
5242 {
5243 /* When there is no immediate operand, generate an 8bit
5244 immediate operand to encode the first operand. */
5245 exp = &im_expressions[i.imm_operands++];
5246 i.op[i.operands].imms = exp;
5247 i.types[i.operands] = imm8;
5248 i.operands++;
5249 /* If VexW1 is set, the first operand is the source and
5250 the second operand is encoded in the immediate operand. */
5251 if (i.tm.opcode_modifier.vexw == VEXW1)
5252 {
5253 source = 0;
5254 reg_slot = 1;
5255 }
5256 else
5257 {
5258 source = 1;
5259 reg_slot = 0;
5260 }
5261
5262 /* FMA swaps REG and NDS. */
5263 if (i.tm.cpu_flags.bitfield.cpufma)
5264 {
5265 unsigned int tmp;
5266 tmp = reg_slot;
5267 reg_slot = nds;
5268 nds = tmp;
5269 }
5270
5271 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5272 &regxmm)
5273 || operand_type_equal (&i.tm.operand_types[reg_slot],
5274 &regymm));
5275 exp->X_op = O_constant;
5276 exp->X_add_number
5277 = ((i.op[reg_slot].regs->reg_num
5278 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5279 << 4);
5280 }
5281 else
5282 {
5283 unsigned int imm_slot;
5284
5285 if (i.tm.opcode_modifier.vexw == VEXW0)
5286 {
5287 /* If VexW0 is set, the third operand is the source and
5288 the second operand is encoded in the immediate
5289 operand. */
5290 source = 2;
5291 reg_slot = 1;
5292 }
5293 else
5294 {
5295 /* VexW1 is set, the second operand is the source and
5296 the third operand is encoded in the immediate
5297 operand. */
5298 source = 1;
5299 reg_slot = 2;
5300 }
5301
5302 if (i.tm.opcode_modifier.immext)
5303 {
5304 /* When ImmExt is set, the immdiate byte is the last
5305 operand. */
5306 imm_slot = i.operands - 1;
5307 source--;
5308 reg_slot--;
5309 }
5310 else
5311 {
5312 imm_slot = 0;
5313
5314 /* Turn on Imm8 so that output_imm will generate it. */
5315 i.types[imm_slot].bitfield.imm8 = 1;
5316 }
5317
5318 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5319 &regxmm)
5320 || operand_type_equal (&i.tm.operand_types[reg_slot],
5321 &regymm));
5322 i.op[imm_slot].imms->X_add_number
5323 |= ((i.op[reg_slot].regs->reg_num
5324 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5325 << 4);
5326 }
5327
5328 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5329 || operand_type_equal (&i.tm.operand_types[nds],
5330 &regymm));
5331 i.vex.register_specifier = i.op[nds].regs;
5332 }
5333 else
5334 source = dest = 0;
5335
5336 /* i.reg_operands MUST be the number of real register operands;
5337 implicit registers do not count. If there are 3 register
5338 operands, it must be a instruction with VexNDS. For a
5339 instruction with VexNDD, the destination register is encoded
5340 in VEX prefix. If there are 4 register operands, it must be
5341 a instruction with VEX prefix and 3 sources. */
5342 if (i.mem_operands == 0
5343 && ((i.reg_operands == 2
5344 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5345 || (i.reg_operands == 3
5346 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5347 || (i.reg_operands == 4 && vex_3_sources)))
5348 {
5349 switch (i.operands)
5350 {
5351 case 2:
5352 source = 0;
5353 break;
5354 case 3:
5355 /* When there are 3 operands, one of them may be immediate,
5356 which may be the first or the last operand. Otherwise,
5357 the first operand must be shift count register (cl) or it
5358 is an instruction with VexNDS. */
5359 gas_assert (i.imm_operands == 1
5360 || (i.imm_operands == 0
5361 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5362 || i.types[0].bitfield.shiftcount)));
5363 if (operand_type_check (i.types[0], imm)
5364 || i.types[0].bitfield.shiftcount)
5365 source = 1;
5366 else
5367 source = 0;
5368 break;
5369 case 4:
5370 /* When there are 4 operands, the first two must be 8bit
5371 immediate operands. The source operand will be the 3rd
5372 one.
5373
5374 For instructions with VexNDS, if the first operand
5375 an imm8, the source operand is the 2nd one. If the last
5376 operand is imm8, the source operand is the first one. */
5377 gas_assert ((i.imm_operands == 2
5378 && i.types[0].bitfield.imm8
5379 && i.types[1].bitfield.imm8)
5380 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5381 && i.imm_operands == 1
5382 && (i.types[0].bitfield.imm8
5383 || i.types[i.operands - 1].bitfield.imm8)));
5384 if (i.imm_operands == 2)
5385 source = 2;
5386 else
5387 {
5388 if (i.types[0].bitfield.imm8)
5389 source = 1;
5390 else
5391 source = 0;
5392 }
5393 break;
5394 case 5:
5395 break;
5396 default:
5397 abort ();
5398 }
5399
5400 if (!vex_3_sources)
5401 {
5402 dest = source + 1;
5403
5404 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5405 {
5406 /* For instructions with VexNDS, the register-only
5407 source operand must be 32/64bit integer, XMM or
5408 YMM register. It is encoded in VEX prefix. We
5409 need to clear RegMem bit before calling
5410 operand_type_equal. */
5411
5412 i386_operand_type op;
5413 unsigned int vvvv;
5414
5415 /* Check register-only source operand when two source
5416 operands are swapped. */
5417 if (!i.tm.operand_types[source].bitfield.baseindex
5418 && i.tm.operand_types[dest].bitfield.baseindex)
5419 {
5420 vvvv = source;
5421 source = dest;
5422 }
5423 else
5424 vvvv = dest;
5425
5426 op = i.tm.operand_types[vvvv];
5427 op.bitfield.regmem = 0;
5428 if ((dest + 1) >= i.operands
5429 || (op.bitfield.reg32 != 1
5430 && !op.bitfield.reg64 != 1
5431 && !operand_type_equal (&op, &regxmm)
5432 && !operand_type_equal (&op, &regymm)))
5433 abort ();
5434 i.vex.register_specifier = i.op[vvvv].regs;
5435 dest++;
5436 }
5437 }
5438
5439 i.rm.mode = 3;
5440 /* One of the register operands will be encoded in the i.tm.reg
5441 field, the other in the combined i.tm.mode and i.tm.regmem
5442 fields. If no form of this instruction supports a memory
5443 destination operand, then we assume the source operand may
5444 sometimes be a memory operand and so we need to store the
5445 destination in the i.rm.reg field. */
5446 if (!i.tm.operand_types[dest].bitfield.regmem
5447 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5448 {
5449 i.rm.reg = i.op[dest].regs->reg_num;
5450 i.rm.regmem = i.op[source].regs->reg_num;
5451 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5452 i.rex |= REX_R;
5453 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5454 i.rex |= REX_B;
5455 }
5456 else
5457 {
5458 i.rm.reg = i.op[source].regs->reg_num;
5459 i.rm.regmem = i.op[dest].regs->reg_num;
5460 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5461 i.rex |= REX_B;
5462 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5463 i.rex |= REX_R;
5464 }
5465 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5466 {
5467 if (!i.types[0].bitfield.control
5468 && !i.types[1].bitfield.control)
5469 abort ();
5470 i.rex &= ~(REX_R | REX_B);
5471 add_prefix (LOCK_PREFIX_OPCODE);
5472 }
5473 }
5474 else
5475 { /* If it's not 2 reg operands... */
5476 unsigned int mem;
5477
5478 if (i.mem_operands)
5479 {
5480 unsigned int fake_zero_displacement = 0;
5481 unsigned int op;
5482
5483 for (op = 0; op < i.operands; op++)
5484 if (operand_type_check (i.types[op], anymem))
5485 break;
5486 gas_assert (op < i.operands);
5487
5488 if (i.tm.opcode_modifier.vecsib)
5489 {
5490 if (i.index_reg->reg_num == RegEiz
5491 || i.index_reg->reg_num == RegRiz)
5492 abort ();
5493
5494 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5495 if (!i.base_reg)
5496 {
5497 i.sib.base = NO_BASE_REGISTER;
5498 i.sib.scale = i.log2_scale_factor;
5499 i.types[op].bitfield.disp8 = 0;
5500 i.types[op].bitfield.disp16 = 0;
5501 i.types[op].bitfield.disp64 = 0;
5502 if (flag_code != CODE_64BIT)
5503 {
5504 /* Must be 32 bit */
5505 i.types[op].bitfield.disp32 = 1;
5506 i.types[op].bitfield.disp32s = 0;
5507 }
5508 else
5509 {
5510 i.types[op].bitfield.disp32 = 0;
5511 i.types[op].bitfield.disp32s = 1;
5512 }
5513 }
5514 i.sib.index = i.index_reg->reg_num;
5515 if ((i.index_reg->reg_flags & RegRex) != 0)
5516 i.rex |= REX_X;
5517 }
5518
5519 default_seg = &ds;
5520
5521 if (i.base_reg == 0)
5522 {
5523 i.rm.mode = 0;
5524 if (!i.disp_operands)
5525 {
5526 fake_zero_displacement = 1;
5527 /* Instructions with VSIB byte need 32bit displacement
5528 if there is no base register. */
5529 if (i.tm.opcode_modifier.vecsib)
5530 i.types[op].bitfield.disp32 = 1;
5531 }
5532 if (i.index_reg == 0)
5533 {
5534 gas_assert (!i.tm.opcode_modifier.vecsib);
5535 /* Operand is just <disp> */
5536 if (flag_code == CODE_64BIT)
5537 {
5538 /* 64bit mode overwrites the 32bit absolute
5539 addressing by RIP relative addressing and
5540 absolute addressing is encoded by one of the
5541 redundant SIB forms. */
5542 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5543 i.sib.base = NO_BASE_REGISTER;
5544 i.sib.index = NO_INDEX_REGISTER;
5545 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5546 ? disp32s : disp32);
5547 }
5548 else if ((flag_code == CODE_16BIT)
5549 ^ (i.prefix[ADDR_PREFIX] != 0))
5550 {
5551 i.rm.regmem = NO_BASE_REGISTER_16;
5552 i.types[op] = disp16;
5553 }
5554 else
5555 {
5556 i.rm.regmem = NO_BASE_REGISTER;
5557 i.types[op] = disp32;
5558 }
5559 }
5560 else if (!i.tm.opcode_modifier.vecsib)
5561 {
5562 /* !i.base_reg && i.index_reg */
5563 if (i.index_reg->reg_num == RegEiz
5564 || i.index_reg->reg_num == RegRiz)
5565 i.sib.index = NO_INDEX_REGISTER;
5566 else
5567 i.sib.index = i.index_reg->reg_num;
5568 i.sib.base = NO_BASE_REGISTER;
5569 i.sib.scale = i.log2_scale_factor;
5570 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5571 i.types[op].bitfield.disp8 = 0;
5572 i.types[op].bitfield.disp16 = 0;
5573 i.types[op].bitfield.disp64 = 0;
5574 if (flag_code != CODE_64BIT)
5575 {
5576 /* Must be 32 bit */
5577 i.types[op].bitfield.disp32 = 1;
5578 i.types[op].bitfield.disp32s = 0;
5579 }
5580 else
5581 {
5582 i.types[op].bitfield.disp32 = 0;
5583 i.types[op].bitfield.disp32s = 1;
5584 }
5585 if ((i.index_reg->reg_flags & RegRex) != 0)
5586 i.rex |= REX_X;
5587 }
5588 }
5589 /* RIP addressing for 64bit mode. */
5590 else if (i.base_reg->reg_num == RegRip ||
5591 i.base_reg->reg_num == RegEip)
5592 {
5593 gas_assert (!i.tm.opcode_modifier.vecsib);
5594 i.rm.regmem = NO_BASE_REGISTER;
5595 i.types[op].bitfield.disp8 = 0;
5596 i.types[op].bitfield.disp16 = 0;
5597 i.types[op].bitfield.disp32 = 0;
5598 i.types[op].bitfield.disp32s = 1;
5599 i.types[op].bitfield.disp64 = 0;
5600 i.flags[op] |= Operand_PCrel;
5601 if (! i.disp_operands)
5602 fake_zero_displacement = 1;
5603 }
5604 else if (i.base_reg->reg_type.bitfield.reg16)
5605 {
5606 gas_assert (!i.tm.opcode_modifier.vecsib);
5607 switch (i.base_reg->reg_num)
5608 {
5609 case 3: /* (%bx) */
5610 if (i.index_reg == 0)
5611 i.rm.regmem = 7;
5612 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5613 i.rm.regmem = i.index_reg->reg_num - 6;
5614 break;
5615 case 5: /* (%bp) */
5616 default_seg = &ss;
5617 if (i.index_reg == 0)
5618 {
5619 i.rm.regmem = 6;
5620 if (operand_type_check (i.types[op], disp) == 0)
5621 {
5622 /* fake (%bp) into 0(%bp) */
5623 i.types[op].bitfield.disp8 = 1;
5624 fake_zero_displacement = 1;
5625 }
5626 }
5627 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5628 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5629 break;
5630 default: /* (%si) -> 4 or (%di) -> 5 */
5631 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5632 }
5633 i.rm.mode = mode_from_disp_size (i.types[op]);
5634 }
5635 else /* i.base_reg and 32/64 bit mode */
5636 {
5637 if (flag_code == CODE_64BIT
5638 && operand_type_check (i.types[op], disp))
5639 {
5640 i386_operand_type temp;
5641 operand_type_set (&temp, 0);
5642 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5643 i.types[op] = temp;
5644 if (i.prefix[ADDR_PREFIX] == 0)
5645 i.types[op].bitfield.disp32s = 1;
5646 else
5647 i.types[op].bitfield.disp32 = 1;
5648 }
5649
5650 if (!i.tm.opcode_modifier.vecsib)
5651 i.rm.regmem = i.base_reg->reg_num;
5652 if ((i.base_reg->reg_flags & RegRex) != 0)
5653 i.rex |= REX_B;
5654 i.sib.base = i.base_reg->reg_num;
5655 /* x86-64 ignores REX prefix bit here to avoid decoder
5656 complications. */
5657 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5658 {
5659 default_seg = &ss;
5660 if (i.disp_operands == 0)
5661 {
5662 fake_zero_displacement = 1;
5663 i.types[op].bitfield.disp8 = 1;
5664 }
5665 }
5666 else if (i.base_reg->reg_num == ESP_REG_NUM)
5667 {
5668 default_seg = &ss;
5669 }
5670 i.sib.scale = i.log2_scale_factor;
5671 if (i.index_reg == 0)
5672 {
5673 gas_assert (!i.tm.opcode_modifier.vecsib);
5674 /* <disp>(%esp) becomes two byte modrm with no index
5675 register. We've already stored the code for esp
5676 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5677 Any base register besides %esp will not use the
5678 extra modrm byte. */
5679 i.sib.index = NO_INDEX_REGISTER;
5680 }
5681 else if (!i.tm.opcode_modifier.vecsib)
5682 {
5683 if (i.index_reg->reg_num == RegEiz
5684 || i.index_reg->reg_num == RegRiz)
5685 i.sib.index = NO_INDEX_REGISTER;
5686 else
5687 i.sib.index = i.index_reg->reg_num;
5688 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5689 if ((i.index_reg->reg_flags & RegRex) != 0)
5690 i.rex |= REX_X;
5691 }
5692
5693 if (i.disp_operands
5694 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5695 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5696 i.rm.mode = 0;
5697 else
5698 i.rm.mode = mode_from_disp_size (i.types[op]);
5699 }
5700
5701 if (fake_zero_displacement)
5702 {
5703 /* Fakes a zero displacement assuming that i.types[op]
5704 holds the correct displacement size. */
5705 expressionS *exp;
5706
5707 gas_assert (i.op[op].disps == 0);
5708 exp = &disp_expressions[i.disp_operands++];
5709 i.op[op].disps = exp;
5710 exp->X_op = O_constant;
5711 exp->X_add_number = 0;
5712 exp->X_add_symbol = (symbolS *) 0;
5713 exp->X_op_symbol = (symbolS *) 0;
5714 }
5715
5716 mem = op;
5717 }
5718 else
5719 mem = ~0;
5720
5721 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5722 {
5723 if (operand_type_check (i.types[0], imm))
5724 i.vex.register_specifier = NULL;
5725 else
5726 {
5727 /* VEX.vvvv encodes one of the sources when the first
5728 operand is not an immediate. */
5729 if (i.tm.opcode_modifier.vexw == VEXW0)
5730 i.vex.register_specifier = i.op[0].regs;
5731 else
5732 i.vex.register_specifier = i.op[1].regs;
5733 }
5734
5735 /* Destination is a XMM register encoded in the ModRM.reg
5736 and VEX.R bit. */
5737 i.rm.reg = i.op[2].regs->reg_num;
5738 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5739 i.rex |= REX_R;
5740
5741 /* ModRM.rm and VEX.B encodes the other source. */
5742 if (!i.mem_operands)
5743 {
5744 i.rm.mode = 3;
5745
5746 if (i.tm.opcode_modifier.vexw == VEXW0)
5747 i.rm.regmem = i.op[1].regs->reg_num;
5748 else
5749 i.rm.regmem = i.op[0].regs->reg_num;
5750
5751 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5752 i.rex |= REX_B;
5753 }
5754 }
5755 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5756 {
5757 i.vex.register_specifier = i.op[2].regs;
5758 if (!i.mem_operands)
5759 {
5760 i.rm.mode = 3;
5761 i.rm.regmem = i.op[1].regs->reg_num;
5762 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5763 i.rex |= REX_B;
5764 }
5765 }
5766 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5767 (if any) based on i.tm.extension_opcode. Again, we must be
5768 careful to make sure that segment/control/debug/test/MMX
5769 registers are coded into the i.rm.reg field. */
5770 else if (i.reg_operands)
5771 {
5772 unsigned int op;
5773 unsigned int vex_reg = ~0;
5774
5775 for (op = 0; op < i.operands; op++)
5776 if (i.types[op].bitfield.reg8
5777 || i.types[op].bitfield.reg16
5778 || i.types[op].bitfield.reg32
5779 || i.types[op].bitfield.reg64
5780 || i.types[op].bitfield.regmmx
5781 || i.types[op].bitfield.regxmm
5782 || i.types[op].bitfield.regymm
5783 || i.types[op].bitfield.sreg2
5784 || i.types[op].bitfield.sreg3
5785 || i.types[op].bitfield.control
5786 || i.types[op].bitfield.debug
5787 || i.types[op].bitfield.test)
5788 break;
5789
5790 if (vex_3_sources)
5791 op = dest;
5792 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5793 {
5794 /* For instructions with VexNDS, the register-only
5795 source operand is encoded in VEX prefix. */
5796 gas_assert (mem != (unsigned int) ~0);
5797
5798 if (op > mem)
5799 {
5800 vex_reg = op++;
5801 gas_assert (op < i.operands);
5802 }
5803 else
5804 {
5805 /* Check register-only source operand when two source
5806 operands are swapped. */
5807 if (!i.tm.operand_types[op].bitfield.baseindex
5808 && i.tm.operand_types[op + 1].bitfield.baseindex)
5809 {
5810 vex_reg = op;
5811 op += 2;
5812 gas_assert (mem == (vex_reg + 1)
5813 && op < i.operands);
5814 }
5815 else
5816 {
5817 vex_reg = op + 1;
5818 gas_assert (vex_reg < i.operands);
5819 }
5820 }
5821 }
5822 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5823 {
5824 /* For instructions with VexNDD, the register destination
5825 is encoded in VEX prefix. */
5826 if (i.mem_operands == 0)
5827 {
5828 /* There is no memory operand. */
5829 gas_assert ((op + 2) == i.operands);
5830 vex_reg = op + 1;
5831 }
5832 else
5833 {
5834 /* There are only 2 operands. */
5835 gas_assert (op < 2 && i.operands == 2);
5836 vex_reg = 1;
5837 }
5838 }
5839 else
5840 gas_assert (op < i.operands);
5841
5842 if (vex_reg != (unsigned int) ~0)
5843 {
5844 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5845
5846 if (type->bitfield.reg32 != 1
5847 && type->bitfield.reg64 != 1
5848 && !operand_type_equal (type, &regxmm)
5849 && !operand_type_equal (type, &regymm))
5850 abort ();
5851
5852 i.vex.register_specifier = i.op[vex_reg].regs;
5853 }
5854
5855 /* Don't set OP operand twice. */
5856 if (vex_reg != op)
5857 {
5858 /* If there is an extension opcode to put here, the
5859 register number must be put into the regmem field. */
5860 if (i.tm.extension_opcode != None)
5861 {
5862 i.rm.regmem = i.op[op].regs->reg_num;
5863 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5864 i.rex |= REX_B;
5865 }
5866 else
5867 {
5868 i.rm.reg = i.op[op].regs->reg_num;
5869 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5870 i.rex |= REX_R;
5871 }
5872 }
5873
5874 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5875 must set it to 3 to indicate this is a register operand
5876 in the regmem field. */
5877 if (!i.mem_operands)
5878 i.rm.mode = 3;
5879 }
5880
5881 /* Fill in i.rm.reg field with extension opcode (if any). */
5882 if (i.tm.extension_opcode != None)
5883 i.rm.reg = i.tm.extension_opcode;
5884 }
5885 return default_seg;
5886 }
5887
/* Emit a relaxable pc-relative jump (conditional or unconditional).
   The final displacement width is not known yet, so only the prefixes
   and the first opcode byte go into the fixed part of the frag; a
   machine-dependent variable part is reserved for md_convert_frag to
   fill in once relaxation has chosen the final encoding.  */
static void
output_branch (void)
{
  char *p;
  int size;			/* Initial relax state: SMALL or BIG.  */
  int code16;			/* CODE16 when displacement is 16-bit.  */
  int prefix;			/* Number of prefix bytes emitted here.  */
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  size = i.disp32_encoding ? BIG : SMALL;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* The operand-size prefix toggles the displacement width.  */
      code16 ^= CODE16;
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes--;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes--;
    }

  /* Any prefixes not accounted for above are dropped.  */
  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* It's always a symbol;  End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Classify the relax state by jump kind and target CPU.  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
5965
/* Emit a non-relaxable pc-relative jump: loop/jecxz style byte
   displacement jumps (jumpbyte) and dword-displacement jumps
   (jumpdword) dispatched from output_insn.  Writes any needed
   prefixes, the opcode byte, and a fixup for the displacement.  */
static void
output_jump (void)
{
  char *p;
  int size;			/* Displacement width in bytes.  */
  fixS *fixP;

  if (i.tm.opcode_modifier.jumpbyte)
    {
      /* This is a loop or jecxz type instruction.  */
      size = 1;
      if (i.prefix[ADDR_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
	  i.prefixes -= 1;
	}
      /* Pentium4 branch hints.  */
      if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
	{
	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
	  i.prefixes--;
	}
    }
  else
    {
      int code16;

      code16 = 0;
      if (flag_code == CODE_16BIT)
	code16 = CODE16;

      /* The operand-size prefix toggles between a 16-bit and a
	 32-bit displacement.  */
      if (i.prefix[DATA_PREFIX] != 0)
	{
	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
	  i.prefixes -= 1;
	  code16 ^= CODE16;
	}

      size = 4;
      if (code16)
	size = 2;
    }

  if (i.prefix[REX_PREFIX] != 0)
    {
      FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
      i.prefixes -= 1;
    }

  /* Any prefixes not consumed above are dropped.  */
  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  p = frag_more (1 + size);
  *p++ = i.tm.base_opcode;

  fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));

  /* All jumps handled here are signed, but don't use a signed limit
     check for 32 and 16 bit jumps as we want to allow wrap around at
     4G and 64k respectively.  */
  if (size == 1)
    fixP->fx_signed = 1;
}
6031
/* Emit a direct inter-segment jump or call: opcode followed by a
   16- or 32-bit offset (operand 1) and a 2-byte segment selector
   (operand 0).  The segment must be an absolute constant; the offset
   may be a constant (range-checked here) or an expression resolved
   via a fixup.  */
static void
output_interseg_jump (void)
{
  char *p;
  int size;			/* Offset width in bytes.  */
  int prefix;			/* Number of prefix bytes emitted.  */
  int code16;

  code16 = 0;
  if (flag_code == CODE_16BIT)
    code16 = CODE16;

  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* The operand-size prefix toggles the offset width.  */
      code16 ^= CODE16;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  size = 4;
  if (code16)
    size = 2;

  /* Any prefixes not consumed above are dropped.  */
  if (i.prefixes != 0 && !intel_syntax)
    as_warn (_("skipping prefixes on this instruction"));

  /* 1 opcode; 2 segment; offset  */
  p = frag_more (prefix + 1 + 2 + size);

  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;

  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];

  *p++ = i.tm.base_opcode;
  if (i.op[1].imms->X_op == O_constant)
    {
      offsetT n = i.op[1].imms->X_add_number;

      /* A constant 16-bit offset must fit whether interpreted as
	 signed or unsigned.  */
      if (size == 2
	  && !fits_in_unsigned_word (n)
	  && !fits_in_signed_word (n))
	{
	  as_bad (_("16-bit jump out of range"));
	  return;
	}
      md_number_to_chars (p, n, size);
    }
  else
    fix_new_exp (frag_now, p - frag_now->fr_literal, size,
		 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
  if (i.op[0].imms->X_op != O_constant)
    as_bad (_("can't handle non absolute segment in `%s'"),
	    i.tm.name);
  md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
}
6095
/* Emit the fully-matched instruction described by the global `i'
   into the current frag.  Jumps are dispatched to the dedicated
   emitters; everything else is written here in encoding order:
   prefixes, (VEX) opcode bytes, ModRM/SIB, displacement, immediate.  */
static void
output_insn (void)
{
  fragS *insn_start_frag;
  offsetT insn_start_off;

  /* Tie dwarf2 debug info to the address at the start of the insn.
     We can't do this after the insn has been output as the current
     frag may have been closed off.  eg. by frag_var.  */
  dwarf2_emit_insn (0);

  /* Remember where this insn begins; output_disp/output_imm need it
     to compute GOTPC addends.  */
  insn_start_frag = frag_now;
  insn_start_off = frag_now_fix ();

  /* Output jumps.  */
  if (i.tm.opcode_modifier.jump)
    output_branch ();
  else if (i.tm.opcode_modifier.jumpbyte
	   || i.tm.opcode_modifier.jumpdword)
    output_jump ();
  else if (i.tm.opcode_modifier.jumpintersegment)
    output_interseg_jump ();
  else
    {
      /* Output normal instructions here.  */
      char *p;
      unsigned char *q;
      unsigned int j;
      unsigned int prefix;

      /* Since the VEX prefix contains the implicit prefix, we don't
	 need the explicit prefix.  */
      if (!i.tm.opcode_modifier.vex)
	{
	  /* A mandatory prefix encoded in the high byte(s) of
	     base_opcode is routed through add_prefix so it is
	     ordered with the other prefix bytes.  */
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      if (i.tm.base_opcode & 0xff000000)
		{
		  prefix = (i.tm.base_opcode >> 24) & 0xff;
		  goto check_prefix;
		}
	      break;
	    case 2:
	      if ((i.tm.base_opcode & 0xff0000) != 0)
		{
		  prefix = (i.tm.base_opcode >> 16) & 0xff;
		  if (i.tm.cpu_flags.bitfield.cpupadlock)
		    {
check_prefix:
		      /* PadLock insns already carry a REP prefix;
			 don't add a second one.  */
		      if (prefix != REPE_PREFIX_OPCODE
			  || (i.prefix[REP_PREFIX]
			      != REPE_PREFIX_OPCODE))
			add_prefix (prefix);
		    }
		  else
		    add_prefix (prefix);
		}
	      break;
	    case 1:
	      break;
	    default:
	      abort ();
	    }

	  /* The prefix bytes.  */
	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
	    if (*q)
	      FRAG_APPEND_1_CHAR (*q);
	}

      if (i.tm.opcode_modifier.vex)
	{
	  /* Only segment and address-size prefixes may precede a VEX
	     prefix; REX is folded into the VEX bytes themselves.  */
	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
	    if (*q)
	      switch (j)
		{
		case REX_PREFIX:
		  /* REX byte is encoded in VEX prefix.  */
		  break;
		case SEG_PREFIX:
		case ADDR_PREFIX:
		  FRAG_APPEND_1_CHAR (*q);
		  break;
		default:
		  /* There should be no other prefixes for instructions
		     with VEX prefix.  */
		  abort ();
		}

	  /* Now the VEX prefix.  */
	  p = frag_more (i.vex.length);
	  for (j = 0; j < i.vex.length; j++)
	    p[j] = i.vex.bytes[j];
	}

      /* Now the opcode; be careful about word order here!  */
      if (i.tm.opcode_length == 1)
	{
	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
	}
      else
	{
	  switch (i.tm.opcode_length)
	    {
	    case 3:
	      p = frag_more (3);
	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
	      break;
	    case 2:
	      p = frag_more (2);
	      break;
	    default:
	      abort ();
	      break;
	    }

	  /* Put out high byte first: can't use md_number_to_chars!  */
	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
	  *p = i.tm.base_opcode & 0xff;
	}

      /* Now the modrm byte and sib byte (if present).  */
      if (i.tm.opcode_modifier.modrm)
	{
	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
			       | i.rm.reg << 3
			       | i.rm.mode << 6));
	  /* If i.rm.regmem == ESP (4)
	     && i.rm.mode != (Register mode)
	     && not 16 bit
	     ==> need second modrm byte.  */
	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
	      && i.rm.mode != 3
	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
				 | i.sib.index << 3
				 | i.sib.scale << 6));
	}

      if (i.disp_operands)
	output_disp (insn_start_frag, insn_start_off);

      if (i.imm_operands)
	output_imm (insn_start_frag, insn_start_off);
    }

#ifdef DEBUG386
  if (flag_debug)
    {
      pi ("" /*line*/, &i);
    }
#endif /* DEBUG386  */
}
6250
6251 /* Return the size of the displacement operand N. */
6252
6253 static int
6254 disp_size (unsigned int n)
6255 {
6256 int size = 4;
6257 if (i.types[n].bitfield.disp64)
6258 size = 8;
6259 else if (i.types[n].bitfield.disp8)
6260 size = 1;
6261 else if (i.types[n].bitfield.disp16)
6262 size = 2;
6263 return size;
6264 }
6265
6266 /* Return the size of the immediate operand N. */
6267
6268 static int
6269 imm_size (unsigned int n)
6270 {
6271 int size = 4;
6272 if (i.types[n].bitfield.imm64)
6273 size = 8;
6274 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6275 size = 1;
6276 else if (i.types[n].bitfield.imm16)
6277 size = 2;
6278 return size;
6279 }
6280
/* Emit the displacement bytes (and fixups) for every memory operand
   of the current instruction.  INSN_START_FRAG/INSN_START_OFF locate
   the first byte of the instruction and are used to compute the
   addend of GOTPC-style relocations.  */
static void
output_disp (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (operand_type_check (i.types[n], disp))
	{
	  if (i.op[n].disps->X_op == O_constant)
	    {
	      /* Absolute displacement: emit the range-checked
		 constant directly; no fixup needed.  */
	      int size = disp_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].disps->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      enum bfd_reloc_code_real reloc_type;
	      int size = disp_size (n);
	      int sign = i.types[n].bitfield.disp32s;
	      int pcrel = (i.flags[n] & Operand_PCrel) != 0;

	      /* We can't have 8 bit displacement here.  */
	      gas_assert (!i.types[n].bitfield.disp8);

	      /* The PC relative address is computed relative
		 to the instruction boundary, so in case immediate
		 fields follows, we need to adjust the value.  */
	      if (pcrel && i.imm_operands)
		{
		  unsigned int n1;
		  int sz = 0;

		  for (n1 = 0; n1 < i.operands; n1++)
		    if (operand_type_check (i.types[n1], imm))
		      {
			/* Only one immediate is allowed for PC
			   relative address.  */
			gas_assert (sz == 0);
			sz = imm_size (n1);
			i.op[n].disps->X_add_number -= sz;
		      }
		  /* We should find the immediate.  */
		  gas_assert (sz != 0);
		}

	      p = frag_more (size);
	      reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
	      /* A reference to _GLOBAL_OFFSET_TABLE_ (optionally in a
		 "sym+[.-.Lnn]" form) becomes a GOTPC relocation; see
		 the long explanation in output_imm.  */
	      if (GOT_symbol
		  && GOT_symbol == i.op[n].disps->X_add_symbol
		  && (((reloc_type == BFD_RELOC_32
			|| reloc_type == BFD_RELOC_X86_64_32S
			|| (reloc_type == BFD_RELOC_64
			    && object_64bit))
		       && (i.op[n].disps->X_op == O_symbol
			   || (i.op[n].disps->X_op == O_add
			       && ((symbol_get_value_expression
				    (i.op[n].disps->X_op_symbol)->X_op)
				   == O_subtract))))
		      || reloc_type == BFD_RELOC_32_PCREL))
		{
		  offsetT add;

		  /* ADD = number of bytes of this insn emitted before
		     the displacement field, possibly spanning frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    {
		      reloc_type = BFD_RELOC_386_GOTPC;
		      /* NOTE(review): this writes i.op[n].imms although
			 operand N is a displacement; presumably imms and
			 disps alias (i.op being a union) -- confirm
			 against the declaration of i.op.  */
		      i.op[n].imms->X_add_number += add;
		    }
		  else if (reloc_type == BFD_RELOC_64)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  else
		    /* Don't do the adjustment for x86-64, as there
		       the pcrel addressing is relative to the _next_
		       insn, and that is taken care of in other code.  */
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].disps, pcrel, reloc_type);
	    }
	}
    }
}
6381
/* Emit the immediate bytes (and fixups) for every immediate operand
   of the current instruction.  INSN_START_FRAG/INSN_START_OFF locate
   the first byte of the instruction, for GOTPC addend computation.  */
static void
output_imm (fragS *insn_start_frag, offsetT insn_start_off)
{
  char *p;
  unsigned int n;

  for (n = 0; n < i.operands; n++)
    {
      if (operand_type_check (i.types[n], imm))
	{
	  if (i.op[n].imms->X_op == O_constant)
	    {
	      /* Constant immediate: emit the range-checked value
		 directly; no fixup needed.  */
	      int size = imm_size (n);
	      offsetT val;

	      val = offset_in_range (i.op[n].imms->X_add_number,
				     size);
	      p = frag_more (size);
	      md_number_to_chars (p, val, size);
	    }
	  else
	    {
	      /* Not absolute_section.
		 Need a 32-bit fixup (don't support 8bit
		 non-absolute imms).  Try to support other
		 sizes ...  */
	      enum bfd_reloc_code_real reloc_type;
	      int size = imm_size (n);
	      int sign;

	      /* A 32-bit immediate that is sign-extended to 64 bits
		 at run time needs a signed relocation.  */
	      if (i.types[n].bitfield.imm32s
		  && (i.suffix == QWORD_MNEM_SUFFIX
		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
		sign = 1;
	      else
		sign = 0;

	      p = frag_more (size);
	      reloc_type = reloc (size, 0, sign, i.reloc[n]);

	      /*  This is tough to explain.  We end up with this one if we
	       * have operands that look like
	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
	       * obtain the absolute address of the GOT, and it is strongly
	       * preferable from a performance point of view to avoid using
	       * a runtime relocation for this.  The actual sequence of
	       * instructions often look something like:
	       *
	       *	call	.L66
	       * .L66:
	       *	popl	%ebx
	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
	       *
	       *	The call and pop essentially return the absolute address
	       * of the label .L66 and store it in %ebx.  The linker itself
	       * will ultimately change the first operand of the addl so
	       * that %ebx points to the GOT, but to keep things simple, the
	       * .o file must have this operand set so that it generates not
	       * the absolute address of .L66, but the absolute address of
	       * itself.  This allows the linker itself simply treat a GOTPC
	       * relocation as asking for a pcrel offset to the GOT to be
	       * added in, and the addend of the relocation is stored in the
	       * operand field for the instruction itself.
	       *
	       *	Our job here is to fix the operand so that it would add
	       * the correct offset so that %ebx would point to itself.  The
	       * thing that is tricky is that .-.L66 will point to the
	       * beginning of the instruction, so we need to further modify
	       * the operand so that it will point to itself.  There are
	       * other cases where you have something like:
	       *
	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
	       *
	       * and here no correction would be required.  Internally in
	       * the assembler we treat operands of this form as not being
	       * pcrel since the '.' is explicitly mentioned, and I wonder
	       * whether it would simplify matters to do it this way.  Who
	       * knows.  In earlier versions of the PIC patches, the
	       * pcrel_adjust field was used to store the correction, but
	       * since the expression is not pcrel, I felt it would be
	       * confusing to do it this way.  */

	      if ((reloc_type == BFD_RELOC_32
		   || reloc_type == BFD_RELOC_X86_64_32S
		   || reloc_type == BFD_RELOC_64)
		  && GOT_symbol
		  && GOT_symbol == i.op[n].imms->X_add_symbol
		  && (i.op[n].imms->X_op == O_symbol
		      || (i.op[n].imms->X_op == O_add
			  && ((symbol_get_value_expression
			       (i.op[n].imms->X_op_symbol)->X_op)
			      == O_subtract))))
		{
		  offsetT add;

		  /* ADD = number of bytes of this insn emitted before
		     the immediate field, possibly spanning frags.  */
		  if (insn_start_frag == frag_now)
		    add = (p - frag_now->fr_literal) - insn_start_off;
		  else
		    {
		      fragS *fr;

		      add = insn_start_frag->fr_fix - insn_start_off;
		      for (fr = insn_start_frag->fr_next;
			   fr && fr != frag_now; fr = fr->fr_next)
			add += fr->fr_fix;
		      add += p - frag_now->fr_literal;
		    }

		  if (!object_64bit)
		    reloc_type = BFD_RELOC_386_GOTPC;
		  else if (size == 4)
		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
		  else if (size == 8)
		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
		  i.op[n].imms->X_add_number += add;
		}
	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
			   i.op[n].imms, 0, reloc_type);
	    }
	}
    }
}
6504 \f
6505 /* x86_cons_fix_new is called via the expression parsing code when a
6506 reloc is needed. We use this hook to get the correct .got reloc. */
6507 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6508 static int cons_sign = -1;
6509
6510 void
6511 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6512 expressionS *exp)
6513 {
6514 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6515
6516 got_reloc = NO_RELOC;
6517
6518 #ifdef TE_PE
6519 if (exp->X_op == O_secrel)
6520 {
6521 exp->X_op = O_symbol;
6522 r = BFD_RELOC_32_SECREL;
6523 }
6524 #endif
6525
6526 fix_new_exp (frag, off, len, exp, 0, r);
6527 }
6528
#if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
# define lex_got(reloc, adjust, types) NULL
#else
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar .plt or .got references.

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@GOTOFF' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  /* NB: matching below takes the first entry whose name matches, so a
     token that is a prefix of another (PLT/PLTOFF, TLSLD/TLSLDM,
     GOT/GOTPLT/GOTOFF/GOTPCREL/GOTTPOFF) must come after the longer
     one.  rel[0] is for 32-bit output, rel[1] for 64-bit output; a
     _dummy_first_bfd_reloc_code_real entry means "unsupported for
     that format".  */
  static const struct {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
  } gotrel[] = {
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64 },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64 },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD    },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32  },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32    },
      OPERAND_TYPE_IMM32_32S_64_DISP32 },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32 },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32 },
  };
  char *cp;
  unsigned int j;

  if (!IS_ELF)
    return NULL;

  /* Look for the '@' introducing a reloc token; give up at end of
     line or at a comma.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];
	      if (adjust)
		*adjust = len;

	      /* Report the operand types the reloc permits; the
		 32-bit set is the same for all tokens.  */
	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      if (GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = (char *) xmalloc (first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
#endif
6681
/* Parse the expression of a data directive (.long and friends),
   accepting x86 @-suffixed relocation operators via lex_got when SIZE
   allows one (4 bytes, or 8 bytes in 64-bit object files).  Any reloc
   found is left in got_reloc for the following x86_cons_fix_new.  */
void
x86_cons (expressionS *exp, int size)
{
  /* NOTE(review): negating intel_syntax seems to park the
     Intel-syntax flag while the generic expression code runs; it is
     negated back below -- confirm this convention where the variable
     is declared.  */
  intel_syntax = -intel_syntax;

  exp->X_md = 0;
  if (size == 4 || (object_64bit && size == 8))
    {
      /* Handle @GOTOFF and the like in an expression.  */
      char *save;
      char *gotfree_input_line;
      int adjust = 0;

      save = input_line_pointer;
      gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
      if (gotfree_input_line)
	input_line_pointer = gotfree_input_line;

      expression (exp);

      if (gotfree_input_line)
	{
	  /* expression () has merrily parsed up to the end of line,
	     or a comma - in the wrong buffer.  Transfer how far
	     input_line_pointer has moved to the right buffer.  */
	  input_line_pointer = (save
				+ (input_line_pointer - gotfree_input_line)
				+ adjust);
	  free (gotfree_input_line);
	  /* A reloc operator makes no sense on these expression
	     kinds; diagnose with the original input text.  */
	  if (exp->X_op == O_constant
	      || exp->X_op == O_absent
	      || exp->X_op == O_illegal
	      || exp->X_op == O_register
	      || exp->X_op == O_big)
	    {
	      char c = *input_line_pointer;
	      *input_line_pointer = 0;
	      as_bad (_("missing or invalid expression `%s'"), save);
	      *input_line_pointer = c;
	    }
	}
    }
  else
    expression (exp);

  intel_syntax = -intel_syntax;

  if (intel_syntax)
    i386_intel_simplify (exp);
}
6732
6733 static void
6734 signed_cons (int size)
6735 {
6736 if (flag_code == CODE_64BIT)
6737 cons_sign = 1;
6738 cons (size);
6739 cons_sign = -1;
6740 }
6741
6742 #ifdef TE_PE
6743 static void
6744 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6745 {
6746 expressionS exp;
6747
6748 do
6749 {
6750 expression (&exp);
6751 if (exp.X_op == O_symbol)
6752 exp.X_op = O_secrel;
6753
6754 emit_expr (&exp, 4);
6755 }
6756 while (*input_line_pointer++ == ',');
6757
6758 input_line_pointer--;
6759 demand_empty_rest_of_line ();
6760 }
6761 #endif
6762
/* Parse the immediate operand starting at IMM_START into the next free
   slot of im_expressions, recording any relocation operator seen.
   Returns 1 on success, 0 on failure (with an error already issued).  */

static int
i386_immediate (char *imm_start)
{
  char *save_input_line_pointer;
  char *gotfree_input_line;
  segT exp_seg = 0;
  expressionS *exp;
  i386_operand_type types;

  /* Start with every immediate type permitted; lex_got narrows this
     according to the relocation operator, if any.  */
  operand_type_set (&types, ~0);

  if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
    {
      as_bad (_("at most %d immediate operands are allowed"),
	      MAX_IMMEDIATE_OPERANDS);
      return 0;
    }

  exp = &im_expressions[i.imm_operands++];
  i.op[this_operand].imms = exp;

  if (is_space_char (*imm_start))
    ++imm_start;

  /* Temporarily point the global line pointer at the operand text so
     the generic expression code can parse it.  */
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = imm_start;

  /* Strip any @reloc operator into i.reloc[this_operand], parsing from
     a reloc-free copy of the line if one was made.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);

  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc operator on a constant or register makes no sense;
	 force the finalize step below to reject it.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  return i386_finalize_immediate (exp_seg, exp, types, imm_start);
}
6811
/* Validate the parsed immediate EXP (from text IMM_START, segment
   EXP_SEG) and set the operand's immediate type bits, intersected with
   TYPES as constrained by any relocation operator.  IMM_START may be
   NULL to suppress diagnostics.  Returns 1 on success, 0 on failure.  */

static int
i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
			 i386_operand_type types, const char *imm_start)
{
  if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
    {
      if (imm_start)
	as_bad (_("missing or invalid immediate expression `%s'"),
		imm_start);
      return 0;
    }
  else if (exp->X_op == O_constant)
    {
      /* Size it properly later.  */
      i.types[this_operand].bitfield.imm64 = 1;
      /* If not 64bit, sign extend val.  */
      if (flag_code != CODE_64BIT
	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
	exp->X_add_number
	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
    }
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
	   && exp_seg != absolute_section
	   && exp_seg != text_section
	   && exp_seg != data_section
	   && exp_seg != bss_section
	   && exp_seg != undefined_section
	   && !bfd_is_com_section (exp_seg))
    {
      as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
      return 0;
    }
#endif
  else if (!intel_syntax && exp->X_op == O_register)
    {
      if (imm_start)
	as_bad (_("illegal immediate register operand %s"), imm_start);
      return 0;
    }
  else
    {
      /* This is an address.  The size of the address will be
	 determined later, depending on destination register,
	 suffix, or the default for the section.  */
      i.types[this_operand].bitfield.imm8 = 1;
      i.types[this_operand].bitfield.imm16 = 1;
      i.types[this_operand].bitfield.imm32 = 1;
      i.types[this_operand].bitfield.imm32s = 1;
      i.types[this_operand].bitfield.imm64 = 1;
      /* Narrow to what the relocation operator (if any) permits.  */
      i.types[this_operand] = operand_type_and (i.types[this_operand],
						types);
    }

  return 1;
}
6868
6869 static char *
6870 i386_scale (char *scale)
6871 {
6872 offsetT val;
6873 char *save = input_line_pointer;
6874
6875 input_line_pointer = scale;
6876 val = get_absolute_expression ();
6877
6878 switch (val)
6879 {
6880 case 1:
6881 i.log2_scale_factor = 0;
6882 break;
6883 case 2:
6884 i.log2_scale_factor = 1;
6885 break;
6886 case 4:
6887 i.log2_scale_factor = 2;
6888 break;
6889 case 8:
6890 i.log2_scale_factor = 3;
6891 break;
6892 default:
6893 {
6894 char sep = *input_line_pointer;
6895
6896 *input_line_pointer = '\0';
6897 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6898 scale);
6899 *input_line_pointer = sep;
6900 input_line_pointer = save;
6901 return NULL;
6902 }
6903 }
6904 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6905 {
6906 as_warn (_("scale factor of %d without an index register"),
6907 1 << i.log2_scale_factor);
6908 i.log2_scale_factor = 0;
6909 }
6910 scale = input_line_pointer;
6911 input_line_pointer = save;
6912 return scale;
6913 }
6914
/* Parse the displacement text [DISP_START, DISP_END) of the current
   operand, choosing candidate displacement sizes from the code mode,
   prefixes and instruction template, then parsing the expression.
   Returns 1 on success, 0 on failure.  */

static int
i386_displacement (char *disp_start, char *disp_end)
{
  expressionS *exp;
  segT exp_seg = 0;
  char *save_input_line_pointer;
  char *gotfree_input_line;
  int override;
  i386_operand_type bigdisp, types = anydisp;
  int ret;

  if (i.disp_operands == MAX_MEMORY_OPERANDS)
    {
      as_bad (_("at most %d displacement operands are allowed"),
	      MAX_MEMORY_OPERANDS);
      return 0;
    }

  operand_type_set (&bigdisp, 0);
  if ((i.types[this_operand].bitfield.jumpabsolute)
      || (!current_templates->start->opcode_modifier.jump
	  && !current_templates->start->opcode_modifier.jumpdword))
    {
      /* Memory-operand displacement: width follows the address size.  */
      bigdisp.bitfield.disp32 = 1;
      override = (i.prefix[ADDR_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (!override)
	    {
	      bigdisp.bitfield.disp32s = 1;
	      bigdisp.bitfield.disp64 = 1;
	    }
	}
      else if ((flag_code == CODE_16BIT) ^ override)
	{
	  bigdisp.bitfield.disp32 = 0;
	  bigdisp.bitfield.disp16 = 1;
	}
    }
  else
    {
      /* For PC-relative branches, the width of the displacement
	 is dependent upon data size, not address size.  */
      override = (i.prefix[DATA_PREFIX] != 0);
      if (flag_code == CODE_64BIT)
	{
	  if (override || i.suffix == WORD_MNEM_SUFFIX)
	    bigdisp.bitfield.disp16 = 1;
	  else
	    {
	      bigdisp.bitfield.disp32 = 1;
	      bigdisp.bitfield.disp32s = 1;
	    }
	}
      else
	{
	  if (!override)
	    override = (i.suffix == (flag_code != CODE_16BIT
				     ? WORD_MNEM_SUFFIX
				     : LONG_MNEM_SUFFIX));
	  bigdisp.bitfield.disp32 = 1;
	  if ((flag_code == CODE_16BIT) ^ override)
	    {
	      bigdisp.bitfield.disp32 = 0;
	      bigdisp.bitfield.disp16 = 1;
	    }
	}
    }
  i.types[this_operand] = operand_type_or (i.types[this_operand],
					   bigdisp);

  exp = &disp_expressions[i.disp_operands];
  i.op[this_operand].disps = exp;
  i.disp_operands++;
  save_input_line_pointer = input_line_pointer;
  input_line_pointer = disp_start;
  END_STRING_AND_SAVE (disp_end);

#ifndef GCC_ASM_O_HACK
#define GCC_ASM_O_HACK 0
#endif
#if GCC_ASM_O_HACK
  /* NOTE(review): this block refers to displacement_string_end, which
     is not declared in this function; it looks like it would not
     compile if GCC_ASM_O_HACK were enabled — verify before turning
     the hack on.  */
  END_STRING_AND_SAVE (disp_end + 1);
  if (i.types[this_operand].bitfield.baseIndex
      && displacement_string_end[-1] == '+')
    {
      /* This hack is to avoid a warning when using the "o"
	 constraint within gcc asm statements.
	 For instance:

	 #define _set_tssldt_desc(n,addr,limit,type) \
	 __asm__ __volatile__ ( \
	 "movw %w2,%0\n\t" \
	 "movw %w1,2+%0\n\t" \
	 "rorl $16,%1\n\t" \
	 "movb %b1,4+%0\n\t" \
	 "movb %4,5+%0\n\t" \
	 "movb $0,6+%0\n\t" \
	 "movb %h1,7+%0\n\t" \
	 "rorl $16,%1" \
	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))

	 This works great except that the output assembler ends
	 up looking a bit weird if it turns out that there is
	 no offset.  You end up producing code that looks like:

	 #APP
	 movw $235,(%eax)
	 movw %dx,2+(%eax)
	 rorl $16,%edx
	 movb %dl,4+(%eax)
	 movb $137,5+(%eax)
	 movb $0,6+(%eax)
	 movb %dh,7+(%eax)
	 rorl $16,%edx
	 #NO_APP

	 So here we provide the missing zero.  */

      *displacement_string_end = '0';
    }
#endif
  /* Strip any @reloc operator, narrowing TYPES accordingly.  */
  gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
  if (gotfree_input_line)
    input_line_pointer = gotfree_input_line;

  exp_seg = expression (exp);

  SKIP_WHITESPACE ();
  if (*input_line_pointer)
    as_bad (_("junk `%s' after expression"), input_line_pointer);
#if GCC_ASM_O_HACK
  RESTORE_END_STRING (disp_end + 1);
#endif
  input_line_pointer = save_input_line_pointer;
  if (gotfree_input_line)
    {
      free (gotfree_input_line);

      /* A reloc operator on a constant or register displacement is
	 invalid; make the finalize step reject it.  */
      if (exp->X_op == O_constant || exp->X_op == O_register)
	exp->X_op = O_illegal;
    }

  ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);

  RESTORE_END_STRING (disp_end);

  return ret;
}
7064
7065 static int
7066 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7067 i386_operand_type types, const char *disp_start)
7068 {
7069 i386_operand_type bigdisp;
7070 int ret = 1;
7071
7072 /* We do this to make sure that the section symbol is in
7073 the symbol table. We will ultimately change the relocation
7074 to be relative to the beginning of the section. */
7075 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7076 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7077 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7078 {
7079 if (exp->X_op != O_symbol)
7080 goto inv_disp;
7081
7082 if (S_IS_LOCAL (exp->X_add_symbol)
7083 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7084 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7085 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7086 exp->X_op = O_subtract;
7087 exp->X_op_symbol = GOT_symbol;
7088 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7089 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7090 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7091 i.reloc[this_operand] = BFD_RELOC_64;
7092 else
7093 i.reloc[this_operand] = BFD_RELOC_32;
7094 }
7095
7096 else if (exp->X_op == O_absent
7097 || exp->X_op == O_illegal
7098 || exp->X_op == O_big)
7099 {
7100 inv_disp:
7101 as_bad (_("missing or invalid displacement expression `%s'"),
7102 disp_start);
7103 ret = 0;
7104 }
7105
7106 else if (flag_code == CODE_64BIT
7107 && !i.prefix[ADDR_PREFIX]
7108 && exp->X_op == O_constant)
7109 {
7110 /* Since displacement is signed extended to 64bit, don't allow
7111 disp32 and turn off disp32s if they are out of range. */
7112 i.types[this_operand].bitfield.disp32 = 0;
7113 if (!fits_in_signed_long (exp->X_add_number))
7114 {
7115 i.types[this_operand].bitfield.disp32s = 0;
7116 if (i.types[this_operand].bitfield.baseindex)
7117 {
7118 as_bad (_("0x%lx out range of signed 32bit displacement"),
7119 (long) exp->X_add_number);
7120 ret = 0;
7121 }
7122 }
7123 }
7124
7125 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7126 else if (exp->X_op != O_constant
7127 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7128 && exp_seg != absolute_section
7129 && exp_seg != text_section
7130 && exp_seg != data_section
7131 && exp_seg != bss_section
7132 && exp_seg != undefined_section
7133 && !bfd_is_com_section (exp_seg))
7134 {
7135 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7136 ret = 0;
7137 }
7138 #endif
7139
7140 /* Check if this is a displacement only operand. */
7141 bigdisp = i.types[this_operand];
7142 bigdisp.bitfield.disp8 = 0;
7143 bigdisp.bitfield.disp16 = 0;
7144 bigdisp.bitfield.disp32 = 0;
7145 bigdisp.bitfield.disp32s = 0;
7146 bigdisp.bitfield.disp64 = 0;
7147 if (operand_type_all_zero (&bigdisp))
7148 i.types[this_operand] = operand_type_and (i.types[this_operand],
7149 types);
7150
7151 return ret;
7152 }
7153
7154 /* Make sure the memory operand we've been dealt is valid.
7155 Return 1 on success, 0 on a failure. */
7156
static int
i386_index_check (const char *operand_string)
{
  int ok;
  const char *kind = "base/index";
#if INFER_ADDR_PREFIX
  int fudged = 0;

  /* Retried once after inferring an address-size prefix (see below).  */
 tryprefix:
#endif
  ok = 1;
  if (current_templates->start->opcode_modifier.isstring
      && !current_templates->start->opcode_modifier.immext
      && (current_templates->end[-1].opcode_modifier.isstring
	  || i.mem_operands))
    {
      /* Memory operands of string insns are special in that they only allow
	 a single register (rDI, rSI, or rBX) as their memory address.  */
      unsigned int expected;

      kind = "string address";

      if (current_templates->start->opcode_modifier.w)
	{
	  i386_operand_type type = current_templates->end[-1].operand_types[0];

	  if (!type.bitfield.baseindex
	      || ((!i.mem_operands != !intel_syntax)
		  && current_templates->end[-1].operand_types[1]
		     .bitfield.baseindex))
	    type = current_templates->end[-1].operand_types[1];
	  expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
	}
      else
	expected = 3 /* rBX */;

      /* ok == -1 means "plausible register class but wrong register";
	 distinguished below so a better diagnostic can be given.  */
      if (!i.base_reg || i.index_reg
	  || operand_type_check (i.types[this_operand], disp))
	ok = -1;
      else if (!(flag_code == CODE_64BIT
		 ? i.prefix[ADDR_PREFIX]
		   ? i.base_reg->reg_type.bitfield.reg32
		   : i.base_reg->reg_type.bitfield.reg64
		 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
		   ? i.base_reg->reg_type.bitfield.reg32
		   : i.base_reg->reg_type.bitfield.reg16))
	ok = 0;
      else if (i.base_reg->reg_num != expected)
	ok = -1;

      if (ok < 0)
	{
	  unsigned int j;

	  /* Find the expected register's name for the warning, using
	     the register width implied by mode and prefix.  */
	  for (j = 0; j < i386_regtab_size; ++j)
	    if ((flag_code == CODE_64BIT
		 ? i.prefix[ADDR_PREFIX]
		   ? i386_regtab[j].reg_type.bitfield.reg32
		   : i386_regtab[j].reg_type.bitfield.reg64
		 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
		   ? i386_regtab[j].reg_type.bitfield.reg32
		   : i386_regtab[j].reg_type.bitfield.reg16)
		&& i386_regtab[j].reg_num == expected)
	      break;
	  gas_assert (j < i386_regtab_size);
	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
		   operand_string,
		   intel_syntax ? '[' : '(',
		   register_prefix,
		   i386_regtab[j].reg_name,
		   intel_syntax ? ']' : ')');
	  /* Warn only; accept the operand anyway.  */
	  ok = 1;
	}
    }
  else if (flag_code == CODE_64BIT)
    {
      /* 64-bit addressing: base must be a 64-bit register (32-bit with
	 an address-size prefix) or rIP; index must be a matching-width
	 register usable as an index, rIZ/eIZ, or an xmm/ymm register
	 (VSIB).  */
      if ((i.base_reg
	   && ((i.prefix[ADDR_PREFIX] == 0
		&& !i.base_reg->reg_type.bitfield.reg64)
	       || (i.prefix[ADDR_PREFIX]
		   && !i.base_reg->reg_type.bitfield.reg32))
	   && (i.index_reg
	       || i.base_reg->reg_num !=
		  (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
	  || (i.index_reg
	      && !(i.index_reg->reg_type.bitfield.regxmm
		   || i.index_reg->reg_type.bitfield.regymm)
	      && (!i.index_reg->reg_type.bitfield.baseindex
		  || (i.prefix[ADDR_PREFIX] == 0
		      && i.index_reg->reg_num != RegRiz
		      && !i.index_reg->reg_type.bitfield.reg64
		      )
		  || (i.prefix[ADDR_PREFIX]
		      && i.index_reg->reg_num != RegEiz
		      && !i.index_reg->reg_type.bitfield.reg32))))
	ok = 0;
    }
  else
    {
      if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
	{
	  /* 16bit checks.  */
	  if ((i.base_reg
	       && (!i.base_reg->reg_type.bitfield.reg16
		   || !i.base_reg->reg_type.bitfield.baseindex))
	      || (i.index_reg
		  && (!i.index_reg->reg_type.bitfield.reg16
		      || !i.index_reg->reg_type.bitfield.baseindex
		      || !(i.base_reg
			   && i.base_reg->reg_num < 6
			   && i.index_reg->reg_num >= 6
			   && i.log2_scale_factor == 0))))
	    ok = 0;
	}
      else
	{
	  /* 32bit checks.  */
	  if ((i.base_reg
	       && !i.base_reg->reg_type.bitfield.reg32)
	      || (i.index_reg
		  && !i.index_reg->reg_type.bitfield.regxmm
		  && !i.index_reg->reg_type.bitfield.regymm
		  && ((!i.index_reg->reg_type.bitfield.reg32
		       && i.index_reg->reg_num != RegEiz)
		      || !i.index_reg->reg_type.bitfield.baseindex)))
	    ok = 0;
	}
    }
  if (!ok)
    {
#if INFER_ADDR_PREFIX
      if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
	{
	  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
	  i.prefixes += 1;
	  /* Change the size of any displacement too.  At most one of
	     Disp16 or Disp32 is set.
	     FIXME.  There doesn't seem to be any real need for separate
	     Disp16 and Disp32 flags.  The same goes for Imm16 and Imm32.
	     Removing them would probably clean up the code quite a lot.  */
	  if (flag_code != CODE_64BIT
	      && (i.types[this_operand].bitfield.disp16
		  || i.types[this_operand].bitfield.disp32))
	    i.types[this_operand]
	      = operand_type_xor (i.types[this_operand], disp16_32);
	  fudged = 1;
	  goto tryprefix;
	}
      if (fudged)
	as_bad (_("`%s' is not a valid %s expression"),
		operand_string,
		kind);
      else
#endif
	as_bad (_("`%s' is not a valid %s-bit %s expression"),
		operand_string,
		flag_code_names[i.prefix[ADDR_PREFIX]
				? flag_code == CODE_32BIT
				  ? CODE_16BIT
				  : CODE_32BIT
				: flag_code],
		kind);
    }
  return ok;
}
7322
7323 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7324 on error. */
7325
static int
i386_att_operand (char *operand_string)
{
  const reg_entry *r;
  char *end_op;
  char *op_string = operand_string;

  if (is_space_char (*op_string))
    ++op_string;

  /* We check for an absolute prefix (differentiating,
     for example, 'jmp pc_relative_label' from 'jmp *absolute_label'.  */
  if (*op_string == ABSOLUTE_PREFIX)
    {
      ++op_string;
      if (is_space_char (*op_string))
	++op_string;
      i.types[this_operand].bitfield.jumpabsolute = 1;
    }

  /* Check if operand is a register.  */
  if ((r = parse_register (op_string, &end_op)) != NULL)
    {
      i386_operand_type temp;

      /* Check for a segment override by searching for ':' after a
	 segment register.  */
      op_string = end_op;
      if (is_space_char (*op_string))
	++op_string;
      if (*op_string == ':'
	  && (r->reg_type.bitfield.sreg2
	      || r->reg_type.bitfield.sreg3))
	{
	  /* Map the segment register number to its override entry.  */
	  switch (r->reg_num)
	    {
	    case 0:
	      i.seg[i.mem_operands] = &es;
	      break;
	    case 1:
	      i.seg[i.mem_operands] = &cs;
	      break;
	    case 2:
	      i.seg[i.mem_operands] = &ss;
	      break;
	    case 3:
	      i.seg[i.mem_operands] = &ds;
	      break;
	    case 4:
	      i.seg[i.mem_operands] = &fs;
	      break;
	    case 5:
	      i.seg[i.mem_operands] = &gs;
	      break;
	    }

	  /* Skip the ':' and whitespace.  */
	  ++op_string;
	  if (is_space_char (*op_string))
	    ++op_string;

	  if (!is_digit_char (*op_string)
	      && !is_identifier_char (*op_string)
	      && *op_string != '('
	      && *op_string != ABSOLUTE_PREFIX)
	    {
	      as_bad (_("bad memory operand `%s'"), op_string);
	      return 0;
	    }
	  /* Handle case of %es:*foo.  */
	  if (*op_string == ABSOLUTE_PREFIX)
	    {
	      ++op_string;
	      if (is_space_char (*op_string))
		++op_string;
	      i.types[this_operand].bitfield.jumpabsolute = 1;
	    }
	  goto do_memory_reference;
	}
      if (*op_string)
	{
	  as_bad (_("junk `%s' after register"), op_string);
	  return 0;
	}
      /* Plain register operand: record its type (minus BaseIndex,
	 which only applies to memory addressing) and the register.  */
      temp = r->reg_type;
      temp.bitfield.baseindex = 0;
      i.types[this_operand] = operand_type_or (i.types[this_operand],
					       temp);
      i.types[this_operand].bitfield.unspecified = 0;
      i.op[this_operand].regs = r;
      i.reg_operands++;
    }
  else if (*op_string == REGISTER_PREFIX)
    {
      as_bad (_("bad register name `%s'"), op_string);
      return 0;
    }
  else if (*op_string == IMMEDIATE_PREFIX)
    {
      ++op_string;
      if (i.types[this_operand].bitfield.jumpabsolute)
	{
	  as_bad (_("immediate operand illegal with absolute jump"));
	  return 0;
	}
      if (!i386_immediate (op_string))
	return 0;
    }
  else if (is_digit_char (*op_string)
	   || is_identifier_char (*op_string)
	   || *op_string == '(')
    {
      /* This is a memory reference of some sort.  */
      char *base_string;

      /* Start and end of displacement string expression (if found).  */
      char *displacement_string_start;
      char *displacement_string_end;

    do_memory_reference:
      if ((i.mem_operands == 1
	   && !current_templates->start->opcode_modifier.isstring)
	  || i.mem_operands == 2)
	{
	  as_bad (_("too many memory references for `%s'"),
		  current_templates->start->name);
	  return 0;
	}

      /* Check for base index form.  We detect the base index form by
	 looking for an ')' at the end of the operand, searching
	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
	 after the '('.  */
      base_string = op_string + strlen (op_string);

      --base_string;
      if (is_space_char (*base_string))
	--base_string;

      /* If we only have a displacement, set-up for it to be parsed later.  */
      displacement_string_start = op_string;
      displacement_string_end = base_string + 1;

      if (*base_string == ')')
	{
	  char *temp_string;
	  unsigned int parens_balanced = 1;
	  /* We've already checked that the number of left & right ()'s are
	     equal, so this loop will not be infinite.  */
	  do
	    {
	      base_string--;
	      if (*base_string == ')')
		parens_balanced++;
	      if (*base_string == '(')
		parens_balanced--;
	    }
	  while (parens_balanced);

	  temp_string = base_string;

	  /* Skip past '(' and whitespace.  */
	  ++base_string;
	  if (is_space_char (*base_string))
	    ++base_string;

	  if (*base_string == ','
	      || ((i.base_reg = parse_register (base_string, &end_op))
		  != NULL))
	    {
	      /* Base-index form confirmed: displacement ends at the
		 opening parenthesis.  */
	      displacement_string_end = temp_string;

	      i.types[this_operand].bitfield.baseindex = 1;

	      if (i.base_reg)
		{
		  base_string = end_op;
		  if (is_space_char (*base_string))
		    ++base_string;
		}

	      /* There may be an index reg or scale factor here.  */
	      if (*base_string == ',')
		{
		  ++base_string;
		  if (is_space_char (*base_string))
		    ++base_string;

		  if ((i.index_reg = parse_register (base_string, &end_op))
		      != NULL)
		    {
		      base_string = end_op;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string == ',')
			{
			  ++base_string;
			  if (is_space_char (*base_string))
			    ++base_string;
			}
		      else if (*base_string != ')')
			{
			  as_bad (_("expecting `,' or `)' "
				    "after index register in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (*base_string == REGISTER_PREFIX)
		    {
		      as_bad (_("bad register name `%s'"), base_string);
		      return 0;
		    }

		  /* Check for scale factor.  */
		  if (*base_string != ')')
		    {
		      char *end_scale = i386_scale (base_string);

		      if (!end_scale)
			return 0;

		      base_string = end_scale;
		      if (is_space_char (*base_string))
			++base_string;
		      if (*base_string != ')')
			{
			  as_bad (_("expecting `)' "
				    "after scale factor in `%s'"),
				  operand_string);
			  return 0;
			}
		    }
		  else if (!i.index_reg)
		    {
		      as_bad (_("expecting index register or scale factor "
				"after `,'; got '%c'"),
			      *base_string);
		      return 0;
		    }
		}
	      else if (*base_string != ')')
		{
		  as_bad (_("expecting `,' or `)' "
			    "after base register in `%s'"),
			  operand_string);
		  return 0;
		}
	    }
	  else if (*base_string == REGISTER_PREFIX)
	    {
	      as_bad (_("bad register name `%s'"), base_string);
	      return 0;
	    }
	}

      /* If there's an expression beginning the operand, parse it,
	 assuming displacement_string_start and
	 displacement_string_end are meaningful.  */
      if (displacement_string_start != displacement_string_end)
	{
	  if (!i386_displacement (displacement_string_start,
				  displacement_string_end))
	    return 0;
	}

      /* Special case for (%dx) while doing input/output op.  */
      if (i.base_reg
	  && operand_type_equal (&i.base_reg->reg_type,
				 &reg16_inoutportreg)
	  && i.index_reg == 0
	  && i.log2_scale_factor == 0
	  && i.seg[i.mem_operands] == 0
	  && !operand_type_check (i.types[this_operand], disp))
	{
	  i.types[this_operand] = inoutportreg;
	  return 1;
	}

      if (i386_index_check (operand_string) == 0)
	return 0;
      i.types[this_operand].bitfield.mem = 1;
      i.mem_operands++;
    }
  else
    {
      /* It's not a memory operand; argh!  */
      as_bad (_("invalid char %s beginning operand %d `%s'"),
	      output_invalid (*op_string),
	      this_operand + 1,
	      op_string);
      return 0;
    }
  return 1;			/* Normal return.  */
}
7621 \f
7622 /* md_estimate_size_before_relax()
7623
7624 Called just before relax() for rs_machine_dependent frags. The x86
7625 assembler uses these frags to handle variable size jump
7626 instructions.
7627
7628 Any symbol that is now undefined will not become defined.
7629 Return the correct fr_subtype in the frag.
7630 Return the initial "guess for variable size of frag" to caller.
7631 The guess is actually the growth beyond the fixed part. Whatever
7632 we do to grow the fixed or variable part contributes to our
7633 returned value. */
7634
int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && (S_IS_EXTERNAL (fragP->fr_symbol)
	      || S_IS_WEAK (fragP->fr_symbol)
	      || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
		   & BSF_GNU_INDIRECT_FUNCTION))))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Convert the
	 frag to its widest form now and emit a fixup.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;

      /* fr_var carries an explicit reloc type when one was requested.  */
      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fix_new (fragP, old_fr_fix, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	case COND_JUMP86:
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Promotion disabled: keep the byte displacement and
		 rely on an 8-bit PC-relative reloc.  */
	      fixS *fixP;

	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fix_new (fragP, old_fr_fix + 1, size,
		   fragP->fr_symbol,
		   fragP->fr_offset, 1,
		   reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
7748
7749 /* Called after relax() is finished.
7750
7751 In: Address of frag.
7752 fr_type == rs_machine_dependent.
7753 fr_subtype is what the address relaxed to.
7754
7755 Out: Any fixSs and constants are set up.
7756 Caller will turn frag into a ".space 0". */
7757
void
md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
		 fragS *fragP)
{
  unsigned char *opcode;
  unsigned char *where_to_put_displacement = NULL;
  offsetT target_address;
  offsetT opcode_address;
  unsigned int extension = 0;
  offsetT displacement_from_opcode_start;

  opcode = (unsigned char *) fragP->fr_opcode;

  /* Address we want to reach in file space.  */
  target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;

  /* Address opcode resides at in file space.  */
  opcode_address = fragP->fr_address + fragP->fr_fix;

  /* Displacement from opcode start to fill into instruction.  */
  displacement_from_opcode_start = target_address - opcode_address;

  if ((fragP->fr_subtype & BIG) == 0)
    {
      /* Don't have to change opcode.  */
      extension = 1;		/* 1 opcode + 1 displacement  */
      where_to_put_displacement = &opcode[1];
    }
  else
    {
      if (no_cond_jump_promotion
	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
	as_warn_where (fragP->fr_file, fragP->fr_line,
		       _("long jump required"));

      /* Rewrite the opcode bytes for the relaxed (wider) form and note
	 where the displacement goes.  */
      switch (fragP->fr_subtype)
	{
	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
	  extension = 4;	/* 1 opcode + 4 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
	  extension = 2;	/* 1 opcode + 2 displacement  */
	  opcode[0] = 0xe9;
	  where_to_put_displacement = &opcode[1];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
	  extension = 5;	/* 2 opcode + 4 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
	  extension = 3;	/* 2 opcode + 2 displacement  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  where_to_put_displacement = &opcode[2];
	  break;

	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
	  /* Negated condition branching over an inserted unconditional
	     jump; see the COND_JUMP86 case in
	     md_estimate_size_before_relax.  */
	  extension = 4;
	  opcode[0] ^= 1;
	  opcode[1] = 3;
	  opcode[2] = 0xe9;
	  where_to_put_displacement = &opcode[3];
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}
    }

  /* If the displacement size is less than four we are sure that the
     operand fits, but if it's 4, then it could be that the displacement
     is larger than -/+ 2GB.  */
  if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
      && object_64bit
      && ((addressT) (displacement_from_opcode_start - extension
		      + ((addressT) 1 << 31))
	  > (((addressT) 2 << 31) - 1)))
    {
      as_bad_where (fragP->fr_file, fragP->fr_line,
		    _("jump target out of range"));
      /* Make us emit 0.  */
      displacement_from_opcode_start = extension;
    }
  /* Now put displacement after opcode.  */
  md_number_to_chars ((char *) where_to_put_displacement,
		      (valueT) (displacement_from_opcode_start - extension),
		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
  fragP->fr_fix += extension;
}
7856 \f
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  /* Location of the bytes to patch, inside the frag's literal pool.  */
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  if (fixP->fx_pcrel)
    {
      /* Fold the generic absolute relocation types into their
	 PC-relative counterparts now that we know this fixup is
	 PC-relative.  */
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      /* Weak PE symbols are resolved at link time; back the symbol's
	 current value out of the computed value here.  */
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.  At
	   runtime we merely add the offset to the actual PLT entry.  */
	value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	/* All TLS relocations reference thread-local storage.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_386_GOT32:
      case BFD_RELOC_X86_64_GOT32:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	break;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	/* These carry no data; leave them for tc_gen_reloc.  */
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
  *valP = value;
#endif /* !defined (TE_Mach) */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    fixP->fx_done = 1;
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      /* With RELA relocations the addend goes into the relocation
	 record instead of the section contents.  */
      fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
8030 \f
8031 char *
8032 md_atof (int type, char *litP, int *sizeP)
8033 {
8034 /* This outputs the LITTLENUMs in REVERSE order;
8035 in accord with the bigendian 386. */
8036 return ieee_md_atof (type, litP, sizeP, FALSE);
8037 }
8038 \f
/* Scratch buffer for output_invalid: large enough for "(0xXX)" plus
   the terminating NUL.  Overwritten by each call.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in a diagnostic: printable characters
   come back quoted as 'c', anything else as a hex escape (0xXX).
   Returns a pointer to a static buffer.  */
static char *
output_invalid (int c)
{
  if (!ISPRINT (c))
    {
      snprintf (output_invalid_buf, sizeof (output_invalid_buf),
		"(0x%x)", (unsigned char) c);
      return output_invalid_buf;
    }

  snprintf (output_invalid_buf, sizeof (output_invalid_buf), "'%c'", c);
  return output_invalid_buf;
}
8052
/* REG_STRING starts *before* REGISTER_PREFIX.  Parse a literal
   register name and return its table entry, or NULL if the text is
   not a register valid for the current architecture/syntax.  On
   success *END_OP is set to the first character past the name.  */

static const reg_entry *
parse_real_register (char *reg_string, char **end_op)
{
  char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy (case-normalized via register_chars[]) name characters into a
     local buffer, stopping at the first non-register character.  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  /* For naked regs, make sure that we are not dealing with an identifier.
     This prevents confusing an identifier like `eax_var' with register
     `eax'.  */
  if (allow_naked_reg && identifier_chars[(unsigned char) *s])
    return (const reg_entry *) NULL;

  *end_op = s;

  r = (const reg_entry *) hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == i386_regtab /* %st is first entry of table  */)
    {
      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';
	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  /* "%st(i)" — index off the st(0) table entry.  */
		  *end_op = s + 1;
		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
		  know (r);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  if (r == NULL || allow_pseudo_reg)
    return r;

  /* From here on, reject registers the selected architecture or mode
     does not provide.  */
  if (operand_type_all_zero (&r->reg_type))
    return (const reg_entry *) NULL;

  /* 32-bit GPRs, the fs/gs segment regs, and control/debug/test
     registers require at least an i386.  */
  if ((r->reg_type.bitfield.reg32
       || r->reg_type.bitfield.sreg3
       || r->reg_type.bitfield.control
       || r->reg_type.bitfield.debug
       || r->reg_type.bitfield.test)
      && !cpu_arch_flags.bitfield.cpui386)
    return (const reg_entry *) NULL;

  /* FPU stack registers require some form of x87.  */
  if (r->reg_type.bitfield.floatreg
      && !cpu_arch_flags.bitfield.cpu8087
      && !cpu_arch_flags.bitfield.cpu287
      && !cpu_arch_flags.bitfield.cpu387)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
    return (const reg_entry *) NULL;

  if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
    return (const reg_entry *) NULL;

  /* Don't allow fake index register unless allow_index_reg isn't 0.  */
  if (!allow_index_reg
      && (r->reg_num == RegEiz || r->reg_num == RegRiz))
    return (const reg_entry *) NULL;

  /* REX-extended and 64-bit registers are only valid in 64-bit mode,
     except for control registers on CPUs that have long mode.  */
  if (((r->reg_flags & (RegRex64 | RegRex))
       || r->reg_type.bitfield.reg64)
      && (!cpu_arch_flags.bitfield.cpulm
	  || !operand_type_equal (&r->reg_type, &control))
      && flag_code != CODE_64BIT)
    return (const reg_entry *) NULL;

  /* The `flat' pseudo segment register exists only in Intel syntax.  */
  if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
    return (const reg_entry *) NULL;

  return r;
}
8163
/* REG_STRING starts *before* REGISTER_PREFIX.  Like
   parse_real_register, but additionally accepts symbols that were
   equated to a register (segment == reg_section).  */

static const reg_entry *
parse_register (char *reg_string, char **end_op)
{
  const reg_entry *r;

  /* Try a literal register name first; without the prefix this is
     only attempted when naked registers are allowed.  */
  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      /* Fall back to looking REG_STRING up as a symbol that was
	 .equ'd to a register.  get_symbol_end works on
	 input_line_pointer, so temporarily repoint it.  */
      char *save = input_line_pointer;
      char c;
      symbolS *symbolP;

      input_line_pointer = reg_string;
      c = get_symbol_end ();
      symbolP = symbol_find (reg_string);
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  /* A reg_section symbol's value is an index into i386_regtab.  */
	  know (e->X_op == O_register);
	  know (e->X_add_number >= 0
		&& (valueT) e->X_add_number < i386_regtab_size);
	  r = i386_regtab + e->X_add_number;
	  *end_op = input_line_pointer;
	}
      /* Restore the scanner state get_symbol_end modified.  */
      *input_line_pointer = c;
      input_line_pointer = save;
    }
  return r;
}
8199
8200 int
8201 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8202 {
8203 const reg_entry *r;
8204 char *end = input_line_pointer;
8205
8206 *end = *nextcharP;
8207 r = parse_register (name, &input_line_pointer);
8208 if (r && end <= input_line_pointer)
8209 {
8210 *nextcharP = *input_line_pointer;
8211 *input_line_pointer = 0;
8212 e->X_op = O_register;
8213 e->X_add_number = r - i386_regtab;
8214 return 1;
8215 }
8216 input_line_pointer = end;
8217 *end = 0;
8218 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8219 }
8220
8221 void
8222 md_operand (expressionS *e)
8223 {
8224 char *end;
8225 const reg_entry *r;
8226
8227 switch (*input_line_pointer)
8228 {
8229 case REGISTER_PREFIX:
8230 r = parse_real_register (input_line_pointer, &end);
8231 if (r)
8232 {
8233 e->X_op = O_register;
8234 e->X_add_number = r - i386_regtab;
8235 input_line_pointer = end;
8236 }
8237 break;
8238
8239 case '[':
8240 gas_assert (intel_syntax);
8241 end = input_line_pointer++;
8242 expression (e);
8243 if (*input_line_pointer == ']')
8244 {
8245 ++input_line_pointer;
8246 e->X_op_symbol = make_expr_symbol (e);
8247 e->X_add_symbol = NULL;
8248 e->X_add_number = 0;
8249 e->X_op = O_index;
8250 }
8251 else
8252 {
8253 e->X_op = O_absent;
8254 input_line_pointer = end;
8255 }
8256 break;
8257 }
8258 }
8259
8260 \f
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Short options: SVR4-compatibility flags (-k, -V, -Q with argument,
   -s) in addition to the generic -q (quiet warnings) and -n (don't
   optimize code alignment).  */
const char *md_shortopts = "kVQ:sqn";
#else
const char *md_shortopts = "qn";
#endif
8266
/* Identifiers for the target-specific long options, allocated above
   OPTION_MD_BASE so they cannot collide with generic gas options.  */
#define OPTION_32 (OPTION_MD_BASE + 0)
#define OPTION_64 (OPTION_MD_BASE + 1)
#define OPTION_DIVIDE (OPTION_MD_BASE + 2)
#define OPTION_MARCH (OPTION_MD_BASE + 3)
#define OPTION_MTUNE (OPTION_MD_BASE + 4)
#define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
#define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
#define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
#define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
#define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
#define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
#define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
#define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
#define OPTION_X32 (OPTION_MD_BASE + 13)
8281
/* Target-specific long options.  --64 needs a 64-bit-capable BFD and
   --x32 additionally needs ELF, hence the conditional entries.  */
struct option md_longopts[] =
{
  {"32", no_argument, NULL, OPTION_32},
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  {"64", no_argument, NULL, OPTION_64},
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"x32", no_argument, NULL, OPTION_X32},
#endif
  {"divide", no_argument, NULL, OPTION_DIVIDE},
  {"march", required_argument, NULL, OPTION_MARCH},
  {"mtune", required_argument, NULL, OPTION_MTUNE},
  {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
  {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
  {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
  {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
  {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
  {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
  {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
  {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
  {NULL, no_argument, NULL, 0}
};
size_t md_longopts_size = sizeof (md_longopts);
8306
/* Handle the machine-dependent command line option C (a short option
   letter or one of the OPTION_* values), with argument ARG where the
   option takes one.  Returns 1 if the option was recognized, 0
   otherwise.  */

int
md_parse_option (int c, char *arg)
{
  unsigned int j;
  char *arch, *next;

  switch (c)
    {
    case 'n':
      optimize_align_code = 0;
      break;

    case 'q':
      quiet_warnings = 1;
      break;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
	 should be emitted or not.  FIXME: Not implemented.  */
    case 'Q':
      break;

      /* -V: SVR4 argument to print version ID.  */
    case 'V':
      print_version_id ();
      break;

      /* -k: Ignore for FreeBSD compatibility.  */
    case 'k':
      break;

    case 's':
      /* -s: On i386 Solaris, this tells the native assembler to use
	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
      break;
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
    case OPTION_64:
      {
	const char **list, **l;

	/* Accept --64 only when a 64-bit-capable target vector was
	   compiled into BFD.  */
	list = bfd_target_list ();
	for (l = list; *l != NULL; l++)
	  if (CONST_STRNEQ (*l, "elf64-x86-64")
	      || strcmp (*l, "coff-x86-64") == 0
	      || strcmp (*l, "pe-x86-64") == 0
	      || strcmp (*l, "pei-x86-64") == 0)
	    {
	      default_arch = "x86_64";
	      break;
	    }
	if (*l == NULL)
	  as_fatal (_("no compiled in support for x86_64"));
	free (list);
      }
      break;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case OPTION_X32:
      if (IS_ELF)
	{
	  const char **list, **l;

	  /* x32 needs the elf32-x86-64 target vector.  */
	  list = bfd_target_list ();
	  for (l = list; *l != NULL; l++)
	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
	      {
		default_arch = "x86_64:32";
		break;
	      }
	  if (*l == NULL)
	    as_fatal (_("no compiled in support for 32bit x86_64"));
	  free (list);
	}
      else
	as_fatal (_("32bit x86_64 is only supported for ELF"));
      break;
#endif

    case OPTION_32:
      default_arch = "i386";
      break;

    case OPTION_DIVIDE:
#ifdef SVR4_COMMENT_CHARS
      {
	char *n, *t;
	const char *s;

	/* Remove '/' from the set of comment characters so it can be
	   used as the division operator.  */
	n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
	t = n;
	for (s = i386_comment_chars; *s != '\0'; s++)
	  if (*s != '/')
	    *t++ = *s;
	*t = '\0';
	i386_comment_chars = n;
      }
#endif
      break;

    case OPTION_MARCH:
      /* ARG is a '+'-separated list: a processor name optionally
	 followed by ISA extension names (each entry matched against
	 the cpu_arch table).  */
      arch = xstrdup (arg);
      do
	{
	  if (*arch == '.')
	    as_fatal (_("invalid -march= option: `%s'"), arg);
	  next = strchr (arch, '+');
	  if (next)
	    *next++ = '\0';
	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	    {
	      if (strcmp (arch, cpu_arch [j].name) == 0)
		{
		  /* Processor.  */
		  if (! cpu_arch[j].flags.bitfield.cpui386)
		    continue;

		  cpu_arch_name = cpu_arch[j].name;
		  cpu_sub_arch_name = NULL;
		  cpu_arch_flags = cpu_arch[j].flags;
		  cpu_arch_isa = cpu_arch[j].type;
		  cpu_arch_isa_flags = cpu_arch[j].flags;
		  /* Tune for the selected processor unless -mtune was
		     given explicitly.  */
		  if (!cpu_arch_tune_set)
		    {
		      cpu_arch_tune = cpu_arch_isa;
		      cpu_arch_tune_flags = cpu_arch_isa_flags;
		    }
		  break;
		}
	      else if (*cpu_arch [j].name == '.'
		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
		{
		  /* ISA entension.  */
		  i386_cpu_flags flags;

		  if (!cpu_arch[j].negated)
		    flags = cpu_flags_or (cpu_arch_flags,
					  cpu_arch[j].flags);
		  else
		    flags = cpu_flags_and_not (cpu_arch_flags,
					       cpu_arch[j].flags);
		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
		    {
		      /* Accumulate the extension suffixes in
			 cpu_sub_arch_name.  */
		      if (cpu_sub_arch_name)
			{
			  char *name = cpu_sub_arch_name;
			  cpu_sub_arch_name = concat (name,
						      cpu_arch[j].name,
						      (const char *) NULL);
			  free (name);
			}
		      else
			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
		      cpu_arch_flags = flags;
		      cpu_arch_isa_flags = flags;
		    }
		  break;
		}
	    }

	  if (j >= ARRAY_SIZE (cpu_arch))
	    as_fatal (_("invalid -march= option: `%s'"), arg);

	  arch = next;
	}
      while (next != NULL);
      break;

    case OPTION_MTUNE:
      if (*arg == '.')
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	{
	  if (strcmp (arg, cpu_arch [j].name) == 0)
	    {
	      cpu_arch_tune_set = 1;
	      cpu_arch_tune = cpu_arch [j].type;
	      cpu_arch_tune_flags = cpu_arch[j].flags;
	      break;
	    }
	}
      if (j >= ARRAY_SIZE (cpu_arch))
	as_fatal (_("invalid -mtune= option: `%s'"), arg);
      break;

    case OPTION_MMNEMONIC:
      if (strcasecmp (arg, "att") == 0)
	intel_mnemonic = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_mnemonic = 1;
      else
	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
      break;

    case OPTION_MSYNTAX:
      if (strcasecmp (arg, "att") == 0)
	intel_syntax = 0;
      else if (strcasecmp (arg, "intel") == 0)
	intel_syntax = 1;
      else
	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
      break;

    case OPTION_MINDEX_REG:
      allow_index_reg = 1;
      break;

    case OPTION_MNAKED_REG:
      allow_naked_reg = 1;
      break;

    case OPTION_MOLD_GCC:
      old_gcc = 1;
      break;

    case OPTION_MSSE2AVX:
      sse2avx = 1;
      break;

    case OPTION_MSSE_CHECK:
      if (strcasecmp (arg, "error") == 0)
	sse_check = sse_check_error;
      else if (strcasecmp (arg, "warning") == 0)
	sse_check = sse_check_warning;
      else if (strcasecmp (arg, "none") == 0)
	sse_check = sse_check_none;
      else
	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
      break;

    case OPTION_MAVXSCALAR:
      if (strcasecmp (arg, "128") == 0)
	avxscalar = vex128;
      else if (strcasecmp (arg, "256") == 0)
	avxscalar = vex256;
      else
	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
      break;

    default:
      return 0;
    }
  return 1;
}
8553
8554 #define MESSAGE_TEMPLATE \
8555 " "
8556
/* Print the cpu_arch table to STREAM for --help output, wrapping
   lines at the width of MESSAGE_TEMPLATE.  If EXT is nonzero, list
   ISA extensions (table names starting with '.'), otherwise list
   processors.  If CHECK is nonzero, omit processors that cannot be
   selected with -march.  */
static void
show_arch (FILE *stream, int ext, int check)
{
  static char message[] = MESSAGE_TEMPLATE;
  char *start = message + 27;	/* Names start at column 27.  */
  char *p;
  int size = sizeof (MESSAGE_TEMPLATE);
  int left;
  const char *name;
  int len;
  unsigned int j;

  p = start;
  left = size - (start - message);
  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
    {
      /* Should it be skipped?  */
      if (cpu_arch [j].skip)
	continue;

      name = cpu_arch [j].name;
      len = cpu_arch [j].len;
      if (*name == '.')
	{
	  /* It is an extension.  Skip if we aren't asked to show it.  */
	  if (ext)
	    {
	      /* Drop the leading '.' for display.  */
	      name++;
	      len--;
	    }
	  else
	    continue;
	}
      else if (ext)
	{
	  /* It is an processor.  Skip if we show only extension.  */
	  continue;
	}
      else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
	{
	  /* It is an impossible processor - skip.  */
	  continue;
	}

      /* Reserve 2 spaces for ", " or ",\0" */
      left -= len + 2;

      /* Check if there is any room.  */
      if (left >= 0)
	{
	  if (p != start)
	    {
	      *p++ = ',';
	      *p++ = ' ';
	    }
	  p = mempcpy (p, name, len);
	}
      else
	{
	  /* Output the current message now and start a new one.  */
	  *p++ = ',';
	  *p = '\0';
	  fprintf (stream, "%s\n", message);
	  p = start;
	  left = size - (start - message) - len - 2;

	  gas_assert (left >= 0);

	  p = mempcpy (p, name, len);
	}
    }

  /* Flush the final (or only) line.  */
  *p = '\0';
  fprintf (stream, "%s\n", message);
}
8632
/* Print the target-specific command line options (for as --help).
   NOTE(review): the column padding inside the translated strings was
   collapsed by text extraction; the 24-column alignment below is
   reconstructed and should be checked against the repository copy.  */
void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
}
8690
8691 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8692 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8693 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8694
/* Pick the target format to use.  Decodes default_arch (set by
   --32/--64/--x32), records the selected ELF ABI, and returns the BFD
   target-vector name for the configured object flavour.  */

const char *
i386_target_format (void)
{
  if (!strncmp (default_arch, "x86_64", 6))
    {
      update_code_flag (CODE_64BIT, 1);
      /* Plain "x86_64" is the LP64 ABI; "x86_64:32" (from --x32) is
	 the ILP32 x32 ABI.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else
    as_fatal (_("unknown architecture"));

  /* Default ISA/tuning flags from the code size when the user gave no
     -march/-mtune.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    /* x32 objects are ELFCLASS32, so most 64-bit relocations
	       cannot be represented and are flagged for rejection
	       later (see tc_gen_reloc).  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	/* L1OM/K1OM have their own target vectors and exist only as
	   64-bit ELF.  */
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}
8783
8784 #endif /* OBJ_MAYBE_ more than one */
8785
8786 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit a ".note" section of type NT_ARCH recording the selected
   architecture name, so downstream tools can see which ISA was
   targeted.  No-op unless assembling ELF with a known arch name.  */
void
i386_elf_emit_arch_note (void)
{
  if (IS_ELF && cpu_arch_name != NULL)
    {
      char *p;
      asection *seg = now_seg;
      subsegT subseg = now_subseg;
      Elf_Internal_Note i_note;
      Elf_External_Note e_note;
      asection *note_secp;
      int len;

      /* Create the .note section.  */
      note_secp = subseg_new (".note", 0);
      bfd_set_section_flags (stdoutput,
			     note_secp,
			     SEC_HAS_CONTENTS | SEC_READONLY);

      /* Process the arch string.  */
      len = strlen (cpu_arch_name);

      i_note.namesz = len + 1;
      i_note.descsz = 0;
      i_note.type = NT_ARCH;
      /* Emit the note header fields, then the NUL-terminated name.  */
      p = frag_more (sizeof (e_note.namesz));
      md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
      p = frag_more (sizeof (e_note.descsz));
      md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
      p = frag_more (sizeof (e_note.type));
      md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
      p = frag_more (len + 1);
      strcpy (p, cpu_arch_name);

      /* Pad the note to a 4-byte boundary.  */
      frag_align (2, 0, 0);

      /* Restore the section that was current on entry.  */
      subseg_set (seg, subseg);
    }
}
8826 #endif
8827 \f
8828 symbolS *
8829 md_undefined_symbol (char *name)
8830 {
8831 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8832 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8833 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8834 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8835 {
8836 if (!GOT_symbol)
8837 {
8838 if (symbol_find (name))
8839 as_bad (_("GOT already in symbol table"));
8840 GOT_symbol = symbol_new (name, undefined_section,
8841 (valueT) 0, &zero_address_frag);
8842 };
8843 return GOT_symbol;
8844 }
8845 return 0;
8846 }
8847
8848 /* Round up a section size to the appropriate boundary. */
8849
8850 valueT
8851 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8852 {
8853 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8854 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8855 {
8856 /* For a.out, force the section size to be aligned. If we don't do
8857 this, BFD will align it for us, but it will not write out the
8858 final bytes of the section. This may be a bug in BFD, but it is
8859 easier to fix it here since that is how the other a.out targets
8860 work. */
8861 int align;
8862
8863 align = bfd_get_section_alignment (stdoutput, segment);
8864 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8865 }
8866 #endif
8867
8868 return size;
8869 }
8870
8871 /* On the i386, PC-relative offsets are relative to the start of the
8872 next instruction. That is, the address of the offset, plus its
8873 size, since the offset is always the last part of the insn. */
8874
8875 long
8876 md_pcrel_from (fixS *fixP)
8877 {
8878 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8879 }
8880
8881 #ifndef I386COFF
8882
8883 static void
8884 s_bss (int ignore ATTRIBUTE_UNUSED)
8885 {
8886 int temp;
8887
8888 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8889 if (IS_ELF)
8890 obj_elf_section_change_hook ();
8891 #endif
8892 temp = get_absolute_expression ();
8893 subseg_set (bss_section, (subsegT) temp);
8894 demand_empty_rest_of_line ();
8895 }
8896
8897 #endif
8898
8899 void
8900 i386_validate_fix (fixS *fixp)
8901 {
8902 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8903 {
8904 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8905 {
8906 if (!object_64bit)
8907 abort ();
8908 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8909 }
8910 else
8911 {
8912 if (!object_64bit)
8913 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8914 else
8915 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8916 }
8917 fixp->fx_subsy = 0;
8918 }
8919 }
8920
/* Translate the internal fixup FIXP into a BFD relocation entry for
   SECTION.  Returns a freshly xmalloc'd arelent; the caller (write.c)
   owns it.  Emits diagnostics via as_bad_where for fixups that cannot
   be represented.  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
      /* These fixup types already name their final BFD reloc code;
	 pass them through unchanged.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* A pc-relative 32S fixup is sized like any other pc-relative
	 fixup.  Fall through.  */
    default:
      /* Everything else is mapped to a generic reloc chosen by the
	 fixup's size and pc-relativity.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      /* Pick a garbage-but-valid code so assembly can continue
		 after the error.  */
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ really means "offset
     of the GOT relative to here"; rewrite it as the appropriate GOTPC
     reloc for the object format.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  /* Likewise for 64-bit references to the GOT.  */
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = (arelent *) xmalloc (sizeof (arelent));
  rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
	rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* In x32 mode (ILP32 on x86-64) the 64-bit reloc forms below
	 have no valid encoding; diagnose them instead of emitting
	 garbage.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	    /* For these pc-relative GOT/PLT/TLS relocs the addend is
	       the offset adjusted for the size of the field being
	       relocated.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
9114
9115 #include "tc-i386-intel.c"
9116
9117 void
9118 tc_x86_parse_to_dw2regnum (expressionS *exp)
9119 {
9120 int saved_naked_reg;
9121 char saved_register_dot;
9122
9123 saved_naked_reg = allow_naked_reg;
9124 allow_naked_reg = 1;
9125 saved_register_dot = register_chars['.'];
9126 register_chars['.'] = '.';
9127 allow_pseudo_reg = 1;
9128 expression_and_evaluate (exp);
9129 allow_pseudo_reg = 0;
9130 register_chars['.'] = saved_register_dot;
9131 allow_naked_reg = saved_naked_reg;
9132
9133 if (exp->X_op == O_register && exp->X_add_number >= 0)
9134 {
9135 if ((addressT) exp->X_add_number < i386_regtab_size)
9136 {
9137 exp->X_op = O_constant;
9138 exp->X_add_number = i386_regtab[exp->X_add_number]
9139 .dw2_regnum[flag_code >> 1];
9140 }
9141 else
9142 exp->X_op = O_illegal;
9143 }
9144 }
9145
9146 void
9147 tc_x86_frame_initial_instructions (void)
9148 {
9149 static unsigned int sp_regno[2];
9150
9151 if (!sp_regno[flag_code >> 1])
9152 {
9153 char *saved_input = input_line_pointer;
9154 char sp[][4] = {"esp", "rsp"};
9155 expressionS exp;
9156
9157 input_line_pointer = sp[flag_code >> 1];
9158 tc_x86_parse_to_dw2regnum (&exp);
9159 gas_assert (exp.X_op == O_constant);
9160 sp_regno[flag_code >> 1] = exp.X_add_number;
9161 input_line_pointer = saved_input;
9162 }
9163
9164 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9165 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9166 }
9167
/* Return the size in bytes of an address in DWARF2 debug info.  The
   x32 ILP32 ABI uses 32-bit addresses even though the architecture's
   word size is 64 bits; everything else follows the output BFD's
   bits-per-address.  */
int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
9177
9178 int
9179 i386_elf_section_type (const char *str, size_t len)
9180 {
9181 if (flag_code == CODE_64BIT
9182 && len == sizeof ("unwind") - 1
9183 && strncmp (str, "unwind", 6) == 0)
9184 return SHT_X86_64_UNWIND;
9185
9186 return -1;
9187 }
9188
#ifdef TE_SOLARIS
/* On Solaris, give the .eh_frame section the x86-64 unwind section
   type when assembling 64-bit code.  */
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code != CODE_64BIT)
    return;

  elf_section_type (sec) = SHT_X86_64_UNWIND;
}
#endif
9197
#ifdef TE_PE
/* Emit a SIZE-byte section-relative (SECREL) reference to SYMBOL for
   PE/COFF DWARF2 output.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS secrel_exp;

  /* Build an O_secrel expression for SYMBOL with no extra addend and
     hand it to the generic expression emitter.  */
  secrel_exp.X_add_number = 0;
  secrel_exp.X_add_symbol = symbol;
  secrel_exp.X_op = O_secrel;

  emit_expr (&secrel_exp, size);
}
#endif
9210
9211 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9212 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9213
9214 bfd_vma
9215 x86_64_section_letter (int letter, char **ptr_msg)
9216 {
9217 if (flag_code == CODE_64BIT)
9218 {
9219 if (letter == 'l')
9220 return SHF_X86_64_LARGE;
9221
9222 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9223 }
9224 else
9225 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9226 return -1;
9227 }
9228
9229 bfd_vma
9230 x86_64_section_word (char *str, size_t len)
9231 {
9232 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9233 return SHF_X86_64_LARGE;
9234
9235 return -1;
9236 }
9237
9238 static void
9239 handle_large_common (int small ATTRIBUTE_UNUSED)
9240 {
9241 if (flag_code != CODE_64BIT)
9242 {
9243 s_comm_internal (0, elf_common_parse);
9244 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9245 }
9246 else
9247 {
9248 static segT lbss_section;
9249 asection *saved_com_section_ptr = elf_com_section_ptr;
9250 asection *saved_bss_section = bss_section;
9251
9252 if (lbss_section == NULL)
9253 {
9254 flagword applicable;
9255 segT seg = now_seg;
9256 subsegT subseg = now_subseg;
9257
9258 /* The .lbss section is for local .largecomm symbols. */
9259 lbss_section = subseg_new (".lbss", 0);
9260 applicable = bfd_applicable_section_flags (stdoutput);
9261 bfd_set_section_flags (stdoutput, lbss_section,
9262 applicable & SEC_ALLOC);
9263 seg_info (lbss_section)->bss = 1;
9264
9265 subseg_set (seg, subseg);
9266 }
9267
9268 elf_com_section_ptr = &_bfd_elf_large_com_section;
9269 bss_section = lbss_section;
9270
9271 s_comm_internal (0, elf_common_parse);
9272
9273 elf_com_section_ptr = saved_com_section_ptr;
9274 bss_section = saved_bss_section;
9275 }
9276 }
9277 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */
This page took 0.222128 seconds and 5 git commands to generate.