1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
 59    WAIT_PREFIX must be the first prefix since FWAIT is really an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 LOCKREP_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define LOCKREP_PREFIX 4
68 #define REX_PREFIX 5 /* must come last. */
69 #define MAX_PREFIXES 6 /* max prefixes per opcode */
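/* Illustrative note (added comment, not in the original source): for an
   instruction such as `lock addl $1, %fs:(%eax)' the parser records the
   0x64 segment override in the SEG_PREFIX slot and the 0xf0 LOCK byte in
   the LOCKREP_PREFIX slot of i.prefix[]; since prefixes are emitted in
   slot order, the segment override is written before LOCK, and a REX
   prefix (if any) always comes out last.  */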
70
71 /* we define the syntax here (modulo base,index,scale syntax) */
72 #define REGISTER_PREFIX '%'
73 #define IMMEDIATE_PREFIX '$'
74 #define ABSOLUTE_PREFIX '*'
75
76 /* these are the instruction mnemonic suffixes in AT&T syntax or
77 memory operand size in Intel syntax. */
78 #define WORD_MNEM_SUFFIX 'w'
79 #define BYTE_MNEM_SUFFIX 'b'
80 #define SHORT_MNEM_SUFFIX 's'
81 #define LONG_MNEM_SUFFIX 'l'
82 #define QWORD_MNEM_SUFFIX 'q'
83 #define XMMWORD_MNEM_SUFFIX 'x'
84 #define YMMWORD_MNEM_SUFFIX 'y'
 85 /* Intel Syntax.  Use a non-ascii letter since it never appears
86 in instructions. */
87 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
88
89 #define END_OF_INSN '\0'
90
91 /*
92 'templates' is for grouping together 'template' structures for opcodes
93 of the same name. This is only used for storing the insns in the grand
94 ole hash table of insns.
95 The templates themselves start at START and range up to (but not including)
96 END.
97 */
98 typedef struct
99 {
100 const insn_template *start;
101 const insn_template *end;
102 }
103 templates;
104
105 /* 386 operand encoding bytes: see 386 book for details of this. */
106 typedef struct
107 {
108 unsigned int regmem; /* codes register or memory operand */
109 unsigned int reg; /* codes register operand (or extended opcode) */
110 unsigned int mode; /* how to interpret regmem & reg */
111 }
112 modrm_byte;
113
114 /* x86-64 extension prefix. */
115 typedef int rex_byte;
116
117 /* 386 opcode byte to code indirect addressing. */
118 typedef struct
119 {
120 unsigned base;
121 unsigned index;
122 unsigned scale;
123 }
124 sib_byte;
125
126 /* x86 arch names, types and features */
127 typedef struct
128 {
129 const char *name; /* arch name */
130 enum processor_type type; /* arch type */
131 i386_cpu_flags flags; /* cpu feature flags */
132 }
133 arch_entry;
134
135 static void set_code_flag (int);
136 static void set_16bit_gcc_code_flag (int);
137 static void set_intel_syntax (int);
138 static void set_intel_mnemonic (int);
139 static void set_allow_index_reg (int);
140 static void set_sse_check (int);
141 static void set_cpu_arch (int);
142 #ifdef TE_PE
143 static void pe_directive_secrel (int);
144 #endif
145 static void signed_cons (int);
146 static char *output_invalid (int c);
147 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
148 const char *);
149 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
150 const char *);
151 static int i386_att_operand (char *);
152 static int i386_intel_operand (char *, int);
153 static int i386_intel_simplify (expressionS *);
154 static int i386_intel_parse_name (const char *, expressionS *);
155 static const reg_entry *parse_register (char *, char **);
156 static char *parse_insn (char *, char *);
157 static char *parse_operands (char *, const char *);
158 static void swap_operands (void);
159 static void swap_2_operands (int, int);
160 static void optimize_imm (void);
161 static void optimize_disp (void);
162 static const insn_template *match_template (void);
163 static int check_string (void);
164 static int process_suffix (void);
165 static int check_byte_reg (void);
166 static int check_long_reg (void);
167 static int check_qword_reg (void);
168 static int check_word_reg (void);
169 static int finalize_imm (void);
170 static int process_operands (void);
171 static const seg_entry *build_modrm_byte (void);
172 static void output_insn (void);
173 static void output_imm (fragS *, offsetT);
174 static void output_disp (fragS *, offsetT);
175 #ifndef I386COFF
176 static void s_bss (int);
177 #endif
178 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
179 static void handle_large_common (int small ATTRIBUTE_UNUSED);
180 #endif
181
182 static const char *default_arch = DEFAULT_ARCH;
183
184 /* VEX prefix. */
185 typedef struct
186 {
187 /* VEX prefix is either 2 byte or 3 byte. */
188 unsigned char bytes[3];
189 unsigned int length;
190 /* Destination or source register specifier. */
191 const reg_entry *register_specifier;
192 } vex_prefix;
193
 194 /* 'md_assemble ()' gathers together information and puts it into an
 195    i386_insn.  */
196
197 union i386_op
198 {
199 expressionS *disps;
200 expressionS *imms;
201 const reg_entry *regs;
202 };
203
204 struct _i386_insn
205 {
 206   /* TM holds the template for the insn we're currently assembling.  */
207 insn_template tm;
208
209 /* SUFFIX holds the instruction size suffix for byte, word, dword
210 or qword, if given. */
211 char suffix;
212
213 /* OPERANDS gives the number of given operands. */
214 unsigned int operands;
215
216 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
217 of given register, displacement, memory operands and immediate
218 operands. */
219 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
220
221 /* TYPES [i] is the type (see above #defines) which tells us how to
222 use OP[i] for the corresponding operand. */
223 i386_operand_type types[MAX_OPERANDS];
224
225 /* Displacement expression, immediate expression, or register for each
226 operand. */
227 union i386_op op[MAX_OPERANDS];
228
229 /* Flags for operands. */
230 unsigned int flags[MAX_OPERANDS];
231 #define Operand_PCrel 1
232
233 /* Relocation type for operand */
234 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
235
236 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
237 the base index byte below. */
238 const reg_entry *base_reg;
239 const reg_entry *index_reg;
240 unsigned int log2_scale_factor;
241
242 /* SEG gives the seg_entries of this insn. They are zero unless
243 explicit segment overrides are given. */
244 const seg_entry *seg[2];
245
246 /* PREFIX holds all the given prefix opcodes (usually null).
247 PREFIXES is the number of prefix opcodes. */
248 unsigned int prefixes;
249 unsigned char prefix[MAX_PREFIXES];
250
251 /* RM and SIB are the modrm byte and the sib byte where the
252 addressing modes of this insn are encoded. */
253 modrm_byte rm;
254 rex_byte rex;
255 sib_byte sib;
256 vex_prefix vex;
257
258 /* Swap operand in encoding. */
259 unsigned int swap_operand : 1;
260 };
261
262 typedef struct _i386_insn i386_insn;
263
264 /* List of chars besides those in app.c:symbol_chars that can start an
265 operand. Used to prevent the scrubber eating vital white-space. */
266 const char extra_symbol_chars[] = "*%-(["
267 #ifdef LEX_AT
268 "@"
269 #endif
270 #ifdef LEX_QM
271 "?"
272 #endif
273 ;
274
275 #if (defined (TE_I386AIX) \
276 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
277 && !defined (TE_GNU) \
278 && !defined (TE_LINUX) \
279 && !defined (TE_NETWARE) \
280 && !defined (TE_FreeBSD) \
281 && !defined (TE_NetBSD)))
282 /* This array holds the chars that always start a comment. If the
283 pre-processor is disabled, these aren't very useful. The option
284 --divide will remove '/' from this list. */
285 const char *i386_comment_chars = "#/";
286 #define SVR4_COMMENT_CHARS 1
287 #define PREFIX_SEPARATOR '\\'
288
289 #else
290 const char *i386_comment_chars = "#";
291 #define PREFIX_SEPARATOR '/'
292 #endif
293
294 /* This array holds the chars that only start a comment at the beginning of
295 a line. If the line seems to have the form '# 123 filename'
296 .line and .file directives will appear in the pre-processed output.
297 Note that input_file.c hand checks for '#' at the beginning of the
298 first line of the input file. This is because the compiler outputs
299 #NO_APP at the beginning of its output.
300 Also note that comments started like this one will always work if
301 '/' isn't otherwise defined. */
302 const char line_comment_chars[] = "#/";
303
304 const char line_separator_chars[] = ";";
305
306 /* Chars that can be used to separate mant from exp in floating point
307 nums. */
308 const char EXP_CHARS[] = "eE";
309
310 /* Chars that mean this number is a floating point constant
311 As in 0f12.456
312 or 0d1.2345e12. */
313 const char FLT_CHARS[] = "fFdDxX";
314
315 /* Tables for lexical analysis. */
316 static char mnemonic_chars[256];
317 static char register_chars[256];
318 static char operand_chars[256];
319 static char identifier_chars[256];
320 static char digit_chars[256];
321
322 /* Lexical macros. */
323 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
324 #define is_operand_char(x) (operand_chars[(unsigned char) x])
325 #define is_register_char(x) (register_chars[(unsigned char) x])
326 #define is_space_char(x) ((x) == ' ')
327 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
328 #define is_digit_char(x) (digit_chars[(unsigned char) x])
329
330 /* All non-digit non-letter characters that may occur in an operand. */
331 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
332
333 /* md_assemble() always leaves the strings it's passed unaltered. To
334 effect this we maintain a stack of saved characters that we've smashed
335 with '\0's (indicating end of strings for various sub-fields of the
336 assembler instruction). */
337 static char save_stack[32];
338 static char *save_stack_p;
339 #define END_STRING_AND_SAVE(s) \
340 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
341 #define RESTORE_END_STRING(s) \
342 do { *(s) = *--save_stack_p; } while (0)
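/* Illustrative usage (added comment, not in the original source):
   END_STRING_AND_SAVE (p) pushes the character at P onto save_stack and
   overwrites it with '\0' so that a sub-field of the instruction can be
   handled as a NUL-terminated string in place; RESTORE_END_STRING (p)
   later pops the saved character back.  Calls must nest strictly (last
   saved, first restored) for the stack to unwind correctly.  */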
343
344 /* The instruction we're assembling. */
345 static i386_insn i;
346
347 /* Possible templates for current insn. */
348 static const templates *current_templates;
349
350 /* Per instruction expressionS buffers: max displacements & immediates. */
351 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
352 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
353
354 /* Current operand we are working on. */
355 static int this_operand = -1;
356
 357 /* We support four different modes: 16-, 32- and 64-bit code, plus the
 358    .code16gcc variant of 16-bit mode.  FLAG_CODE distinguishes the first three.  */
359
360 enum flag_code {
361 CODE_32BIT,
362 CODE_16BIT,
363 CODE_64BIT };
364
365 static enum flag_code flag_code;
366 static unsigned int object_64bit;
367 static int use_rela_relocations = 0;
368
369 /* The names used to print error messages. */
370 static const char *flag_code_names[] =
371 {
372 "32",
373 "16",
374 "64"
375 };
376
377 /* 1 for intel syntax,
378 0 if att syntax. */
379 static int intel_syntax = 0;
380
381 /* 1 for intel mnemonic,
382 0 if att mnemonic. */
383 static int intel_mnemonic = !SYSV386_COMPAT;
384
385 /* 1 if support old (<= 2.8.1) versions of gcc. */
386 static int old_gcc = OLDGCC_COMPAT;
387
388 /* 1 if pseudo registers are permitted. */
389 static int allow_pseudo_reg = 0;
390
391 /* 1 if register prefix % not required. */
392 static int allow_naked_reg = 0;
393
 394 /* 1 if pseudo index register, eiz/riz, is allowed.  */
395 static int allow_index_reg = 0;
396
397 static enum
398 {
399 sse_check_none = 0,
400 sse_check_warning,
401 sse_check_error
402 }
403 sse_check;
404
405 /* Register prefix used for error message. */
406 static const char *register_prefix = "%";
407
408 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
409 leave, push, and pop instructions so that gcc has the same stack
410 frame as in 32 bit mode. */
411 static char stackop_size = '\0';
412
413 /* Non-zero to optimize code alignment. */
414 int optimize_align_code = 1;
415
416 /* Non-zero to quieten some warnings. */
417 static int quiet_warnings = 0;
418
419 /* CPU name. */
420 static const char *cpu_arch_name = NULL;
421 static char *cpu_sub_arch_name = NULL;
422
423 /* CPU feature flags. */
424 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
425
 426 /* Non-zero once a cpu to generate instructions for has been selected.  */
427 static int cpu_arch_tune_set = 0;
428
429 /* Cpu we are generating instructions for. */
430 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
431
432 /* CPU feature flags of cpu we are generating instructions for. */
433 static i386_cpu_flags cpu_arch_tune_flags;
434
435 /* CPU instruction set architecture used. */
436 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
437
438 /* CPU feature flags of instruction set architecture used. */
439 i386_cpu_flags cpu_arch_isa_flags;
440
441 /* If set, conditional jumps are not automatically promoted to handle
442 larger than a byte offset. */
443 static unsigned int no_cond_jump_promotion = 0;
444
445 /* Encode SSE instructions with VEX prefix. */
446 static unsigned int sse2avx;
447
448 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
449 static symbolS *GOT_symbol;
450
451 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
452 unsigned int x86_dwarf2_return_column;
453
454 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
455 int x86_cie_data_alignment;
456
457 /* Interface to relax_segment.
458 There are 3 major relax states for 386 jump insns because the
459 different types of jumps add different sizes to frags when we're
460 figuring out what sort of jump to choose to reach a given label. */
461
462 /* Types. */
463 #define UNCOND_JUMP 0
464 #define COND_JUMP 1
465 #define COND_JUMP86 2
466
467 /* Sizes. */
468 #define CODE16 1
469 #define SMALL 0
470 #define SMALL16 (SMALL | CODE16)
471 #define BIG 2
472 #define BIG16 (BIG | CODE16)
473
474 #ifndef INLINE
475 #ifdef __GNUC__
476 #define INLINE __inline__
477 #else
478 #define INLINE
479 #endif
480 #endif
481
482 #define ENCODE_RELAX_STATE(type, size) \
483 ((relax_substateT) (((type) << 2) | (size)))
484 #define TYPE_FROM_RELAX_STATE(s) \
485 ((s) >> 2)
486 #define DISP_SIZE_FROM_RELAX_STATE(s) \
487 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
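/* Worked example (added comment): ENCODE_RELAX_STATE (COND_JUMP, BIG)
   is (1 << 2) | 2 == 6; TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP,
   and DISP_SIZE_FROM_RELAX_STATE (6) yields 4, i.e. a 32-bit
   displacement.  */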
488
489 /* This table is used by relax_frag to promote short jumps to long
490 ones where necessary. SMALL (short) jumps may be promoted to BIG
491 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
492 don't allow a short jump in a 32 bit code segment to be promoted to
493 a 16 bit offset jump because it's slower (requires data size
494 prefix), and doesn't work, unless the destination is in the bottom
495 64k of the code segment (The top 16 bits of eip are zeroed). */
496
497 const relax_typeS md_relax_table[] =
498 {
499 /* The fields are:
500 1) most positive reach of this state,
501 2) most negative reach of this state,
502 3) how many bytes this mode will have in the variable part of the frag
503 4) which index into the table to try if we can't fit into this one. */
504
505 /* UNCOND_JUMP states. */
506 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
507 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
508 /* dword jmp adds 4 bytes to frag:
509 0 extra opcode bytes, 4 displacement bytes. */
510 {0, 0, 4, 0},
 511   /* word jmp adds 2 bytes to frag:
512 0 extra opcode bytes, 2 displacement bytes. */
513 {0, 0, 2, 0},
514
515 /* COND_JUMP states. */
516 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
517 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
 518   /* dword conditionals add 5 bytes to frag:
519 1 extra opcode byte, 4 displacement bytes. */
520 {0, 0, 5, 0},
521 /* word conditionals add 3 bytes to frag:
522 1 extra opcode byte, 2 displacement bytes. */
523 {0, 0, 3, 0},
524
525 /* COND_JUMP86 states. */
526 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
527 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
 528   /* dword conditionals add 5 bytes to frag:
529 1 extra opcode byte, 4 displacement bytes. */
530 {0, 0, 5, 0},
531 /* word conditionals add 4 bytes to frag:
532 1 displacement byte and a 3 byte long branch insn. */
533 {0, 0, 4, 0}
534 };
535
536 static const arch_entry cpu_arch[] =
537 {
538 { "generic32", PROCESSOR_GENERIC32,
539 CPU_GENERIC32_FLAGS },
540 { "generic64", PROCESSOR_GENERIC64,
541 CPU_GENERIC64_FLAGS },
542 { "i8086", PROCESSOR_UNKNOWN,
543 CPU_NONE_FLAGS },
544 { "i186", PROCESSOR_UNKNOWN,
545 CPU_I186_FLAGS },
546 { "i286", PROCESSOR_UNKNOWN,
547 CPU_I286_FLAGS },
548 { "i386", PROCESSOR_I386,
549 CPU_I386_FLAGS },
550 { "i486", PROCESSOR_I486,
551 CPU_I486_FLAGS },
552 { "i586", PROCESSOR_PENTIUM,
553 CPU_I586_FLAGS },
554 { "i686", PROCESSOR_PENTIUMPRO,
555 CPU_I686_FLAGS },
556 { "pentium", PROCESSOR_PENTIUM,
557 CPU_I586_FLAGS },
558 { "pentiumpro", PROCESSOR_PENTIUMPRO,
559 CPU_I686_FLAGS },
560 { "pentiumii", PROCESSOR_PENTIUMPRO,
561 CPU_P2_FLAGS },
562 { "pentiumiii",PROCESSOR_PENTIUMPRO,
563 CPU_P3_FLAGS },
564 { "pentium4", PROCESSOR_PENTIUM4,
565 CPU_P4_FLAGS },
566 { "prescott", PROCESSOR_NOCONA,
567 CPU_CORE_FLAGS },
568 { "nocona", PROCESSOR_NOCONA,
569 CPU_NOCONA_FLAGS },
570 { "yonah", PROCESSOR_CORE,
571 CPU_CORE_FLAGS },
572 { "core", PROCESSOR_CORE,
573 CPU_CORE_FLAGS },
574 { "merom", PROCESSOR_CORE2,
575 CPU_CORE2_FLAGS },
576 { "core2", PROCESSOR_CORE2,
577 CPU_CORE2_FLAGS },
578 { "corei7", PROCESSOR_COREI7,
579 CPU_COREI7_FLAGS },
580 { "l1om", PROCESSOR_L1OM,
581 CPU_L1OM_FLAGS },
582 { "k6", PROCESSOR_K6,
583 CPU_K6_FLAGS },
584 { "k6_2", PROCESSOR_K6,
585 CPU_K6_2_FLAGS },
586 { "athlon", PROCESSOR_ATHLON,
587 CPU_ATHLON_FLAGS },
588 { "sledgehammer", PROCESSOR_K8,
589 CPU_K8_FLAGS },
590 { "opteron", PROCESSOR_K8,
591 CPU_K8_FLAGS },
592 { "k8", PROCESSOR_K8,
593 CPU_K8_FLAGS },
594 { "amdfam10", PROCESSOR_AMDFAM10,
595 CPU_AMDFAM10_FLAGS },
596 { ".8087", PROCESSOR_UNKNOWN,
597 CPU_8087_FLAGS },
598 { ".287", PROCESSOR_UNKNOWN,
599 CPU_287_FLAGS },
600 { ".387", PROCESSOR_UNKNOWN,
601 CPU_387_FLAGS },
602 { ".no87", PROCESSOR_UNKNOWN,
603 CPU_ANY87_FLAGS },
604 { ".mmx", PROCESSOR_UNKNOWN,
605 CPU_MMX_FLAGS },
606 { ".nommx", PROCESSOR_UNKNOWN,
607 CPU_3DNOWA_FLAGS },
608 { ".sse", PROCESSOR_UNKNOWN,
609 CPU_SSE_FLAGS },
610 { ".sse2", PROCESSOR_UNKNOWN,
611 CPU_SSE2_FLAGS },
612 { ".sse3", PROCESSOR_UNKNOWN,
613 CPU_SSE3_FLAGS },
614 { ".ssse3", PROCESSOR_UNKNOWN,
615 CPU_SSSE3_FLAGS },
616 { ".sse4.1", PROCESSOR_UNKNOWN,
617 CPU_SSE4_1_FLAGS },
618 { ".sse4.2", PROCESSOR_UNKNOWN,
619 CPU_SSE4_2_FLAGS },
620 { ".sse4", PROCESSOR_UNKNOWN,
621 CPU_SSE4_2_FLAGS },
622 { ".nosse", PROCESSOR_UNKNOWN,
623 CPU_ANY_SSE_FLAGS },
624 { ".avx", PROCESSOR_UNKNOWN,
625 CPU_AVX_FLAGS },
626 { ".noavx", PROCESSOR_UNKNOWN,
627 CPU_ANY_AVX_FLAGS },
628 { ".vmx", PROCESSOR_UNKNOWN,
629 CPU_VMX_FLAGS },
630 { ".smx", PROCESSOR_UNKNOWN,
631 CPU_SMX_FLAGS },
632 { ".xsave", PROCESSOR_UNKNOWN,
633 CPU_XSAVE_FLAGS },
634 { ".aes", PROCESSOR_UNKNOWN,
635 CPU_AES_FLAGS },
636 { ".pclmul", PROCESSOR_UNKNOWN,
637 CPU_PCLMUL_FLAGS },
638 { ".clmul", PROCESSOR_UNKNOWN,
639 CPU_PCLMUL_FLAGS },
640 { ".fma", PROCESSOR_UNKNOWN,
641 CPU_FMA_FLAGS },
642 { ".fma4", PROCESSOR_UNKNOWN,
643 CPU_FMA4_FLAGS },
644 { ".movbe", PROCESSOR_UNKNOWN,
645 CPU_MOVBE_FLAGS },
646 { ".ept", PROCESSOR_UNKNOWN,
647 CPU_EPT_FLAGS },
648 { ".clflush", PROCESSOR_UNKNOWN,
649 CPU_CLFLUSH_FLAGS },
650 { ".syscall", PROCESSOR_UNKNOWN,
651 CPU_SYSCALL_FLAGS },
652 { ".rdtscp", PROCESSOR_UNKNOWN,
653 CPU_RDTSCP_FLAGS },
654 { ".3dnow", PROCESSOR_UNKNOWN,
655 CPU_3DNOW_FLAGS },
656 { ".3dnowa", PROCESSOR_UNKNOWN,
657 CPU_3DNOWA_FLAGS },
658 { ".padlock", PROCESSOR_UNKNOWN,
659 CPU_PADLOCK_FLAGS },
660 { ".pacifica", PROCESSOR_UNKNOWN,
661 CPU_SVME_FLAGS },
662 { ".svme", PROCESSOR_UNKNOWN,
663 CPU_SVME_FLAGS },
664 { ".sse4a", PROCESSOR_UNKNOWN,
665 CPU_SSE4A_FLAGS },
666 { ".abm", PROCESSOR_UNKNOWN,
667 CPU_ABM_FLAGS },
668 };
669
670 #ifdef I386COFF
671 /* Like s_lcomm_internal in gas/read.c but the alignment string
672 is allowed to be optional. */
673
674 static symbolS *
675 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
676 {
677 addressT align = 0;
678
679 SKIP_WHITESPACE ();
680
681 if (needs_align
682 && *input_line_pointer == ',')
683 {
684 align = parse_align (needs_align - 1);
685
686 if (align == (addressT) -1)
687 return NULL;
688 }
689 else
690 {
691 if (size >= 8)
692 align = 3;
693 else if (size >= 4)
694 align = 2;
695 else if (size >= 2)
696 align = 1;
697 else
698 align = 0;
699 }
700
701 bss_alloc (symbolP, size, align);
702 return symbolP;
703 }
704
705 static void
706 pe_lcomm (int needs_align)
707 {
708 s_comm_internal (needs_align * 2, pe_lcomm_internal);
709 }
710 #endif
711
712 const pseudo_typeS md_pseudo_table[] =
713 {
714 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
715 {"align", s_align_bytes, 0},
716 #else
717 {"align", s_align_ptwo, 0},
718 #endif
719 {"arch", set_cpu_arch, 0},
720 #ifndef I386COFF
721 {"bss", s_bss, 0},
722 #else
723 {"lcomm", pe_lcomm, 1},
724 #endif
725 {"ffloat", float_cons, 'f'},
726 {"dfloat", float_cons, 'd'},
727 {"tfloat", float_cons, 'x'},
728 {"value", cons, 2},
729 {"slong", signed_cons, 4},
730 {"noopt", s_ignore, 0},
731 {"optim", s_ignore, 0},
732 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
733 {"code16", set_code_flag, CODE_16BIT},
734 {"code32", set_code_flag, CODE_32BIT},
735 {"code64", set_code_flag, CODE_64BIT},
736 {"intel_syntax", set_intel_syntax, 1},
737 {"att_syntax", set_intel_syntax, 0},
738 {"intel_mnemonic", set_intel_mnemonic, 1},
739 {"att_mnemonic", set_intel_mnemonic, 0},
740 {"allow_index_reg", set_allow_index_reg, 1},
741 {"disallow_index_reg", set_allow_index_reg, 0},
742 {"sse_check", set_sse_check, 0},
743 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
744 {"largecomm", handle_large_common, 0},
745 #else
746 {"file", (void (*) (int)) dwarf2_directive_file, 0},
747 {"loc", dwarf2_directive_loc, 0},
748 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
749 #endif
750 #ifdef TE_PE
751 {"secrel32", pe_directive_secrel, 0},
752 #endif
753 {0, 0, 0}
754 };
755
756 /* For interface with expression (). */
757 extern char *input_line_pointer;
758
759 /* Hash table for instruction mnemonic lookup. */
760 static struct hash_control *op_hash;
761
762 /* Hash table for register lookup. */
763 static struct hash_control *reg_hash;
764 \f
765 void
766 i386_align_code (fragS *fragP, int count)
767 {
768 /* Various efficient no-op patterns for aligning code labels.
769 Note: Don't try to assemble the instructions in the comments.
770 0L and 0w are not legal. */
771 static const char f32_1[] =
772 {0x90}; /* nop */
773 static const char f32_2[] =
774 {0x66,0x90}; /* xchg %ax,%ax */
775 static const char f32_3[] =
776 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
777 static const char f32_4[] =
778 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
779 static const char f32_5[] =
780 {0x90, /* nop */
781 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
782 static const char f32_6[] =
783 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
784 static const char f32_7[] =
785 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
786 static const char f32_8[] =
787 {0x90, /* nop */
788 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
789 static const char f32_9[] =
790 {0x89,0xf6, /* movl %esi,%esi */
791 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
792 static const char f32_10[] =
793 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
794 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
795 static const char f32_11[] =
796 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
797 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
798 static const char f32_12[] =
799 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
800 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
801 static const char f32_13[] =
802 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
803 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
804 static const char f32_14[] =
805 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
806 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
807 static const char f16_3[] =
808 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
809 static const char f16_4[] =
810 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
811 static const char f16_5[] =
812 {0x90, /* nop */
813 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
814 static const char f16_6[] =
815 {0x89,0xf6, /* mov %si,%si */
816 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
817 static const char f16_7[] =
818 {0x8d,0x74,0x00, /* lea 0(%si),%si */
819 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
820 static const char f16_8[] =
821 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
822 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
823 static const char jump_31[] =
824 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
825 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
826 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
827 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
828 static const char *const f32_patt[] = {
829 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
830 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
831 };
832 static const char *const f16_patt[] = {
833 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
834 };
835 /* nopl (%[re]ax) */
836 static const char alt_3[] =
837 {0x0f,0x1f,0x00};
838 /* nopl 0(%[re]ax) */
839 static const char alt_4[] =
840 {0x0f,0x1f,0x40,0x00};
841 /* nopl 0(%[re]ax,%[re]ax,1) */
842 static const char alt_5[] =
843 {0x0f,0x1f,0x44,0x00,0x00};
844 /* nopw 0(%[re]ax,%[re]ax,1) */
845 static const char alt_6[] =
846 {0x66,0x0f,0x1f,0x44,0x00,0x00};
847 /* nopl 0L(%[re]ax) */
848 static const char alt_7[] =
849 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
850 /* nopl 0L(%[re]ax,%[re]ax,1) */
851 static const char alt_8[] =
852 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
853 /* nopw 0L(%[re]ax,%[re]ax,1) */
854 static const char alt_9[] =
855 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
856 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
857 static const char alt_10[] =
858 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
859 /* data16
860 nopw %cs:0L(%[re]ax,%[re]ax,1) */
861 static const char alt_long_11[] =
862 {0x66,
863 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
864 /* data16
865 data16
866 nopw %cs:0L(%[re]ax,%[re]ax,1) */
867 static const char alt_long_12[] =
868 {0x66,
869 0x66,
870 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
871 /* data16
872 data16
873 data16
874 nopw %cs:0L(%[re]ax,%[re]ax,1) */
875 static const char alt_long_13[] =
876 {0x66,
877 0x66,
878 0x66,
879 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
880 /* data16
881 data16
882 data16
883 data16
884 nopw %cs:0L(%[re]ax,%[re]ax,1) */
885 static const char alt_long_14[] =
886 {0x66,
887 0x66,
888 0x66,
889 0x66,
890 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
891 /* data16
892 data16
893 data16
894 data16
895 data16
896 nopw %cs:0L(%[re]ax,%[re]ax,1) */
897 static const char alt_long_15[] =
898 {0x66,
899 0x66,
900 0x66,
901 0x66,
902 0x66,
903 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
904 /* nopl 0(%[re]ax,%[re]ax,1)
905 nopw 0(%[re]ax,%[re]ax,1) */
906 static const char alt_short_11[] =
907 {0x0f,0x1f,0x44,0x00,0x00,
908 0x66,0x0f,0x1f,0x44,0x00,0x00};
909 /* nopw 0(%[re]ax,%[re]ax,1)
910 nopw 0(%[re]ax,%[re]ax,1) */
911 static const char alt_short_12[] =
912 {0x66,0x0f,0x1f,0x44,0x00,0x00,
913 0x66,0x0f,0x1f,0x44,0x00,0x00};
914 /* nopw 0(%[re]ax,%[re]ax,1)
915 nopl 0L(%[re]ax) */
916 static const char alt_short_13[] =
917 {0x66,0x0f,0x1f,0x44,0x00,0x00,
918 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
919 /* nopl 0L(%[re]ax)
920 nopl 0L(%[re]ax) */
921 static const char alt_short_14[] =
922 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
923 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
924 /* nopl 0L(%[re]ax)
925 nopl 0L(%[re]ax,%[re]ax,1) */
926 static const char alt_short_15[] =
927 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
928 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
929 static const char *const alt_short_patt[] = {
930 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
931 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
932 alt_short_14, alt_short_15
933 };
934 static const char *const alt_long_patt[] = {
935 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
936 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
937 alt_long_14, alt_long_15
938 };
939
 940   /* Only emit padding for a positive count no larger than
        MAX_MEM_FOR_RS_ALIGN_CODE.  */
941 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
942 return;
943
944 /* We need to decide which NOP sequence to use for 32bit and
945 64bit. When -mtune= is used:
946
947 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
948 PROCESSOR_GENERIC32, f32_patt will be used.
949 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
950 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
951 PROCESSOR_GENERIC64, alt_long_patt will be used.
952 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
953 PROCESSOR_AMDFAM10, alt_short_patt will be used.
954
955 When -mtune= isn't used, alt_long_patt will be used if
956 cpu_arch_isa_flags has Cpu686. Otherwise, f32_patt will
957 be used.
958
959 When -march= or .arch is used, we can't use anything beyond
960 cpu_arch_isa_flags. */
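  /* Illustrative example (added comment): a 7-byte gap padded from
     alt_long_patt uses alt_7, the single 7-byte `nopl 0L(%[re]ax)',
     while the same gap padded from f32_patt uses f32_7,
     `leal 0L(%esi,1),%esi'.  */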
961
962 if (flag_code == CODE_16BIT)
963 {
964 if (count > 8)
965 {
966 memcpy (fragP->fr_literal + fragP->fr_fix,
967 jump_31, count);
968 /* Adjust jump offset. */
969 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
970 }
971 else
972 memcpy (fragP->fr_literal + fragP->fr_fix,
973 f16_patt[count - 1], count);
974 }
975 else
976 {
977 const char *const *patt = NULL;
978
979 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
980 {
981 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
982 switch (cpu_arch_tune)
983 {
984 case PROCESSOR_UNKNOWN:
985 /* We use cpu_arch_isa_flags to check if we SHOULD
986 optimize for Cpu686. */
987 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
988 patt = alt_long_patt;
989 else
990 patt = f32_patt;
991 break;
992 case PROCESSOR_PENTIUMPRO:
993 case PROCESSOR_PENTIUM4:
994 case PROCESSOR_NOCONA:
995 case PROCESSOR_CORE:
996 case PROCESSOR_CORE2:
997 case PROCESSOR_COREI7:
998 case PROCESSOR_L1OM:
999 case PROCESSOR_GENERIC64:
1000 patt = alt_long_patt;
1001 break;
1002 case PROCESSOR_K6:
1003 case PROCESSOR_ATHLON:
1004 case PROCESSOR_K8:
1005 case PROCESSOR_AMDFAM10:
1006 patt = alt_short_patt;
1007 break;
1008 case PROCESSOR_I386:
1009 case PROCESSOR_I486:
1010 case PROCESSOR_PENTIUM:
1011 case PROCESSOR_GENERIC32:
1012 patt = f32_patt;
1013 break;
1014 }
1015 }
1016 else
1017 {
1018 switch (fragP->tc_frag_data.tune)
1019 {
1020 case PROCESSOR_UNKNOWN:
1021 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1022 PROCESSOR_UNKNOWN. */
1023 abort ();
1024 break;
1025
1026 case PROCESSOR_I386:
1027 case PROCESSOR_I486:
1028 case PROCESSOR_PENTIUM:
1029 case PROCESSOR_K6:
1030 case PROCESSOR_ATHLON:
1031 case PROCESSOR_K8:
1032 case PROCESSOR_AMDFAM10:
1033 case PROCESSOR_GENERIC32:
1034 /* We use cpu_arch_isa_flags to check if we CAN optimize
1035 for Cpu686. */
1036 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1037 patt = alt_short_patt;
1038 else
1039 patt = f32_patt;
1040 break;
1041 case PROCESSOR_PENTIUMPRO:
1042 case PROCESSOR_PENTIUM4:
1043 case PROCESSOR_NOCONA:
1044 case PROCESSOR_CORE:
1045 case PROCESSOR_CORE2:
1046 case PROCESSOR_COREI7:
1047 case PROCESSOR_L1OM:
1048 if (fragP->tc_frag_data.isa_flags.bitfield.cpui686)
1049 patt = alt_long_patt;
1050 else
1051 patt = f32_patt;
1052 break;
1053 case PROCESSOR_GENERIC64:
1054 patt = alt_long_patt;
1055 break;
1056 }
1057 }
1058
1059 if (patt == f32_patt)
1060 {
 1061       /* If the padding is less than the limit (15 bytes, or 3 bytes in
 1062          64-bit mode), we use the normal patterns.  Otherwise, we use a
 1063          jump instruction and adjust its offset.  */
1064 int limit;
1065
1066 /* For 64bit, the limit is 3 bytes. */
1067 if (flag_code == CODE_64BIT
1068 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1069 limit = 3;
1070 else
1071 limit = 15;
1072 if (count < limit)
1073 memcpy (fragP->fr_literal + fragP->fr_fix,
1074 patt[count - 1], count);
1075 else
1076 {
1077 memcpy (fragP->fr_literal + fragP->fr_fix,
1078 jump_31, count);
1079 /* Adjust jump offset. */
1080 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1081 }
1082 }
1083 else
1084 {
 1085       /* The maximum length of an instruction is 15 bytes.  If the
 1086          padding is greater than 15 bytes and we don't use a jump,
 1087          we have to break it into smaller pieces.  */
1088 int padding = count;
1089 while (padding > 15)
1090 {
1091 padding -= 15;
1092 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1093 patt [14], 15);
1094 }
1095
1096 if (padding)
1097 memcpy (fragP->fr_literal + fragP->fr_fix,
1098 patt [padding - 1], padding);
1099 }
1100 }
1101 fragP->fr_var = count;
1102 }
1103
1104 static INLINE int
1105 operand_type_all_zero (const union i386_operand_type *x)
1106 {
1107 switch (ARRAY_SIZE(x->array))
1108 {
1109 case 3:
1110 if (x->array[2])
1111 return 0;
1112 case 2:
1113 if (x->array[1])
1114 return 0;
1115 case 1:
1116 return !x->array[0];
1117 default:
1118 abort ();
1119 }
1120 }
1121
1122 static INLINE void
1123 operand_type_set (union i386_operand_type *x, unsigned int v)
1124 {
1125 switch (ARRAY_SIZE(x->array))
1126 {
1127 case 3:
1128 x->array[2] = v;
1129 case 2:
1130 x->array[1] = v;
1131 case 1:
1132 x->array[0] = v;
1133 break;
1134 default:
1135 abort ();
1136 }
1137 }
1138
1139 static INLINE int
1140 operand_type_equal (const union i386_operand_type *x,
1141 const union i386_operand_type *y)
1142 {
1143 switch (ARRAY_SIZE(x->array))
1144 {
1145 case 3:
1146 if (x->array[2] != y->array[2])
1147 return 0;
1148 case 2:
1149 if (x->array[1] != y->array[1])
1150 return 0;
1151 case 1:
1152 return x->array[0] == y->array[0];
1153 break;
1154 default:
1155 abort ();
1156 }
1157 }
1158
1159 static INLINE int
1160 cpu_flags_all_zero (const union i386_cpu_flags *x)
1161 {
1162 switch (ARRAY_SIZE(x->array))
1163 {
1164 case 3:
1165 if (x->array[2])
1166 return 0;
1167 case 2:
1168 if (x->array[1])
1169 return 0;
1170 case 1:
1171 return !x->array[0];
1172 default:
1173 abort ();
1174 }
1175 }
1176
1177 static INLINE void
1178 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1179 {
1180 switch (ARRAY_SIZE(x->array))
1181 {
1182 case 3:
1183 x->array[2] = v;
1184 case 2:
1185 x->array[1] = v;
1186 case 1:
1187 x->array[0] = v;
1188 break;
1189 default:
1190 abort ();
1191 }
1192 }
1193
1194 static INLINE int
1195 cpu_flags_equal (const union i386_cpu_flags *x,
1196 const union i386_cpu_flags *y)
1197 {
1198 switch (ARRAY_SIZE(x->array))
1199 {
1200 case 3:
1201 if (x->array[2] != y->array[2])
1202 return 0;
1203 case 2:
1204 if (x->array[1] != y->array[1])
1205 return 0;
1206 case 1:
1207 return x->array[0] == y->array[0];
1208 break;
1209 default:
1210 abort ();
1211 }
1212 }
1213
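/* For example (added comment): in cpu_flags_check_cpu64 below, a template
   flagged CpuNo64 fails the check in 64-bit mode, and a Cpu64-only
   template fails it outside 64-bit mode.  */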
1214 static INLINE int
1215 cpu_flags_check_cpu64 (i386_cpu_flags f)
1216 {
1217 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1218 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1219 }
1220
1221 static INLINE i386_cpu_flags
1222 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1223 {
1224 switch (ARRAY_SIZE (x.array))
1225 {
1226 case 3:
1227 x.array [2] &= y.array [2];
1228 case 2:
1229 x.array [1] &= y.array [1];
1230 case 1:
1231 x.array [0] &= y.array [0];
1232 break;
1233 default:
1234 abort ();
1235 }
1236 return x;
1237 }
1238
1239 static INLINE i386_cpu_flags
1240 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1241 {
1242 switch (ARRAY_SIZE (x.array))
1243 {
1244 case 3:
1245 x.array [2] |= y.array [2];
1246 case 2:
1247 x.array [1] |= y.array [1];
1248 case 1:
1249 x.array [0] |= y.array [0];
1250 break;
1251 default:
1252 abort ();
1253 }
1254 return x;
1255 }
1256
1257 static INLINE i386_cpu_flags
1258 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1259 {
1260 switch (ARRAY_SIZE (x.array))
1261 {
1262 case 3:
1263 x.array [2] &= ~y.array [2];
1264 case 2:
1265 x.array [1] &= ~y.array [1];
1266 case 1:
1267 x.array [0] &= ~y.array [0];
1268 break;
1269 default:
1270 abort ();
1271 }
1272 return x;
1273 }
1274
1275 #define CPU_FLAGS_ARCH_MATCH 0x1
1276 #define CPU_FLAGS_64BIT_MATCH 0x2
1277 #define CPU_FLAGS_AES_MATCH 0x4
1278 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1279 #define CPU_FLAGS_AVX_MATCH 0x10
1280
1281 #define CPU_FLAGS_32BIT_MATCH \
1282 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1283 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1284 #define CPU_FLAGS_PERFECT_MATCH \
1285 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
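/* Worked example (added comment): CPU_FLAGS_32BIT_MATCH is
   0x1 | 0x4 | 0x8 | 0x10 == 0x1d, and CPU_FLAGS_PERFECT_MATCH adds the
   64-bit bit, giving 0x1f; a template that is fully usable with the
   selected CPU and mode scores CPU_FLAGS_PERFECT_MATCH in
   cpu_flags_match () below.  */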
1286
1287 /* Return CPU flags match bits. */
1288
1289 static int
1290 cpu_flags_match (const insn_template *t)
1291 {
1292 i386_cpu_flags x = t->cpu_flags;
1293 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1294
1295 x.bitfield.cpu64 = 0;
1296 x.bitfield.cpuno64 = 0;
1297
1298 if (cpu_flags_all_zero (&x))
1299 {
1300 /* This instruction is available on all archs. */
1301 match |= CPU_FLAGS_32BIT_MATCH;
1302 }
1303 else
1304 {
1305 /* This instruction is available only on some archs. */
1306 i386_cpu_flags cpu = cpu_arch_flags;
1307
1308 cpu.bitfield.cpu64 = 0;
1309 cpu.bitfield.cpuno64 = 0;
1310 cpu = cpu_flags_and (x, cpu);
1311 if (!cpu_flags_all_zero (&cpu))
1312 {
1313 if (x.bitfield.cpuavx)
1314 {
1315 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1316 if (cpu.bitfield.cpuavx)
1317 {
1318 /* Check SSE2AVX. */
 1319                 if (!t->opcode_modifier.sse2avx || sse2avx)
1320 {
1321 match |= (CPU_FLAGS_ARCH_MATCH
1322 | CPU_FLAGS_AVX_MATCH);
1323 /* Check AES. */
1324 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1325 match |= CPU_FLAGS_AES_MATCH;
1326 /* Check PCLMUL. */
1327 if (!x.bitfield.cpupclmul
1328 || cpu.bitfield.cpupclmul)
1329 match |= CPU_FLAGS_PCLMUL_MATCH;
1330 }
1331 }
1332 else
1333 match |= CPU_FLAGS_ARCH_MATCH;
1334 }
1335 else
1336 match |= CPU_FLAGS_32BIT_MATCH;
1337 }
1338 }
1339 return match;
1340 }
1341
1342 static INLINE i386_operand_type
1343 operand_type_and (i386_operand_type x, i386_operand_type y)
1344 {
1345 switch (ARRAY_SIZE (x.array))
1346 {
1347 case 3:
1348 x.array [2] &= y.array [2];
1349 case 2:
1350 x.array [1] &= y.array [1];
1351 case 1:
1352 x.array [0] &= y.array [0];
1353 break;
1354 default:
1355 abort ();
1356 }
1357 return x;
1358 }
1359
1360 static INLINE i386_operand_type
1361 operand_type_or (i386_operand_type x, i386_operand_type y)
1362 {
1363 switch (ARRAY_SIZE (x.array))
1364 {
1365 case 3:
1366 x.array [2] |= y.array [2];
1367 case 2:
1368 x.array [1] |= y.array [1];
1369 case 1:
1370 x.array [0] |= y.array [0];
1371 break;
1372 default:
1373 abort ();
1374 }
1375 return x;
1376 }
1377
1378 static INLINE i386_operand_type
1379 operand_type_xor (i386_operand_type x, i386_operand_type y)
1380 {
1381 switch (ARRAY_SIZE (x.array))
1382 {
1383 case 3:
1384 x.array [2] ^= y.array [2];
1385 case 2:
1386 x.array [1] ^= y.array [1];
1387 case 1:
1388 x.array [0] ^= y.array [0];
1389 break;
1390 default:
1391 abort ();
1392 }
1393 return x;
1394 }
1395
1396 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1397 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1398 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1399 static const i386_operand_type inoutportreg
1400 = OPERAND_TYPE_INOUTPORTREG;
1401 static const i386_operand_type reg16_inoutportreg
1402 = OPERAND_TYPE_REG16_INOUTPORTREG;
1403 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1404 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1405 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1406 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1407 static const i386_operand_type anydisp
1408 = OPERAND_TYPE_ANYDISP;
1409 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1410 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1411 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1412 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1413 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1414 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1415 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1416 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1417 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1418 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1419 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1420
1421 enum operand_type
1422 {
1423 reg,
1424 imm,
1425 disp,
1426 anymem
1427 };
1428
1429 static INLINE int
1430 operand_type_check (i386_operand_type t, enum operand_type c)
1431 {
1432 switch (c)
1433 {
1434 case reg:
1435 return (t.bitfield.reg8
1436 || t.bitfield.reg16
1437 || t.bitfield.reg32
1438 || t.bitfield.reg64);
1439
1440 case imm:
1441 return (t.bitfield.imm8
1442 || t.bitfield.imm8s
1443 || t.bitfield.imm16
1444 || t.bitfield.imm32
1445 || t.bitfield.imm32s
1446 || t.bitfield.imm64);
1447
1448 case disp:
1449 return (t.bitfield.disp8
1450 || t.bitfield.disp16
1451 || t.bitfield.disp32
1452 || t.bitfield.disp32s
1453 || t.bitfield.disp64);
1454
1455 case anymem:
1456 return (t.bitfield.disp8
1457 || t.bitfield.disp16
1458 || t.bitfield.disp32
1459 || t.bitfield.disp32s
1460 || t.bitfield.disp64
1461 || t.bitfield.baseindex);
1462
1463 default:
1464 abort ();
1465 }
1466
1467 return 0;
1468 }
1469
1470 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1471 operand J for instruction template T. */
1472
1473 static INLINE int
1474 match_reg_size (const insn_template *t, unsigned int j)
1475 {
1476 return !((i.types[j].bitfield.byte
1477 && !t->operand_types[j].bitfield.byte)
1478 || (i.types[j].bitfield.word
1479 && !t->operand_types[j].bitfield.word)
1480 || (i.types[j].bitfield.dword
1481 && !t->operand_types[j].bitfield.dword)
1482 || (i.types[j].bitfield.qword
1483 && !t->operand_types[j].bitfield.qword));
1484 }
1485
1486 /* Return 1 if there is no conflict in any size on operand J for
1487 instruction template T. */
1488
1489 static INLINE int
1490 match_mem_size (const insn_template *t, unsigned int j)
1491 {
1492 return (match_reg_size (t, j)
1493 && !((i.types[j].bitfield.unspecified
1494 && !t->operand_types[j].bitfield.unspecified)
1495 || (i.types[j].bitfield.fword
1496 && !t->operand_types[j].bitfield.fword)
1497 || (i.types[j].bitfield.tbyte
1498 && !t->operand_types[j].bitfield.tbyte)
1499 || (i.types[j].bitfield.xmmword
1500 && !t->operand_types[j].bitfield.xmmword)
1501 || (i.types[j].bitfield.ymmword
1502 && !t->operand_types[j].bitfield.ymmword)));
1503 }
1504
1505 /* Return 1 if there is no size conflict on any operands for
1506 instruction template T. */
1507
1508 static INLINE int
1509 operand_size_match (const insn_template *t)
1510 {
1511 unsigned int j;
1512 int match = 1;
1513
1514 /* Don't check jump instructions. */
1515 if (t->opcode_modifier.jump
1516 || t->opcode_modifier.jumpbyte
1517 || t->opcode_modifier.jumpdword
1518 || t->opcode_modifier.jumpintersegment)
1519 return match;
1520
1521 /* Check memory and accumulator operand size. */
1522 for (j = 0; j < i.operands; j++)
1523 {
1524 if (t->operand_types[j].bitfield.anysize)
1525 continue;
1526
1527 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1528 {
1529 match = 0;
1530 break;
1531 }
1532
1533 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1534 {
1535 match = 0;
1536 break;
1537 }
1538 }
1539
1540 if (match
1541 || (!t->opcode_modifier.d && !t->opcode_modifier.floatd))
1542 return match;
1543
1544 /* Check reverse. */
1545 gas_assert (i.operands == 2);
1546
1547 match = 1;
1548 for (j = 0; j < 2; j++)
1549 {
1550 if (t->operand_types[j].bitfield.acc
1551 && !match_reg_size (t, j ? 0 : 1))
1552 {
1553 match = 0;
1554 break;
1555 }
1556
1557 if (i.types[j].bitfield.mem
1558 && !match_mem_size (t, j ? 0 : 1))
1559 {
1560 match = 0;
1561 break;
1562 }
1563 }
1564
1565 return match;
1566 }
1567
1568 static INLINE int
1569 operand_type_match (i386_operand_type overlap,
1570 i386_operand_type given)
1571 {
1572 i386_operand_type temp = overlap;
1573
1574 temp.bitfield.jumpabsolute = 0;
1575 temp.bitfield.unspecified = 0;
1576 temp.bitfield.byte = 0;
1577 temp.bitfield.word = 0;
1578 temp.bitfield.dword = 0;
1579 temp.bitfield.fword = 0;
1580 temp.bitfield.qword = 0;
1581 temp.bitfield.tbyte = 0;
1582 temp.bitfield.xmmword = 0;
1583 temp.bitfield.ymmword = 0;
1584 if (operand_type_all_zero (&temp))
1585 return 0;
1586
1587 return (given.bitfield.baseindex == overlap.bitfield.baseindex
1588 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute);
1589 }
1590
1591 /* If given types g0 and g1 are registers they must be of the same type
1592 unless the expected operand type register overlap is null.
1593 Note that Acc in a template matches every size of reg. */
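/* Illustrative example (added comment): if a template's two register
   operand types both allow Reg16 and Reg32, pairing `%ax' with `%ebx'
   makes operand_type_register_match return 0, which the caller treats
   as a register size mismatch.  */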
1594
1595 static INLINE int
1596 operand_type_register_match (i386_operand_type m0,
1597 i386_operand_type g0,
1598 i386_operand_type t0,
1599 i386_operand_type m1,
1600 i386_operand_type g1,
1601 i386_operand_type t1)
1602 {
1603 if (!operand_type_check (g0, reg))
1604 return 1;
1605
1606 if (!operand_type_check (g1, reg))
1607 return 1;
1608
1609 if (g0.bitfield.reg8 == g1.bitfield.reg8
1610 && g0.bitfield.reg16 == g1.bitfield.reg16
1611 && g0.bitfield.reg32 == g1.bitfield.reg32
1612 && g0.bitfield.reg64 == g1.bitfield.reg64)
1613 return 1;
1614
1615 if (m0.bitfield.acc)
1616 {
1617 t0.bitfield.reg8 = 1;
1618 t0.bitfield.reg16 = 1;
1619 t0.bitfield.reg32 = 1;
1620 t0.bitfield.reg64 = 1;
1621 }
1622
1623 if (m1.bitfield.acc)
1624 {
1625 t1.bitfield.reg8 = 1;
1626 t1.bitfield.reg16 = 1;
1627 t1.bitfield.reg32 = 1;
1628 t1.bitfield.reg64 = 1;
1629 }
1630
1631 return (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1632 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1633 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1634 && !(t0.bitfield.reg64 & t1.bitfield.reg64));
1635 }
1636
1637 static INLINE unsigned int
1638 mode_from_disp_size (i386_operand_type t)
1639 {
1640 if (t.bitfield.disp8)
1641 return 1;
1642 else if (t.bitfield.disp16
1643 || t.bitfield.disp32
1644 || t.bitfield.disp32s)
1645 return 2;
1646 else
1647 return 0;
1648 }
1649
1650 static INLINE int
1651 fits_in_signed_byte (offsetT num)
1652 {
1653 return (num >= -128) && (num <= 127);
1654 }
1655
1656 static INLINE int
1657 fits_in_unsigned_byte (offsetT num)
1658 {
1659 return (num & 0xff) == num;
1660 }
1661
1662 static INLINE int
1663 fits_in_unsigned_word (offsetT num)
1664 {
1665 return (num & 0xffff) == num;
1666 }
1667
1668 static INLINE int
1669 fits_in_signed_word (offsetT num)
1670 {
1671 return (-32768 <= num) && (num <= 32767);
1672 }
1673
1674 static INLINE int
1675 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1676 {
1677 #ifndef BFD64
1678 return 1;
1679 #else
1680 return (!(((offsetT) -1 << 31) & num)
1681 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1682 #endif
1683 } /* fits_in_signed_long() */
1684
1685 static INLINE int
1686 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1687 {
1688 #ifndef BFD64
1689 return 1;
1690 #else
1691 return (num & (((offsetT) 2 << 31) - 1)) == num;
1692 #endif
1693 } /* fits_in_unsigned_long() */
1694
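/* Worked example (added comment): smallest_imm_type (200) leaves Imm8S
   clear (200 does not fit in a signed byte) but sets Imm8, Imm16, Imm32
   and Imm32S, so `$200' can match templates taking Imm8, Imm16, Imm32 or
   Imm32S operands; Imm64 is always set.  */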
1695 static i386_operand_type
1696 smallest_imm_type (offsetT num)
1697 {
1698 i386_operand_type t;
1699
1700 operand_type_set (&t, 0);
1701 t.bitfield.imm64 = 1;
1702
1703 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1704 {
1705 /* This code is disabled on the 486 because all the Imm1 forms
1706 in the opcode table are slower on the i486. They're the
1707 versions with the implicitly specified single-position
1708 displacement, which has another syntax if you really want to
1709 use that form. */
1710 t.bitfield.imm1 = 1;
1711 t.bitfield.imm8 = 1;
1712 t.bitfield.imm8s = 1;
1713 t.bitfield.imm16 = 1;
1714 t.bitfield.imm32 = 1;
1715 t.bitfield.imm32s = 1;
1716 }
1717 else if (fits_in_signed_byte (num))
1718 {
1719 t.bitfield.imm8 = 1;
1720 t.bitfield.imm8s = 1;
1721 t.bitfield.imm16 = 1;
1722 t.bitfield.imm32 = 1;
1723 t.bitfield.imm32s = 1;
1724 }
1725 else if (fits_in_unsigned_byte (num))
1726 {
1727 t.bitfield.imm8 = 1;
1728 t.bitfield.imm16 = 1;
1729 t.bitfield.imm32 = 1;
1730 t.bitfield.imm32s = 1;
1731 }
1732 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1733 {
1734 t.bitfield.imm16 = 1;
1735 t.bitfield.imm32 = 1;
1736 t.bitfield.imm32s = 1;
1737 }
1738 else if (fits_in_signed_long (num))
1739 {
1740 t.bitfield.imm32 = 1;
1741 t.bitfield.imm32s = 1;
1742 }
1743 else if (fits_in_unsigned_long (num))
1744 t.bitfield.imm32 = 1;
1745
1746 return t;
1747 }
1748
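/* Worked example (added comment): offset_in_range (0x1234, 1) masks the
   value with 0xff, warns that it was shortened, and returns 0x34, while
   offset_in_range (-1, 2) returns 0xffff silently because the value
   sign-extends cleanly into two bytes.  */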
1749 static offsetT
1750 offset_in_range (offsetT val, int size)
1751 {
1752 addressT mask;
1753
1754 switch (size)
1755 {
1756 case 1: mask = ((addressT) 1 << 8) - 1; break;
1757 case 2: mask = ((addressT) 1 << 16) - 1; break;
1758 case 4: mask = ((addressT) 2 << 31) - 1; break;
1759 #ifdef BFD64
1760 case 8: mask = ((addressT) 2 << 63) - 1; break;
1761 #endif
1762 default: abort ();
1763 }
1764
1765 #ifdef BFD64
1766 /* If BFD64, sign extend val for 32bit address mode. */
1767 if (flag_code != CODE_64BIT
1768 || i.prefix[ADDR_PREFIX])
1769 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1770 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1771 #endif
1772
1773 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1774 {
1775 char buf1[40], buf2[40];
1776
1777 sprint_value (buf1, val);
1778 sprint_value (buf2, val & mask);
1779 as_warn (_("%s shortened to %s"), buf1, buf2);
1780 }
1781 return val & mask;
1782 }
1783
 1784 /* Returns 0 if attempting to add a prefix where one from the same
 1785    class already exists, 1 if a non rep/repne prefix was added, and
 1786    2 if a rep/repne prefix was added.  */
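/* Illustrative example (added comment): calling add_prefix
   (LOCK_PREFIX_OPCODE) twice for the same instruction finds the
   LOCKREP_PREFIX slot already occupied on the second call, reports
   "same type of prefix used twice" and returns 0.  */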
1787 static int
1788 add_prefix (unsigned int prefix)
1789 {
1790 int ret = 1;
1791 unsigned int q;
1792
1793 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1794 && flag_code == CODE_64BIT)
1795 {
1796 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1797 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1798 && (prefix & (REX_R | REX_X | REX_B))))
1799 ret = 0;
1800 q = REX_PREFIX;
1801 }
1802 else
1803 {
1804 switch (prefix)
1805 {
1806 default:
1807 abort ();
1808
1809 case CS_PREFIX_OPCODE:
1810 case DS_PREFIX_OPCODE:
1811 case ES_PREFIX_OPCODE:
1812 case FS_PREFIX_OPCODE:
1813 case GS_PREFIX_OPCODE:
1814 case SS_PREFIX_OPCODE:
1815 q = SEG_PREFIX;
1816 break;
1817
1818 case REPNE_PREFIX_OPCODE:
1819 case REPE_PREFIX_OPCODE:
1820 ret = 2;
1821 /* fall thru */
1822 case LOCK_PREFIX_OPCODE:
1823 q = LOCKREP_PREFIX;
1824 break;
1825
1826 case FWAIT_OPCODE:
1827 q = WAIT_PREFIX;
1828 break;
1829
1830 case ADDR_PREFIX_OPCODE:
1831 q = ADDR_PREFIX;
1832 break;
1833
1834 case DATA_PREFIX_OPCODE:
1835 q = DATA_PREFIX;
1836 break;
1837 }
1838 if (i.prefix[q] != 0)
1839 ret = 0;
1840 }
1841
1842 if (ret)
1843 {
1844 if (!i.prefix[q])
1845 ++i.prefixes;
1846 i.prefix[q] |= prefix;
1847 }
1848 else
1849 as_bad (_("same type of prefix used twice"));
1850
1851 return ret;
1852 }
1853
1854 static void
1855 set_code_flag (int value)
1856 {
1857 flag_code = (enum flag_code) value;
1858 if (flag_code == CODE_64BIT)
1859 {
1860 cpu_arch_flags.bitfield.cpu64 = 1;
1861 cpu_arch_flags.bitfield.cpuno64 = 0;
1862 }
1863 else
1864 {
1865 cpu_arch_flags.bitfield.cpu64 = 0;
1866 cpu_arch_flags.bitfield.cpuno64 = 1;
1867 }
 1868   if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
1869 {
1870 as_bad (_("64bit mode not supported on this CPU."));
1871 }
1872 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1873 {
1874 as_bad (_("32bit mode not supported on this CPU."));
1875 }
1876 stackop_size = '\0';
1877 }
1878
1879 static void
1880 set_16bit_gcc_code_flag (int new_code_flag)
1881 {
1882 flag_code = (enum flag_code) new_code_flag;
1883 if (flag_code != CODE_16BIT)
1884 abort ();
1885 cpu_arch_flags.bitfield.cpu64 = 0;
1886 cpu_arch_flags.bitfield.cpuno64 = 1;
1887 stackop_size = LONG_MNEM_SUFFIX;
1888 }
1889
1890 static void
1891 set_intel_syntax (int syntax_flag)
1892 {
1893 /* Find out if register prefixing is specified. */
1894 int ask_naked_reg = 0;
1895
1896 SKIP_WHITESPACE ();
1897 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1898 {
1899 char *string = input_line_pointer;
1900 int e = get_symbol_end ();
1901
1902 if (strcmp (string, "prefix") == 0)
1903 ask_naked_reg = 1;
1904 else if (strcmp (string, "noprefix") == 0)
1905 ask_naked_reg = -1;
1906 else
1907 as_bad (_("bad argument to syntax directive."));
1908 *input_line_pointer = e;
1909 }
1910 demand_empty_rest_of_line ();
1911
1912 intel_syntax = syntax_flag;
1913
1914 if (ask_naked_reg == 0)
1915 allow_naked_reg = (intel_syntax
1916 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
1917 else
1918 allow_naked_reg = (ask_naked_reg < 0);
1919
1920 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
1921
1922 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
1923 identifier_chars['$'] = intel_syntax ? '$' : 0;
1924 register_prefix = allow_naked_reg ? "" : "%";
1925 }
1926
1927 static void
1928 set_intel_mnemonic (int mnemonic_flag)
1929 {
1930 intel_mnemonic = mnemonic_flag;
1931 }
1932
1933 static void
1934 set_allow_index_reg (int flag)
1935 {
1936 allow_index_reg = flag;
1937 }
1938
1939 static void
1940 set_sse_check (int dummy ATTRIBUTE_UNUSED)
1941 {
1942 SKIP_WHITESPACE ();
1943
1944 if (!is_end_of_line[(unsigned char) *input_line_pointer])
1945 {
1946 char *string = input_line_pointer;
1947 int e = get_symbol_end ();
1948
1949 if (strcmp (string, "none") == 0)
1950 sse_check = sse_check_none;
1951 else if (strcmp (string, "warning") == 0)
1952 sse_check = sse_check_warning;
1953 else if (strcmp (string, "error") == 0)
1954 sse_check = sse_check_error;
1955 else
1956 as_bad (_("bad argument to sse_check directive."));
1957 *input_line_pointer = e;
1958 }
1959 else
1960 as_bad (_("missing argument for sse_check directive"));
1961
1962 demand_empty_rest_of_line ();
1963 }
1964
1965 static void
1966 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
1967 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
1968 {
1969 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1970 static const char *arch;
1971
 1972   /* Intel L1OM is only supported on ELF.  */
1973 if (!IS_ELF)
1974 return;
1975
1976 if (!arch)
1977 {
1978 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
1979 use default_arch. */
1980 arch = cpu_arch_name;
1981 if (!arch)
1982 arch = default_arch;
1983 }
1984
1985 /* If we are targeting Intel L1OM, we must enable it. */
1986 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
1987 || new_flag.bitfield.cpul1om)
1988 return;
1989
1990 as_bad (_("`%s' is not supported on `%s'"), name, arch);
1991 #endif
1992 }
1993
1994 static void
1995 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
1996 {
1997 SKIP_WHITESPACE ();
1998
1999 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2000 {
2001 char *string = input_line_pointer;
2002 int e = get_symbol_end ();
2003 unsigned int i;
2004 i386_cpu_flags flags;
2005
2006 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
2007 {
2008 if (strcmp (string, cpu_arch[i].name) == 0)
2009 {
2010 check_cpu_arch_compatible (string, cpu_arch[i].flags);
2011
2012 if (*string != '.')
2013 {
2014 cpu_arch_name = cpu_arch[i].name;
2015 cpu_sub_arch_name = NULL;
2016 cpu_arch_flags = cpu_arch[i].flags;
2017 if (flag_code == CODE_64BIT)
2018 {
2019 cpu_arch_flags.bitfield.cpu64 = 1;
2020 cpu_arch_flags.bitfield.cpuno64 = 0;
2021 }
2022 else
2023 {
2024 cpu_arch_flags.bitfield.cpu64 = 0;
2025 cpu_arch_flags.bitfield.cpuno64 = 1;
2026 }
2027 cpu_arch_isa = cpu_arch[i].type;
2028 cpu_arch_isa_flags = cpu_arch[i].flags;
2029 if (!cpu_arch_tune_set)
2030 {
2031 cpu_arch_tune = cpu_arch_isa;
2032 cpu_arch_tune_flags = cpu_arch_isa_flags;
2033 }
2034 break;
2035 }
2036
2037 if (strncmp (string + 1, "no", 2))
2038 flags = cpu_flags_or (cpu_arch_flags,
2039 cpu_arch[i].flags);
2040 else
2041 flags = cpu_flags_and_not (cpu_arch_flags,
2042 cpu_arch[i].flags);
2043 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2044 {
2045 if (cpu_sub_arch_name)
2046 {
2047 char *name = cpu_sub_arch_name;
2048 cpu_sub_arch_name = concat (name,
2049 cpu_arch[i].name,
2050 (const char *) NULL);
2051 free (name);
2052 }
2053 else
2054 cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
2055 cpu_arch_flags = flags;
2056 }
2057 *input_line_pointer = e;
2058 demand_empty_rest_of_line ();
2059 return;
2060 }
2061 }
2062 if (i >= ARRAY_SIZE (cpu_arch))
2063 as_bad (_("no such architecture: `%s'"), string);
2064
2065 *input_line_pointer = e;
2066 }
2067 else
2068 as_bad (_("missing cpu architecture"));
2069
2070 no_cond_jump_promotion = 0;
2071 if (*input_line_pointer == ','
2072 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2073 {
2074 char *string = ++input_line_pointer;
2075 int e = get_symbol_end ();
2076
2077 if (strcmp (string, "nojumps") == 0)
2078 no_cond_jump_promotion = 1;
2079 else if (strcmp (string, "jumps") == 0)
2080 ;
2081 else
2082 as_bad (_("no such architecture modifier: `%s'"), string);
2083
2084 *input_line_pointer = e;
2085 }
2086
2087 demand_empty_rest_of_line ();
2088 }
2089
2090 enum bfd_architecture
2091 i386_arch (void)
2092 {
2093 if (cpu_arch_isa == PROCESSOR_L1OM)
2094 {
2095 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2096 || flag_code != CODE_64BIT)
2097 as_fatal (_("Intel L1OM is 64bit ELF only"));
2098 return bfd_arch_l1om;
2099 }
2100 else
2101 return bfd_arch_i386;
2102 }
2103
2104 unsigned long
2105 i386_mach ()
2106 {
2107 if (!strcmp (default_arch, "x86_64"))
2108 {
2109 if (cpu_arch_isa == PROCESSOR_L1OM)
2110 {
2111 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2112 as_fatal (_("Intel L1OM is 64bit ELF only"));
2113 return bfd_mach_l1om;
2114 }
2115 else
2116 return bfd_mach_x86_64;
2117 }
2118 else if (!strcmp (default_arch, "i386"))
2119 return bfd_mach_i386_i386;
2120 else
2121 as_fatal (_("Unknown architecture"));
2122 }
2123 \f
2124 void
2125 md_begin ()
2126 {
2127 const char *hash_err;
2128
2129 /* Initialize op_hash hash table. */
2130 op_hash = hash_new ();
2131
2132 {
2133 const insn_template *optab;
2134 templates *core_optab;
2135
2136 /* Setup for loop. */
2137 optab = i386_optab;
2138 core_optab = (templates *) xmalloc (sizeof (templates));
2139 core_optab->start = optab;
2140
2141 while (1)
2142 {
2143 ++optab;
2144 if (optab->name == NULL
2145 || strcmp (optab->name, (optab - 1)->name) != 0)
2146 {
2147 /* different name --> ship out current template list;
2148 add to hash table; & begin anew. */
2149 core_optab->end = optab;
2150 hash_err = hash_insert (op_hash,
2151 (optab - 1)->name,
2152 (void *) core_optab);
2153 if (hash_err)
2154 {
2155 as_fatal (_("Internal Error: Can't hash %s: %s"),
2156 (optab - 1)->name,
2157 hash_err);
2158 }
2159 if (optab->name == NULL)
2160 break;
2161 core_optab = (templates *) xmalloc (sizeof (templates));
2162 core_optab->start = optab;
2163 }
2164 }
2165 }
2166
2167 /* Initialize reg_hash hash table. */
2168 reg_hash = hash_new ();
2169 {
2170 const reg_entry *regtab;
2171 unsigned int regtab_size = i386_regtab_size;
2172
2173 for (regtab = i386_regtab; regtab_size--; regtab++)
2174 {
2175 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2176 if (hash_err)
2177 as_fatal (_("Internal Error: Can't hash %s: %s"),
2178 regtab->reg_name,
2179 hash_err);
2180 }
2181 }
2182
2183 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
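  /* Upper case letters are folded to lower case in mnemonic_chars and
     register_chars, so mnemonics and register names match
     case-insensitively, while operand_chars keeps the original case.  */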
2184 {
2185 int c;
2186 char *p;
2187
2188 for (c = 0; c < 256; c++)
2189 {
2190 if (ISDIGIT (c))
2191 {
2192 digit_chars[c] = c;
2193 mnemonic_chars[c] = c;
2194 register_chars[c] = c;
2195 operand_chars[c] = c;
2196 }
2197 else if (ISLOWER (c))
2198 {
2199 mnemonic_chars[c] = c;
2200 register_chars[c] = c;
2201 operand_chars[c] = c;
2202 }
2203 else if (ISUPPER (c))
2204 {
2205 mnemonic_chars[c] = TOLOWER (c);
2206 register_chars[c] = mnemonic_chars[c];
2207 operand_chars[c] = c;
2208 }
2209
2210 if (ISALPHA (c) || ISDIGIT (c))
2211 identifier_chars[c] = c;
2212 else if (c >= 128)
2213 {
2214 identifier_chars[c] = c;
2215 operand_chars[c] = c;
2216 }
2217 }
2218
2219 #ifdef LEX_AT
2220 identifier_chars['@'] = '@';
2221 #endif
2222 #ifdef LEX_QM
2223 identifier_chars['?'] = '?';
2224 operand_chars['?'] = '?';
2225 #endif
2226 digit_chars['-'] = '-';
2227 mnemonic_chars['_'] = '_';
2228 mnemonic_chars['-'] = '-';
2229 mnemonic_chars['.'] = '.';
2230 identifier_chars['_'] = '_';
2231 identifier_chars['.'] = '.';
2232
2233 for (p = operand_special_chars; *p != '\0'; p++)
2234 operand_chars[(unsigned char) *p] = *p;
2235 }
2236
2237 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2238 if (IS_ELF)
2239 {
2240 record_alignment (text_section, 2);
2241 record_alignment (data_section, 2);
2242 record_alignment (bss_section, 2);
2243 }
2244 #endif
2245
2246 if (flag_code == CODE_64BIT)
2247 {
2248 x86_dwarf2_return_column = 16;
2249 x86_cie_data_alignment = -8;
2250 }
2251 else
2252 {
2253 x86_dwarf2_return_column = 8;
2254 x86_cie_data_alignment = -4;
2255 }
2256 }
2257
2258 void
2259 i386_print_statistics (FILE *file)
2260 {
2261 hash_print_statistics (file, "i386 opcode", op_hash);
2262 hash_print_statistics (file, "i386 register", reg_hash);
2263 }
2264 \f
2265 #ifdef DEBUG386
2266
2267 /* Debugging routines for md_assemble. */
2268 static void pte (insn_template *);
2269 static void pt (i386_operand_type);
2270 static void pe (expressionS *);
2271 static void ps (symbolS *);
2272
2273 static void
2274 pi (char *line, i386_insn *x)
2275 {
2276 unsigned int i;
2277
2278 fprintf (stdout, "%s: template ", line);
2279 pte (&x->tm);
2280 fprintf (stdout, " address: base %s index %s scale %x\n",
2281 x->base_reg ? x->base_reg->reg_name : "none",
2282 x->index_reg ? x->index_reg->reg_name : "none",
2283 x->log2_scale_factor);
2284 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2285 x->rm.mode, x->rm.reg, x->rm.regmem);
2286 fprintf (stdout, " sib: base %x index %x scale %x\n",
2287 x->sib.base, x->sib.index, x->sib.scale);
2288 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2289 (x->rex & REX_W) != 0,
2290 (x->rex & REX_R) != 0,
2291 (x->rex & REX_X) != 0,
2292 (x->rex & REX_B) != 0);
2293 for (i = 0; i < x->operands; i++)
2294 {
2295 fprintf (stdout, " #%d: ", i + 1);
2296 pt (x->types[i]);
2297 fprintf (stdout, "\n");
2298 if (x->types[i].bitfield.reg8
2299 || x->types[i].bitfield.reg16
2300 || x->types[i].bitfield.reg32
2301 || x->types[i].bitfield.reg64
2302 || x->types[i].bitfield.regmmx
2303 || x->types[i].bitfield.regxmm
2304 || x->types[i].bitfield.regymm
2305 || x->types[i].bitfield.sreg2
2306 || x->types[i].bitfield.sreg3
2307 || x->types[i].bitfield.control
2308 || x->types[i].bitfield.debug
2309 || x->types[i].bitfield.test)
2310 fprintf (stdout, "%s\n", x->op[i].regs->reg_name);
2311 if (operand_type_check (x->types[i], imm))
2312 pe (x->op[i].imms);
2313 if (operand_type_check (x->types[i], disp))
2314 pe (x->op[i].disps);
2315 }
2316 }
2317
2318 static void
2319 pte (insn_template *t)
2320 {
2321 unsigned int i;
2322 fprintf (stdout, " %d operands ", t->operands);
2323 fprintf (stdout, "opcode %x ", t->base_opcode);
2324 if (t->extension_opcode != None)
2325 fprintf (stdout, "ext %x ", t->extension_opcode);
2326 if (t->opcode_modifier.d)
2327 fprintf (stdout, "D");
2328 if (t->opcode_modifier.w)
2329 fprintf (stdout, "W");
2330 fprintf (stdout, "\n");
2331 for (i = 0; i < t->operands; i++)
2332 {
2333 fprintf (stdout, " #%d type ", i + 1);
2334 pt (t->operand_types[i]);
2335 fprintf (stdout, "\n");
2336 }
2337 }
2338
2339 static void
2340 pe (expressionS *e)
2341 {
2342 fprintf (stdout, " operation %d\n", e->X_op);
2343 fprintf (stdout, " add_number %ld (%lx)\n",
2344 (long) e->X_add_number, (long) e->X_add_number);
2345 if (e->X_add_symbol)
2346 {
2347 fprintf (stdout, " add_symbol ");
2348 ps (e->X_add_symbol);
2349 fprintf (stdout, "\n");
2350 }
2351 if (e->X_op_symbol)
2352 {
2353 fprintf (stdout, " op_symbol ");
2354 ps (e->X_op_symbol);
2355 fprintf (stdout, "\n");
2356 }
2357 }
2358
2359 static void
2360 ps (symbolS *s)
2361 {
2362 fprintf (stdout, "%s type %s%s",
2363 S_GET_NAME (s),
2364 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2365 segment_name (S_GET_SEGMENT (s)));
2366 }
2367
2368 static struct type_name
2369 {
2370 i386_operand_type mask;
2371 const char *name;
2372 }
2373 const type_names[] =
2374 {
2375 { OPERAND_TYPE_REG8, "r8" },
2376 { OPERAND_TYPE_REG16, "r16" },
2377 { OPERAND_TYPE_REG32, "r32" },
2378 { OPERAND_TYPE_REG64, "r64" },
2379 { OPERAND_TYPE_IMM8, "i8" },
2380   { OPERAND_TYPE_IMM8S, "i8s" },
2381 { OPERAND_TYPE_IMM16, "i16" },
2382 { OPERAND_TYPE_IMM32, "i32" },
2383 { OPERAND_TYPE_IMM32S, "i32s" },
2384 { OPERAND_TYPE_IMM64, "i64" },
2385 { OPERAND_TYPE_IMM1, "i1" },
2386 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2387 { OPERAND_TYPE_DISP8, "d8" },
2388 { OPERAND_TYPE_DISP16, "d16" },
2389 { OPERAND_TYPE_DISP32, "d32" },
2390 { OPERAND_TYPE_DISP32S, "d32s" },
2391 { OPERAND_TYPE_DISP64, "d64" },
2392 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2393 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2394 { OPERAND_TYPE_CONTROL, "control reg" },
2395 { OPERAND_TYPE_TEST, "test reg" },
2396 { OPERAND_TYPE_DEBUG, "debug reg" },
2397 { OPERAND_TYPE_FLOATREG, "FReg" },
2398 { OPERAND_TYPE_FLOATACC, "FAcc" },
2399 { OPERAND_TYPE_SREG2, "SReg2" },
2400 { OPERAND_TYPE_SREG3, "SReg3" },
2401 { OPERAND_TYPE_ACC, "Acc" },
2402 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2403 { OPERAND_TYPE_REGMMX, "rMMX" },
2404 { OPERAND_TYPE_REGXMM, "rXMM" },
2405 { OPERAND_TYPE_REGYMM, "rYMM" },
2406 { OPERAND_TYPE_ESSEG, "es" },
2407 };
2408
2409 static void
2410 pt (i386_operand_type t)
2411 {
2412 unsigned int j;
2413 i386_operand_type a;
2414
2415 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2416 {
2417 a = operand_type_and (t, type_names[j].mask);
2418 if (!operand_type_all_zero (&a))
2419 fprintf (stdout, "%s, ", type_names[j].name);
2420 }
2421 fflush (stdout);
2422 }
2423
2424 #endif /* DEBUG386 */
2425 \f
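/* Pick a BFD relocation type for a fixup.  SIZE is the width of the
   relocated field in bytes, PCREL is non-zero for pc-relative fixups and
   SIGN is positive for signed fields.  When OTHER names an explicitly
   requested relocation (e.g. one produced by an @got-style operator) it is
   validated against SIZE/PCREL/SIGN, possibly widened for 8-byte fields,
   and returned; otherwise a plain BFD_RELOC_{8,16,32,64}[_PCREL] (or
   BFD_RELOC_X86_64_32S for signed 4-byte data) is chosen.  */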
2426 static bfd_reloc_code_real_type
2427 reloc (unsigned int size,
2428 int pcrel,
2429 int sign,
2430 bfd_reloc_code_real_type other)
2431 {
2432 if (other != NO_RELOC)
2433 {
2434 reloc_howto_type *reloc;
2435
2436 if (size == 8)
2437 switch (other)
2438 {
2439 case BFD_RELOC_X86_64_GOT32:
2440 return BFD_RELOC_X86_64_GOT64;
2441 break;
2442 case BFD_RELOC_X86_64_PLTOFF64:
2443 return BFD_RELOC_X86_64_PLTOFF64;
2444 break;
2445 case BFD_RELOC_X86_64_GOTPC32:
2446 other = BFD_RELOC_X86_64_GOTPC64;
2447 break;
2448 case BFD_RELOC_X86_64_GOTPCREL:
2449 other = BFD_RELOC_X86_64_GOTPCREL64;
2450 break;
2451 case BFD_RELOC_X86_64_TPOFF32:
2452 other = BFD_RELOC_X86_64_TPOFF64;
2453 break;
2454 case BFD_RELOC_X86_64_DTPOFF32:
2455 other = BFD_RELOC_X86_64_DTPOFF64;
2456 break;
2457 default:
2458 break;
2459 }
2460
2461 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2462 if (size == 4 && flag_code != CODE_64BIT)
2463 sign = -1;
2464
2465 reloc = bfd_reloc_type_lookup (stdoutput, other);
2466 if (!reloc)
2467 as_bad (_("unknown relocation (%u)"), other);
2468 else if (size != bfd_get_reloc_size (reloc))
2469 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2470 bfd_get_reloc_size (reloc),
2471 size);
2472 else if (pcrel && !reloc->pc_relative)
2473 as_bad (_("non-pc-relative relocation for pc-relative field"));
2474 else if ((reloc->complain_on_overflow == complain_overflow_signed
2475 && !sign)
2476 || (reloc->complain_on_overflow == complain_overflow_unsigned
2477 && sign > 0))
2478 as_bad (_("relocated field and relocation type differ in signedness"));
2479 else
2480 return other;
2481 return NO_RELOC;
2482 }
2483
2484 if (pcrel)
2485 {
2486 if (!sign)
2487 as_bad (_("there are no unsigned pc-relative relocations"));
2488 switch (size)
2489 {
2490 case 1: return BFD_RELOC_8_PCREL;
2491 case 2: return BFD_RELOC_16_PCREL;
2492 case 4: return BFD_RELOC_32_PCREL;
2493 case 8: return BFD_RELOC_64_PCREL;
2494 }
2495 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2496 }
2497 else
2498 {
2499 if (sign > 0)
2500 switch (size)
2501 {
2502 case 4: return BFD_RELOC_X86_64_32S;
2503 }
2504 else
2505 switch (size)
2506 {
2507 case 1: return BFD_RELOC_8;
2508 case 2: return BFD_RELOC_16;
2509 case 4: return BFD_RELOC_32;
2510 case 8: return BFD_RELOC_64;
2511 }
2512 as_bad (_("cannot do %s %u byte relocation"),
2513 sign > 0 ? "signed" : "unsigned", size);
2514 }
2515
2516 return NO_RELOC;
2517 }
2518
2519 /* Here we decide which fixups can be adjusted to make them relative to
2520 the beginning of the section instead of the symbol. Basically we need
2521 to make sure that the dynamic relocations are done correctly, so in
2522 some cases we force the original symbol to be used. */
2523
2524 int
2525 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2526 {
2527 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2528 if (!IS_ELF)
2529 return 1;
2530
2531 /* Don't adjust pc-relative references to merge sections in 64-bit
2532 mode. */
2533 if (use_rela_relocations
2534 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2535 && fixP->fx_pcrel)
2536 return 0;
2537
2538 /* The x86_64 GOTPCREL relocations are represented as 32bit PCrel
2539 relocations and changed later by validate_fix. */
2540 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2541 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2542 return 0;
2543
2544 /* adjust_reloc_syms doesn't know about the GOT. */
2545 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2546 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2547 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2548 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2549 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2550 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2551 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2552 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2553 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2554 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2555 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2556 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2557 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2558 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2559 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2560 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2561 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2562 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2563 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2564 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2565 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2566 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2567 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2568 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2569 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2570 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2571 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2572 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2573 return 0;
2574 #endif
2575 return 1;
2576 }
2577
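/* Classify MNEMONIC for Intel-syntax operand size handling.  Returns 0 for
   non-FPU mnemonics (and fxsave/fxrstor), 2 for the integer forms (fi*),
   3 for control/state operations such as fldcw, fldenv, fnstsw, frstor and
   fsave, and 1 for other FPU operations.  */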
2578 static int
2579 intel_float_operand (const char *mnemonic)
2580 {
2581 /* Note that the value returned is meaningful only for opcodes with (memory)
2582 operands, hence the code here is free to improperly handle opcodes that
2583 have no operands (for better performance and smaller code). */
2584
2585 if (mnemonic[0] != 'f')
2586 return 0; /* non-math */
2587
2588 switch (mnemonic[1])
2589 {
2590 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2591 the fs segment override prefix are not currently handled, because no
2592 call path can make opcodes without operands get here. */
2593 case 'i':
2594 return 2 /* integer op */;
2595 case 'l':
2596 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2597 return 3; /* fldcw/fldenv */
2598 break;
2599 case 'n':
2600 if (mnemonic[2] != 'o' /* fnop */)
2601 return 3; /* non-waiting control op */
2602 break;
2603 case 'r':
2604 if (mnemonic[2] == 's')
2605 return 3; /* frstor/frstpm */
2606 break;
2607 case 's':
2608 if (mnemonic[2] == 'a')
2609 return 3; /* fsave */
2610 if (mnemonic[2] == 't')
2611 {
2612 switch (mnemonic[3])
2613 {
2614 case 'c': /* fstcw */
2615 case 'd': /* fstdw */
2616 case 'e': /* fstenv */
2617 case 's': /* fsts[gw] */
2618 return 3;
2619 }
2620 }
2621 break;
2622 case 'x':
2623 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2624 return 0; /* fxsave/fxrstor are not really math ops */
2625 break;
2626 }
2627
2628 return 1;
2629 }
2630
2631 /* Build the VEX prefix. */
2632
2633 static void
2634 build_vex_prefix (const insn_template *t)
2635 {
2636 unsigned int register_specifier;
2637 unsigned int implied_prefix;
2638 unsigned int vector_length;
2639
2640 /* Check register specifier. */
2641 if (i.vex.register_specifier)
2642 {
2643 register_specifier = i.vex.register_specifier->reg_num;
2644 if ((i.vex.register_specifier->reg_flags & RegRex))
2645 register_specifier += 8;
2646 register_specifier = ~register_specifier & 0xf;
2647 }
2648 else
2649 register_specifier = 0xf;
2650
2651 /* Use 2-byte VEX prefix by swapping destination and source
2652 operand. */
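  /* Only the inverted REX.R bit fits in the 2-byte (0xC5) VEX form, so
     this only helps when the sole extended register is the one selected
     by REX.B: swapping to the paired ".s" template moves that register
     into the reg field, turning the requirement into REX.R.  */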
2653 if (!i.swap_operand
2654 && i.operands == i.reg_operands
2655 && i.tm.opcode_modifier.vex0f
2656 && i.tm.opcode_modifier.s
2657 && i.rex == REX_B)
2658 {
2659 unsigned int xchg = i.operands - 1;
2660 union i386_op temp_op;
2661 i386_operand_type temp_type;
2662
2663 temp_type = i.types[xchg];
2664 i.types[xchg] = i.types[0];
2665 i.types[0] = temp_type;
2666 temp_op = i.op[xchg];
2667 i.op[xchg] = i.op[0];
2668 i.op[0] = temp_op;
2669
2670 gas_assert (i.rm.mode == 3);
2671
2672 i.rex = REX_R;
2673 xchg = i.rm.regmem;
2674 i.rm.regmem = i.rm.reg;
2675 i.rm.reg = xchg;
2676
2677 /* Use the next insn. */
2678 i.tm = t[1];
2679 }
2680
2681 vector_length = i.tm.opcode_modifier.vex == 2 ? 1 : 0;
2682
2683 switch ((i.tm.base_opcode >> 8) & 0xff)
2684 {
2685 case 0:
2686 implied_prefix = 0;
2687 break;
2688 case DATA_PREFIX_OPCODE:
2689 implied_prefix = 1;
2690 break;
2691 case REPE_PREFIX_OPCODE:
2692 implied_prefix = 2;
2693 break;
2694 case REPNE_PREFIX_OPCODE:
2695 implied_prefix = 3;
2696 break;
2697 default:
2698 abort ();
2699 }
2700
2701 /* Use 2-byte VEX prefix if possible. */
2702 if (i.tm.opcode_modifier.vex0f
2703 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2704 {
2705 /* 2-byte VEX prefix. */
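      /* Second byte layout: bit 7 = ~REX.R, bits 6:3 = ~vvvv (the extra
	 register specifier), bit 2 = L (vector length, 1 for 256-bit),
	 bits 1:0 = pp (implied prefix: 0=none, 1=66, 2=F3, 3=F2).  */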
2706 unsigned int r;
2707
2708 i.vex.length = 2;
2709 i.vex.bytes[0] = 0xc5;
2710
2711 /* Check the REX.R bit. */
2712 r = (i.rex & REX_R) ? 0 : 1;
2713 i.vex.bytes[1] = (r << 7
2714 | register_specifier << 3
2715 | vector_length << 2
2716 | implied_prefix);
2717 }
2718 else
2719 {
2720 /* 3-byte VEX prefix. */
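      /* Byte 1: bits 7:5 = ~REX.R/~REX.X/~REX.B, bits 4:0 = m-mmmm
	 (1 = 0F, 2 = 0F38, 3 = 0F3A opcode map).  Byte 2: bit 7 = W,
	 bits 6:3 = ~vvvv, bit 2 = L, bits 1:0 = pp as in the 2-byte
	 form.  */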
2721 unsigned int m, w;
2722
2723 if (i.tm.opcode_modifier.vex0f)
2724 m = 0x1;
2725 else if (i.tm.opcode_modifier.vex0f38)
2726 m = 0x2;
2727 else if (i.tm.opcode_modifier.vex0f3a)
2728 m = 0x3;
2729 else
2730 abort ();
2731
2732 i.vex.length = 3;
2733 i.vex.bytes[0] = 0xc4;
2734
2735 /* The high 3 bits of the second VEX byte are the 1's complement
2736 of RXB bits from REX. */
2737 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2738
2739 /* Check the REX.W bit. */
2740 w = (i.rex & REX_W) ? 1 : 0;
2741 if (i.tm.opcode_modifier.vexw0 || i.tm.opcode_modifier.vexw1)
2742 {
2743 if (w)
2744 abort ();
2745
2746 if (i.tm.opcode_modifier.vexw1)
2747 w = 1;
2748 }
2749
2750 i.vex.bytes[2] = (w << 7
2751 | register_specifier << 3
2752 | vector_length << 2
2753 | implied_prefix);
2754 }
2755 }
2756
2757 static void
2758 process_immext (void)
2759 {
2760 expressionS *exp;
2761
2762 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2763 {
2764 /* SSE3 instructions have fixed register operands, with an opcode
2765 suffix which is coded in the same place as an 8-bit immediate
2766 field would be. Here we check those operands and remove them
2767 afterwards. */
2768 unsigned int x;
2769
2770 for (x = 0; x < i.operands; x++)
2771 if (i.op[x].regs->reg_num != x)
2772 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2773 register_prefix, i.op[x].regs->reg_name, x + 1,
2774 i.tm.name);
2775
2776 i.operands = 0;
2777 }
2778
2779 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2780 which is coded in the same place as an 8-bit immediate field
2781 would be. Here we fake an 8-bit immediate operand from the
2782 opcode suffix stored in tm.extension_opcode.
2783
2784 AVX instructions also use this encoding for some
2785 3-argument instructions. */
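  /* For instance, the AMD 3DNow! insns are encoded as 0f 0f /r with the
     operation selected by a trailing suffix byte in the imm8 position;
     that suffix byte is what gets faked as an immediate operand below.  */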
2786
2787 gas_assert (i.imm_operands == 0
2788 && (i.operands <= 2
2789 || (i.tm.opcode_modifier.vex
2790 && i.operands <= 4)));
2791
2792 exp = &im_expressions[i.imm_operands++];
2793 i.op[i.operands].imms = exp;
2794 i.types[i.operands] = imm8;
2795 i.operands++;
2796 exp->X_op = O_constant;
2797 exp->X_add_number = i.tm.extension_opcode;
2798 i.tm.extension_opcode = None;
2799 }
2800
2801 /* This is the guts of the machine-dependent assembler. LINE points to a
2802 machine dependent instruction. This function is supposed to emit
2803 the frags/bytes it assembles to. */
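/* Rough pipeline: parse_insn() picks the template list and prefixes,
   parse_operands() fills in i.op[]/i.types[], the immediates and
   displacements are shrunk, match_template() selects the template,
   process_suffix()/process_operands() settle sizes, registers and the
   ModRM/SIB/VEX fields, and output_insn() emits the frags.  */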
2804
2805 void
2806 md_assemble (char *line)
2807 {
2808 unsigned int j;
2809 char mnemonic[MAX_MNEM_SIZE];
2810 const insn_template *t;
2811
2812 /* Initialize globals. */
2813 memset (&i, '\0', sizeof (i));
2814 for (j = 0; j < MAX_OPERANDS; j++)
2815 i.reloc[j] = NO_RELOC;
2816 memset (disp_expressions, '\0', sizeof (disp_expressions));
2817 memset (im_expressions, '\0', sizeof (im_expressions));
2818 save_stack_p = save_stack;
2819
2820 /* First parse an instruction mnemonic & call i386_operand for the operands.
2821 We assume that the scrubber has arranged it so that line[0] is the valid
2822 start of a (possibly prefixed) mnemonic. */
2823
2824 line = parse_insn (line, mnemonic);
2825 if (line == NULL)
2826 return;
2827
2828 line = parse_operands (line, mnemonic);
2829 this_operand = -1;
2830 if (line == NULL)
2831 return;
2832
2833 /* Now we've parsed the mnemonic into a set of templates, and have the
2834 operands at hand. */
2835
2836 /* All intel opcodes have reversed operands except for "bound" and
2837 "enter". We also don't reverse intersegment "jmp" and "call"
2838 instructions with 2 immediate operands so that the immediate segment
2839 precedes the offset, as it does when in AT&T mode. */
2840 if (intel_syntax
2841 && i.operands > 1
2842 && (strcmp (mnemonic, "bound") != 0)
2843 && (strcmp (mnemonic, "invlpga") != 0)
2844 && !(operand_type_check (i.types[0], imm)
2845 && operand_type_check (i.types[1], imm)))
2846 swap_operands ();
2847
2848 /* The order of the immediates should be reversed for the
2849 2-immediate extrq and insertq instructions. */
2850 if (i.imm_operands == 2
2851 && (strcmp (mnemonic, "extrq") == 0
2852 || strcmp (mnemonic, "insertq") == 0))
2853 swap_2_operands (0, 1);
2854
2855 if (i.imm_operands)
2856 optimize_imm ();
2857
2858 /* Don't optimize displacement for movabs since it only takes 64bit
2859 displacement. */
2860 if (i.disp_operands
2861 && (flag_code != CODE_64BIT
2862 || strcmp (mnemonic, "movabs") != 0))
2863 optimize_disp ();
2864
2865 /* Next, we find a template that matches the given insn,
2866 making sure the overlap of the given operands types is consistent
2867 with the template operand types. */
2868
2869 if (!(t = match_template ()))
2870 return;
2871
2872 if (sse_check != sse_check_none
2873 && !i.tm.opcode_modifier.noavx
2874 && (i.tm.cpu_flags.bitfield.cpusse
2875 || i.tm.cpu_flags.bitfield.cpusse2
2876 || i.tm.cpu_flags.bitfield.cpusse3
2877 || i.tm.cpu_flags.bitfield.cpussse3
2878 || i.tm.cpu_flags.bitfield.cpusse4_1
2879 || i.tm.cpu_flags.bitfield.cpusse4_2))
2880 {
2881 (sse_check == sse_check_warning
2882 ? as_warn
2883 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
2884 }
2885
2886 /* Zap movzx and movsx suffix. The suffix has been set from
2887 "word ptr" or "byte ptr" on the source operand in Intel syntax
2888 or extracted from mnemonic in AT&T syntax. But we'll use
2889 the destination register to choose the suffix for encoding. */
2890 if ((i.tm.base_opcode & ~9) == 0x0fb6)
2891 {
2892 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
2893 there is no suffix, the default will be byte extension. */
2894 if (i.reg_operands != 2
2895 && !i.suffix
2896 && intel_syntax)
2897 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
2898
2899 i.suffix = 0;
2900 }
2901
2902 if (i.tm.opcode_modifier.fwait)
2903 if (!add_prefix (FWAIT_OPCODE))
2904 return;
2905
2906 /* Check string instruction segment overrides. */
2907 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
2908 {
2909 if (!check_string ())
2910 return;
2911 i.disp_operands = 0;
2912 }
2913
2914 if (!process_suffix ())
2915 return;
2916
2917 /* Update operand types. */
2918 for (j = 0; j < i.operands; j++)
2919 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
2920
2921 /* Make still unresolved immediate matches conform to size of immediate
2922 given in i.suffix. */
2923 if (!finalize_imm ())
2924 return;
2925
2926 if (i.types[0].bitfield.imm1)
2927 i.imm_operands = 0; /* kludge for shift insns. */
2928
2929 /* We only need to check those implicit registers for instructions
2930 with 3 operands or less. */
2931 if (i.operands <= 3)
2932 for (j = 0; j < i.operands; j++)
2933 if (i.types[j].bitfield.inoutportreg
2934 || i.types[j].bitfield.shiftcount
2935 || i.types[j].bitfield.acc
2936 || i.types[j].bitfield.floatacc)
2937 i.reg_operands--;
2938
2939 /* ImmExt should be processed after SSE2AVX. */
2940 if (!i.tm.opcode_modifier.sse2avx
2941 && i.tm.opcode_modifier.immext)
2942 process_immext ();
2943
2944 /* For insns with operands there are more diddles to do to the opcode. */
2945 if (i.operands)
2946 {
2947 if (!process_operands ())
2948 return;
2949 }
2950 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
2951 {
2952 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
2953 as_warn (_("translating to `%sp'"), i.tm.name);
2954 }
2955
2956 if (i.tm.opcode_modifier.vex)
2957 build_vex_prefix (t);
2958
2959 /* Handle conversion of 'int $3' --> special int3 insn. */
2960 if (i.tm.base_opcode == INT_OPCODE && i.op[0].imms->X_add_number == 3)
2961 {
2962 i.tm.base_opcode = INT3_OPCODE;
2963 i.imm_operands = 0;
2964 }
2965
2966 if ((i.tm.opcode_modifier.jump
2967 || i.tm.opcode_modifier.jumpbyte
2968 || i.tm.opcode_modifier.jumpdword)
2969 && i.op[0].disps->X_op == O_constant)
2970 {
2971 /* Convert "jmp constant" (and "call constant") to a jump (call) to
2972 the absolute address given by the constant. Since ix86 jumps and
2973 calls are pc relative, we need to generate a reloc. */
2974 i.op[0].disps->X_add_symbol = &abs_symbol;
2975 i.op[0].disps->X_op = O_symbol;
2976 }
2977
2978 if (i.tm.opcode_modifier.rex64)
2979 i.rex |= REX_W;
2980
2981 /* For 8 bit registers we need an empty rex prefix. Also if the
2982 instruction already has a prefix, we need to convert old
2983 registers to new ones. */
2984
2985 if ((i.types[0].bitfield.reg8
2986 && (i.op[0].regs->reg_flags & RegRex64) != 0)
2987 || (i.types[1].bitfield.reg8
2988 && (i.op[1].regs->reg_flags & RegRex64) != 0)
2989 || ((i.types[0].bitfield.reg8
2990 || i.types[1].bitfield.reg8)
2991 && i.rex != 0))
2992 {
2993 int x;
2994
2995 i.rex |= REX_OPCODE;
2996 for (x = 0; x < 2; x++)
2997 {
2998 /* Look for 8 bit operand that uses old registers. */
2999 if (i.types[x].bitfield.reg8
3000 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3001 {
3002 /* In case it is "hi" register, give up. */
3003 if (i.op[x].regs->reg_num > 3)
3004 as_bad (_("can't encode register '%s%s' in an "
3005 "instruction requiring REX prefix."),
3006 register_prefix, i.op[x].regs->reg_name);
3007
3008 /* Otherwise it is equivalent to the extended register.
3009 Since the encoding doesn't change this is merely
3010 cosmetic cleanup for debug output. */
3011
3012 i.op[x].regs = i.op[x].regs + 8;
3013 }
3014 }
3015 }
3016
3017 if (i.rex != 0)
3018 add_prefix (REX_OPCODE | i.rex);
3019
3020 /* We are ready to output the insn. */
3021 output_insn ();
3022 }
3023
3024 static char *
3025 parse_insn (char *line, char *mnemonic)
3026 {
3027 char *l = line;
3028 char *token_start = l;
3029 char *mnem_p;
3030 int supported;
3031 const insn_template *t;
3032 char *dot_p = NULL;
3033
3034 /* Non-zero if we found a prefix only acceptable with string insns. */
3035 const char *expecting_string_instruction = NULL;
3036
3037 while (1)
3038 {
3039 mnem_p = mnemonic;
3040 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3041 {
3042 if (*mnem_p == '.')
3043 dot_p = mnem_p;
3044 mnem_p++;
3045 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3046 {
3047 as_bad (_("no such instruction: `%s'"), token_start);
3048 return NULL;
3049 }
3050 l++;
3051 }
3052 if (!is_space_char (*l)
3053 && *l != END_OF_INSN
3054 && (intel_syntax
3055 || (*l != PREFIX_SEPARATOR
3056 && *l != ',')))
3057 {
3058 as_bad (_("invalid character %s in mnemonic"),
3059 output_invalid (*l));
3060 return NULL;
3061 }
3062 if (token_start == l)
3063 {
3064 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3065 as_bad (_("expecting prefix; got nothing"));
3066 else
3067 as_bad (_("expecting mnemonic; got nothing"));
3068 return NULL;
3069 }
3070
3071 /* Look up instruction (or prefix) via hash table. */
3072 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3073
3074 if (*l != END_OF_INSN
3075 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3076 && current_templates
3077 && current_templates->start->opcode_modifier.isprefix)
3078 {
3079 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3080 {
3081 as_bad ((flag_code != CODE_64BIT
3082 ? _("`%s' is only supported in 64-bit mode")
3083 : _("`%s' is not supported in 64-bit mode")),
3084 current_templates->start->name);
3085 return NULL;
3086 }
3087 /* If we are in 16-bit mode, do not allow addr16 or data16.
3088 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3089 if ((current_templates->start->opcode_modifier.size16
3090 || current_templates->start->opcode_modifier.size32)
3091 && flag_code != CODE_64BIT
3092 && (current_templates->start->opcode_modifier.size32
3093 ^ (flag_code == CODE_16BIT)))
3094 {
3095 as_bad (_("redundant %s prefix"),
3096 current_templates->start->name);
3097 return NULL;
3098 }
3099 /* Add prefix, checking for repeated prefixes. */
3100 switch (add_prefix (current_templates->start->base_opcode))
3101 {
3102 case 0:
3103 return NULL;
3104 case 2:
3105 expecting_string_instruction = current_templates->start->name;
3106 break;
3107 }
3108 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3109 token_start = ++l;
3110 }
3111 else
3112 break;
3113 }
3114
3115 if (!current_templates)
3116 {
3117 /* Check if we should swap operand in encoding. */
3118 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3119 i.swap_operand = 1;
3120 else
3121 goto check_suffix;
3122 mnem_p = dot_p;
3123 *dot_p = '\0';
3124 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3125 }
3126
3127 if (!current_templates)
3128 {
3129 check_suffix:
3130 /* See if we can get a match by trimming off a suffix. */
3131 switch (mnem_p[-1])
3132 {
3133 case WORD_MNEM_SUFFIX:
3134 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3135 i.suffix = SHORT_MNEM_SUFFIX;
3136 else
3137 case BYTE_MNEM_SUFFIX:
3138 case QWORD_MNEM_SUFFIX:
3139 i.suffix = mnem_p[-1];
3140 mnem_p[-1] = '\0';
3141 current_templates = (const templates *) hash_find (op_hash,
3142 mnemonic);
3143 break;
3144 case SHORT_MNEM_SUFFIX:
3145 case LONG_MNEM_SUFFIX:
3146 if (!intel_syntax)
3147 {
3148 i.suffix = mnem_p[-1];
3149 mnem_p[-1] = '\0';
3150 current_templates = (const templates *) hash_find (op_hash,
3151 mnemonic);
3152 }
3153 break;
3154
3155 /* Intel Syntax. */
3156 case 'd':
3157 if (intel_syntax)
3158 {
3159 if (intel_float_operand (mnemonic) == 1)
3160 i.suffix = SHORT_MNEM_SUFFIX;
3161 else
3162 i.suffix = LONG_MNEM_SUFFIX;
3163 mnem_p[-1] = '\0';
3164 current_templates = (const templates *) hash_find (op_hash,
3165 mnemonic);
3166 }
3167 break;
3168 }
3169 if (!current_templates)
3170 {
3171 as_bad (_("no such instruction: `%s'"), token_start);
3172 return NULL;
3173 }
3174 }
3175
3176 if (current_templates->start->opcode_modifier.jump
3177 || current_templates->start->opcode_modifier.jumpbyte)
3178 {
3179 /* Check for a branch hint. We allow ",pt" and ",pn" for
3180 predict taken and predict not taken respectively.
3181 I'm not sure that branch hints actually do anything on loop
3182 and jcxz insns (JumpByte) for current Pentium4 chips. They
3183 may work in the future and it doesn't hurt to accept them
3184 now. */
3185 if (l[0] == ',' && l[1] == 'p')
3186 {
3187 if (l[2] == 't')
3188 {
3189 if (!add_prefix (DS_PREFIX_OPCODE))
3190 return NULL;
3191 l += 3;
3192 }
3193 else if (l[2] == 'n')
3194 {
3195 if (!add_prefix (CS_PREFIX_OPCODE))
3196 return NULL;
3197 l += 3;
3198 }
3199 }
3200 }
3201 /* Any other comma loses. */
3202 if (*l == ',')
3203 {
3204 as_bad (_("invalid character %s in mnemonic"),
3205 output_invalid (*l));
3206 return NULL;
3207 }
3208
3209 /* Check if instruction is supported on specified architecture. */
3210 supported = 0;
3211 for (t = current_templates->start; t < current_templates->end; ++t)
3212 {
3213 supported |= cpu_flags_match (t);
3214 if (supported == CPU_FLAGS_PERFECT_MATCH)
3215 goto skip;
3216 }
3217
3218 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3219 {
3220 as_bad (flag_code == CODE_64BIT
3221 ? _("`%s' is not supported in 64-bit mode")
3222 : _("`%s' is only supported in 64-bit mode"),
3223 current_templates->start->name);
3224 return NULL;
3225 }
3226 if (supported != CPU_FLAGS_PERFECT_MATCH)
3227 {
3228 as_bad (_("`%s' is not supported on `%s%s'"),
3229 current_templates->start->name,
3230 cpu_arch_name ? cpu_arch_name : default_arch,
3231 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3232 return NULL;
3233 }
3234
3235 skip:
3236 if (!cpu_arch_flags.bitfield.cpui386
3237 && (flag_code != CODE_16BIT))
3238 {
3239 as_warn (_("use .code16 to ensure correct addressing mode"));
3240 }
3241
3242 /* Check for rep/repne without a string instruction. */
3243 if (expecting_string_instruction)
3244 {
3245 static templates override;
3246
3247 for (t = current_templates->start; t < current_templates->end; ++t)
3248 if (t->opcode_modifier.isstring)
3249 break;
3250 if (t >= current_templates->end)
3251 {
3252 as_bad (_("expecting string instruction after `%s'"),
3253 expecting_string_instruction);
3254 return NULL;
3255 }
3256 for (override.start = t; t < current_templates->end; ++t)
3257 if (!t->opcode_modifier.isstring)
3258 break;
3259 override.end = t;
3260 current_templates = &override;
3261 }
3262
3263 return l;
3264 }
3265
3266 static char *
3267 parse_operands (char *l, const char *mnemonic)
3268 {
3269 char *token_start;
3270
3271 /* 1 if operand is pending after ','. */
3272 unsigned int expecting_operand = 0;
3273
3274 /* Non-zero if operand parens not balanced. */
3275 unsigned int paren_not_balanced;
3276
3277 while (*l != END_OF_INSN)
3278 {
3279 /* Skip optional white space before operand. */
3280 if (is_space_char (*l))
3281 ++l;
3282 if (!is_operand_char (*l) && *l != END_OF_INSN)
3283 {
3284 as_bad (_("invalid character %s before operand %d"),
3285 output_invalid (*l),
3286 i.operands + 1);
3287 return NULL;
3288 }
3289 token_start = l; /* after white space */
3290 paren_not_balanced = 0;
3291 while (paren_not_balanced || *l != ',')
3292 {
3293 if (*l == END_OF_INSN)
3294 {
3295 if (paren_not_balanced)
3296 {
3297 if (!intel_syntax)
3298 as_bad (_("unbalanced parenthesis in operand %d."),
3299 i.operands + 1);
3300 else
3301 as_bad (_("unbalanced brackets in operand %d."),
3302 i.operands + 1);
3303 return NULL;
3304 }
3305 else
3306 break; /* we are done */
3307 }
3308 else if (!is_operand_char (*l) && !is_space_char (*l))
3309 {
3310 as_bad (_("invalid character %s in operand %d"),
3311 output_invalid (*l),
3312 i.operands + 1);
3313 return NULL;
3314 }
3315 if (!intel_syntax)
3316 {
3317 if (*l == '(')
3318 ++paren_not_balanced;
3319 if (*l == ')')
3320 --paren_not_balanced;
3321 }
3322 else
3323 {
3324 if (*l == '[')
3325 ++paren_not_balanced;
3326 if (*l == ']')
3327 --paren_not_balanced;
3328 }
3329 l++;
3330 }
3331 if (l != token_start)
3332 { /* Yes, we've read in another operand. */
3333 unsigned int operand_ok;
3334 this_operand = i.operands++;
3335 i.types[this_operand].bitfield.unspecified = 1;
3336 if (i.operands > MAX_OPERANDS)
3337 {
3338 as_bad (_("spurious operands; (%d operands/instruction max)"),
3339 MAX_OPERANDS);
3340 return NULL;
3341 }
3342 /* Now parse operand adding info to 'i' as we go along. */
3343 END_STRING_AND_SAVE (l);
3344
3345 if (intel_syntax)
3346 operand_ok =
3347 i386_intel_operand (token_start,
3348 intel_float_operand (mnemonic));
3349 else
3350 operand_ok = i386_att_operand (token_start);
3351
3352 RESTORE_END_STRING (l);
3353 if (!operand_ok)
3354 return NULL;
3355 }
3356 else
3357 {
3358 if (expecting_operand)
3359 {
3360 expecting_operand_after_comma:
3361 as_bad (_("expecting operand after ','; got nothing"));
3362 return NULL;
3363 }
3364 if (*l == ',')
3365 {
3366 as_bad (_("expecting operand before ','; got nothing"));
3367 return NULL;
3368 }
3369 }
3370
3371 /* Now *l must be either ',' or END_OF_INSN. */
3372 if (*l == ',')
3373 {
3374 if (*++l == END_OF_INSN)
3375 {
3376 /* Just skip it, if it's \n complain. */
3377 goto expecting_operand_after_comma;
3378 }
3379 expecting_operand = 1;
3380 }
3381 }
3382 return l;
3383 }
3384
3385 static void
3386 swap_2_operands (int xchg1, int xchg2)
3387 {
3388 union i386_op temp_op;
3389 i386_operand_type temp_type;
3390 enum bfd_reloc_code_real temp_reloc;
3391
3392 temp_type = i.types[xchg2];
3393 i.types[xchg2] = i.types[xchg1];
3394 i.types[xchg1] = temp_type;
3395 temp_op = i.op[xchg2];
3396 i.op[xchg2] = i.op[xchg1];
3397 i.op[xchg1] = temp_op;
3398 temp_reloc = i.reloc[xchg2];
3399 i.reloc[xchg2] = i.reloc[xchg1];
3400 i.reloc[xchg1] = temp_reloc;
3401 }
3402
3403 static void
3404 swap_operands (void)
3405 {
3406 switch (i.operands)
3407 {
3408 case 5:
3409 case 4:
3410 swap_2_operands (1, i.operands - 2);
3411 case 3:
3412 case 2:
3413 swap_2_operands (0, i.operands - 1);
3414 break;
3415 default:
3416 abort ();
3417 }
3418
3419 if (i.mem_operands == 2)
3420 {
3421 const seg_entry *temp_seg;
3422 temp_seg = i.seg[0];
3423 i.seg[0] = i.seg[1];
3424 i.seg[1] = temp_seg;
3425 }
3426 }
3427
3428 /* Try to ensure constant immediates are represented in the smallest
3429 opcode possible. */
3430 static void
3431 optimize_imm (void)
3432 {
3433 char guess_suffix = 0;
3434 int op;
3435
3436 if (i.suffix)
3437 guess_suffix = i.suffix;
3438 else if (i.reg_operands)
3439 {
3440 /* Figure out a suffix from the last register operand specified.
3441 We can't do this properly yet, ie. excluding InOutPortReg,
3442 but the following works for instructions with immediates.
3443 In any case, we can't set i.suffix yet. */
3444 for (op = i.operands; --op >= 0;)
3445 if (i.types[op].bitfield.reg8)
3446 {
3447 guess_suffix = BYTE_MNEM_SUFFIX;
3448 break;
3449 }
3450 else if (i.types[op].bitfield.reg16)
3451 {
3452 guess_suffix = WORD_MNEM_SUFFIX;
3453 break;
3454 }
3455 else if (i.types[op].bitfield.reg32)
3456 {
3457 guess_suffix = LONG_MNEM_SUFFIX;
3458 break;
3459 }
3460 else if (i.types[op].bitfield.reg64)
3461 {
3462 guess_suffix = QWORD_MNEM_SUFFIX;
3463 break;
3464 }
3465 }
3466 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3467 guess_suffix = WORD_MNEM_SUFFIX;
3468
3469 for (op = i.operands; --op >= 0;)
3470 if (operand_type_check (i.types[op], imm))
3471 {
3472 switch (i.op[op].imms->X_op)
3473 {
3474 case O_constant:
3475 /* If a suffix is given, this operand may be shortened. */
3476 switch (guess_suffix)
3477 {
3478 case LONG_MNEM_SUFFIX:
3479 i.types[op].bitfield.imm32 = 1;
3480 i.types[op].bitfield.imm64 = 1;
3481 break;
3482 case WORD_MNEM_SUFFIX:
3483 i.types[op].bitfield.imm16 = 1;
3484 i.types[op].bitfield.imm32 = 1;
3485 i.types[op].bitfield.imm32s = 1;
3486 i.types[op].bitfield.imm64 = 1;
3487 break;
3488 case BYTE_MNEM_SUFFIX:
3489 i.types[op].bitfield.imm8 = 1;
3490 i.types[op].bitfield.imm8s = 1;
3491 i.types[op].bitfield.imm16 = 1;
3492 i.types[op].bitfield.imm32 = 1;
3493 i.types[op].bitfield.imm32s = 1;
3494 i.types[op].bitfield.imm64 = 1;
3495 break;
3496 }
3497
3498 /* If this operand is at most 16 bits, convert it
3499 to a signed 16 bit number before trying to see
3500 whether it will fit in an even smaller size.
3501 This allows a 16-bit operand such as $0xffe0 to
3502 be recognised as within Imm8S range. */
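	      /* The ((x & 0xffff) ^ 0x8000) - 0x8000 idiom below
		 sign-extends bit 15: e.g. 0xffe0 -> 0x7fe0 -> -0x20.  */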
3503 if ((i.types[op].bitfield.imm16)
3504 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3505 {
3506 i.op[op].imms->X_add_number =
3507 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3508 }
3509 if ((i.types[op].bitfield.imm32)
3510 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3511 == 0))
3512 {
3513 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3514 ^ ((offsetT) 1 << 31))
3515 - ((offsetT) 1 << 31));
3516 }
3517 i.types[op]
3518 = operand_type_or (i.types[op],
3519 smallest_imm_type (i.op[op].imms->X_add_number));
3520
3521 /* We must avoid matching of Imm32 templates when 64bit
3522 only immediate is available. */
3523 if (guess_suffix == QWORD_MNEM_SUFFIX)
3524 i.types[op].bitfield.imm32 = 0;
3525 break;
3526
3527 case O_absent:
3528 case O_register:
3529 abort ();
3530
3531 /* Symbols and expressions. */
3532 default:
3533 /* Convert symbolic operand to proper sizes for matching, but don't
3534 prevent matching a set of insns that only supports sizes other
3535 than those matching the insn suffix. */
3536 {
3537 i386_operand_type mask, allowed;
3538 const insn_template *t;
3539
3540 operand_type_set (&mask, 0);
3541 operand_type_set (&allowed, 0);
3542
3543 for (t = current_templates->start;
3544 t < current_templates->end;
3545 ++t)
3546 allowed = operand_type_or (allowed,
3547 t->operand_types[op]);
3548 switch (guess_suffix)
3549 {
3550 case QWORD_MNEM_SUFFIX:
3551 mask.bitfield.imm64 = 1;
3552 mask.bitfield.imm32s = 1;
3553 break;
3554 case LONG_MNEM_SUFFIX:
3555 mask.bitfield.imm32 = 1;
3556 break;
3557 case WORD_MNEM_SUFFIX:
3558 mask.bitfield.imm16 = 1;
3559 break;
3560 case BYTE_MNEM_SUFFIX:
3561 mask.bitfield.imm8 = 1;
3562 break;
3563 default:
3564 break;
3565 }
3566 allowed = operand_type_and (mask, allowed);
3567 if (!operand_type_all_zero (&allowed))
3568 i.types[op] = operand_type_and (i.types[op], mask);
3569 }
3570 break;
3571 }
3572 }
3573 }
3574
3575 /* Try to use the smallest displacement type too. */
3576 static void
3577 optimize_disp (void)
3578 {
3579 int op;
3580
3581 for (op = i.operands; --op >= 0;)
3582 if (operand_type_check (i.types[op], disp))
3583 {
3584 if (i.op[op].disps->X_op == O_constant)
3585 {
3586 offsetT disp = i.op[op].disps->X_add_number;
3587
3588 if (i.types[op].bitfield.disp16
3589 && (disp & ~(offsetT) 0xffff) == 0)
3590 {
3591 /* If this operand is at most 16 bits, convert
3592 to a signed 16 bit number and don't use 64bit
3593 displacement. */
3594 disp = (((disp & 0xffff) ^ 0x8000) - 0x8000);
3595 i.types[op].bitfield.disp64 = 0;
3596 }
3597 if (i.types[op].bitfield.disp32
3598 && (disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3599 {
3600 /* If this operand is at most 32 bits, convert
3601 to a signed 32 bit number and don't use 64bit
3602 displacement. */
3603 disp &= (((offsetT) 2 << 31) - 1);
3604 disp = (disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3605 i.types[op].bitfield.disp64 = 0;
3606 }
3607 if (!disp && i.types[op].bitfield.baseindex)
3608 {
3609 i.types[op].bitfield.disp8 = 0;
3610 i.types[op].bitfield.disp16 = 0;
3611 i.types[op].bitfield.disp32 = 0;
3612 i.types[op].bitfield.disp32s = 0;
3613 i.types[op].bitfield.disp64 = 0;
3614 i.op[op].disps = 0;
3615 i.disp_operands--;
3616 }
3617 else if (flag_code == CODE_64BIT)
3618 {
3619 if (fits_in_signed_long (disp))
3620 {
3621 i.types[op].bitfield.disp64 = 0;
3622 i.types[op].bitfield.disp32s = 1;
3623 }
3624 if (i.prefix[ADDR_PREFIX]
3625 && fits_in_unsigned_long (disp))
3626 i.types[op].bitfield.disp32 = 1;
3627 }
3628 if ((i.types[op].bitfield.disp32
3629 || i.types[op].bitfield.disp32s
3630 || i.types[op].bitfield.disp16)
3631 && fits_in_signed_byte (disp))
3632 i.types[op].bitfield.disp8 = 1;
3633 }
3634 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3635 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3636 {
3637 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3638 i.op[op].disps, 0, i.reloc[op]);
3639 i.types[op].bitfield.disp8 = 0;
3640 i.types[op].bitfield.disp16 = 0;
3641 i.types[op].bitfield.disp32 = 0;
3642 i.types[op].bitfield.disp32s = 0;
3643 i.types[op].bitfield.disp64 = 0;
3644 }
3645 else
3646 /* We only support 64bit displacement on constants. */
3647 i.types[op].bitfield.disp64 = 0;
3648 }
3649 }
3650
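/* Search current_templates for a template whose operand count, CPU
   requirements, suffix checks and operand types match the parsed
   instruction in `i', trying the reversed operand order for templates
   with the D/FloatD modifiers.  Returns the matching template (also
   copied into i.tm) or NULL after issuing a diagnostic.  */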
3651 static const insn_template *
3652 match_template (void)
3653 {
3654 /* Points to template once we've found it. */
3655 const insn_template *t;
3656 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3657 i386_operand_type overlap4;
3658 unsigned int found_reverse_match;
3659 i386_opcode_modifier suffix_check;
3660 i386_operand_type operand_types [MAX_OPERANDS];
3661 int addr_prefix_disp;
3662 unsigned int j;
3663 unsigned int found_cpu_match;
3664 unsigned int check_register;
3665
3666 #if MAX_OPERANDS != 5
3667 # error "MAX_OPERANDS must be 5."
3668 #endif
3669
3670 found_reverse_match = 0;
3671 addr_prefix_disp = -1;
3672
3673 memset (&suffix_check, 0, sizeof (suffix_check));
3674 if (i.suffix == BYTE_MNEM_SUFFIX)
3675 suffix_check.no_bsuf = 1;
3676 else if (i.suffix == WORD_MNEM_SUFFIX)
3677 suffix_check.no_wsuf = 1;
3678 else if (i.suffix == SHORT_MNEM_SUFFIX)
3679 suffix_check.no_ssuf = 1;
3680 else if (i.suffix == LONG_MNEM_SUFFIX)
3681 suffix_check.no_lsuf = 1;
3682 else if (i.suffix == QWORD_MNEM_SUFFIX)
3683 suffix_check.no_qsuf = 1;
3684 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3685 suffix_check.no_ldsuf = 1;
3686
3687 for (t = current_templates->start; t < current_templates->end; t++)
3688 {
3689 addr_prefix_disp = -1;
3690
3691 /* Must have right number of operands. */
3692 if (i.operands != t->operands)
3693 continue;
3694
3695 /* Check processor support. */
3696 found_cpu_match = (cpu_flags_match (t)
3697 == CPU_FLAGS_PERFECT_MATCH);
3698 if (!found_cpu_match)
3699 continue;
3700
3701 /* Check old gcc support. */
3702 if (!old_gcc && t->opcode_modifier.oldgcc)
3703 continue;
3704
3705 /* Check AT&T mnemonic. */
3706 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3707 continue;
3708
3709 /* Check AT&T/Intel syntax. */
3710 if ((intel_syntax && t->opcode_modifier.attsyntax)
3711 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3712 continue;
3713
3714 /* Check the suffix, except for some instructions in intel mode. */
3715 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3716 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3717 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3718 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3719 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3720 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3721 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3722 continue;
3723
3724 if (!operand_size_match (t))
3725 continue;
3726
3727 for (j = 0; j < MAX_OPERANDS; j++)
3728 operand_types[j] = t->operand_types[j];
3729
3730 /* In general, don't allow 64-bit operands in 32-bit mode. */
3731 if (i.suffix == QWORD_MNEM_SUFFIX
3732 && flag_code != CODE_64BIT
3733 && (intel_syntax
3734 ? (!t->opcode_modifier.ignoresize
3735 && !intel_float_operand (t->name))
3736 : intel_float_operand (t->name) != 2)
3737 && ((!operand_types[0].bitfield.regmmx
3738 && !operand_types[0].bitfield.regxmm
3739 && !operand_types[0].bitfield.regymm)
3740 || (!operand_types[t->operands > 1].bitfield.regmmx
3741 && !!operand_types[t->operands > 1].bitfield.regxmm
3742 && !!operand_types[t->operands > 1].bitfield.regymm))
3743 && (t->base_opcode != 0x0fc7
3744 || t->extension_opcode != 1 /* cmpxchg8b */))
3745 continue;
3746
3747 /* In general, don't allow 32-bit operands on pre-386. */
3748 else if (i.suffix == LONG_MNEM_SUFFIX
3749 && !cpu_arch_flags.bitfield.cpui386
3750 && (intel_syntax
3751 ? (!t->opcode_modifier.ignoresize
3752 && !intel_float_operand (t->name))
3753 : intel_float_operand (t->name) != 2)
3754 && ((!operand_types[0].bitfield.regmmx
3755 && !operand_types[0].bitfield.regxmm)
3756 || (!operand_types[t->operands > 1].bitfield.regmmx
3757 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3758 continue;
3759
3760 /* Do not verify operands when there are none. */
3761 else
3762 {
3763 if (!t->operands)
3764 /* We've found a match; break out of loop. */
3765 break;
3766 }
3767
3768 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
3769 into Disp32/Disp16/Disp32 operand. */
3770 if (i.prefix[ADDR_PREFIX] != 0)
3771 {
3772 /* There should be only one Disp operand. */
3773 switch (flag_code)
3774 {
3775 case CODE_16BIT:
3776 for (j = 0; j < MAX_OPERANDS; j++)
3777 {
3778 if (operand_types[j].bitfield.disp16)
3779 {
3780 addr_prefix_disp = j;
3781 operand_types[j].bitfield.disp32 = 1;
3782 operand_types[j].bitfield.disp16 = 0;
3783 break;
3784 }
3785 }
3786 break;
3787 case CODE_32BIT:
3788 for (j = 0; j < MAX_OPERANDS; j++)
3789 {
3790 if (operand_types[j].bitfield.disp32)
3791 {
3792 addr_prefix_disp = j;
3793 operand_types[j].bitfield.disp32 = 0;
3794 operand_types[j].bitfield.disp16 = 1;
3795 break;
3796 }
3797 }
3798 break;
3799 case CODE_64BIT:
3800 for (j = 0; j < MAX_OPERANDS; j++)
3801 {
3802 if (operand_types[j].bitfield.disp64)
3803 {
3804 addr_prefix_disp = j;
3805 operand_types[j].bitfield.disp64 = 0;
3806 operand_types[j].bitfield.disp32 = 1;
3807 break;
3808 }
3809 }
3810 break;
3811 }
3812 }
3813
3814 /* We check register size only if size of operands can be
3815 encoded the canonical way. */
3816 check_register = t->opcode_modifier.w;
3817 overlap0 = operand_type_and (i.types[0], operand_types[0]);
3818 switch (t->operands)
3819 {
3820 case 1:
3821 if (!operand_type_match (overlap0, i.types[0]))
3822 continue;
3823 break;
3824 case 2:
3825 /* xchg %eax, %eax is a special case. It is an alias for nop
3826 only in 32bit mode and we can use opcode 0x90. In 64bit
3827 mode, we can't use 0x90 for xchg %eax, %eax since it should
3828 zero-extend %eax to %rax. */
3829 if (flag_code == CODE_64BIT
3830 && t->base_opcode == 0x90
3831 && operand_type_equal (&i.types [0], &acc32)
3832 && operand_type_equal (&i.types [1], &acc32))
3833 continue;
3834 if (i.swap_operand)
3835 {
3836 /* If we swap operand in encoding, we either match
3837 the next one or reverse direction of operands. */
3838 if (t->opcode_modifier.s)
3839 continue;
3840 else if (t->opcode_modifier.d)
3841 goto check_reverse;
3842 }
3843
3844 case 3:
3845 /* If we swap operand in encoding, we match the next one. */
3846 if (i.swap_operand && t->opcode_modifier.s)
3847 continue;
3848 case 4:
3849 case 5:
3850 overlap1 = operand_type_and (i.types[1], operand_types[1]);
3851 if (!operand_type_match (overlap0, i.types[0])
3852 || !operand_type_match (overlap1, i.types[1])
3853 || (check_register
3854 && !operand_type_register_match (overlap0, i.types[0],
3855 operand_types[0],
3856 overlap1, i.types[1],
3857 operand_types[1])))
3858 {
3859 /* Check if other direction is valid ... */
3860 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
3861 continue;
3862
3863 check_reverse:
3864 /* Try reversing direction of operands. */
3865 overlap0 = operand_type_and (i.types[0], operand_types[1]);
3866 overlap1 = operand_type_and (i.types[1], operand_types[0]);
3867 if (!operand_type_match (overlap0, i.types[0])
3868 || !operand_type_match (overlap1, i.types[1])
3869 || (check_register
3870 && !operand_type_register_match (overlap0,
3871 i.types[0],
3872 operand_types[1],
3873 overlap1,
3874 i.types[1],
3875 operand_types[0])))
3876 {
3877 /* Does not match either direction. */
3878 continue;
3879 }
3880 /* found_reverse_match holds which of D or FloatDR
3881 we've found. */
3882 if (t->opcode_modifier.d)
3883 found_reverse_match = Opcode_D;
3884 else if (t->opcode_modifier.floatd)
3885 found_reverse_match = Opcode_FloatD;
3886 else
3887 found_reverse_match = 0;
3888 if (t->opcode_modifier.floatr)
3889 found_reverse_match |= Opcode_FloatR;
3890 }
3891 else
3892 {
3893 /* Found a forward 2 operand match here. */
3894 switch (t->operands)
3895 {
3896 case 5:
3897 overlap4 = operand_type_and (i.types[4],
3898 operand_types[4]);
3899 case 4:
3900 overlap3 = operand_type_and (i.types[3],
3901 operand_types[3]);
3902 case 3:
3903 overlap2 = operand_type_and (i.types[2],
3904 operand_types[2]);
3905 break;
3906 }
3907
3908 switch (t->operands)
3909 {
3910 case 5:
3911 if (!operand_type_match (overlap4, i.types[4])
3912 || !operand_type_register_match (overlap3,
3913 i.types[3],
3914 operand_types[3],
3915 overlap4,
3916 i.types[4],
3917 operand_types[4]))
3918 continue;
3919 case 4:
3920 if (!operand_type_match (overlap3, i.types[3])
3921 || (check_register
3922 && !operand_type_register_match (overlap2,
3923 i.types[2],
3924 operand_types[2],
3925 overlap3,
3926 i.types[3],
3927 operand_types[3])))
3928 continue;
3929 case 3:
3930 /* Here we make use of the fact that there are no
3931 reverse match 3 operand instructions, and all 3
3932 operand instructions only need to be checked for
3933 register consistency between operands 2 and 3. */
3934 if (!operand_type_match (overlap2, i.types[2])
3935 || (check_register
3936 && !operand_type_register_match (overlap1,
3937 i.types[1],
3938 operand_types[1],
3939 overlap2,
3940 i.types[2],
3941 operand_types[2])))
3942 continue;
3943 break;
3944 }
3945 }
3946 /* Found either forward/reverse 2, 3 or 4 operand match here:
3947 slip through to break. */
3948 }
3949 if (!found_cpu_match)
3950 {
3951 found_reverse_match = 0;
3952 continue;
3953 }
3954
3955 /* We've found a match; break out of loop. */
3956 break;
3957 }
3958
3959 if (t == current_templates->end)
3960 {
3961 /* We found no match. */
3962 if (intel_syntax)
3963 as_bad (_("ambiguous operand size or operands invalid for `%s'"),
3964 current_templates->start->name);
3965 else
3966 as_bad (_("suffix or operands invalid for `%s'"),
3967 current_templates->start->name);
3968 return NULL;
3969 }
3970
3971 if (!quiet_warnings)
3972 {
3973 if (!intel_syntax
3974 && (i.types[0].bitfield.jumpabsolute
3975 != operand_types[0].bitfield.jumpabsolute))
3976 {
3977 as_warn (_("indirect %s without `*'"), t->name);
3978 }
3979
3980 if (t->opcode_modifier.isprefix
3981 && t->opcode_modifier.ignoresize)
3982 {
3983 /* Warn them that a data or address size prefix doesn't
3984 affect assembly of the next line of code. */
3985 as_warn (_("stand-alone `%s' prefix"), t->name);
3986 }
3987 }
3988
3989 /* Copy the template we found. */
3990 i.tm = *t;
3991
3992 if (addr_prefix_disp != -1)
3993 i.tm.operand_types[addr_prefix_disp]
3994 = operand_types[addr_prefix_disp];
3995
3996 if (found_reverse_match)
3997 {
3998 /* If we found a reverse match we must alter the opcode
3999 direction bit. found_reverse_match holds bits to change
4000 (different for int & float insns). */
4001
4002 i.tm.base_opcode ^= found_reverse_match;
4003
4004 i.tm.operand_types[0] = operand_types[1];
4005 i.tm.operand_types[1] = operand_types[0];
4006 }
4007
4008 return t;
4009 }
4010
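/* Verify segment use on string instructions: an operand that the template
   ties to %es must not carry a different segment override.  When that
   operand comes first, the override from the second operand is copied into
   i.seg[0] so the common (non-string) code can use it.  Returns 0 on
   error, 1 on success.  */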
4011 static int
4012 check_string (void)
4013 {
4014 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4015 if (i.tm.operand_types[mem_op].bitfield.esseg)
4016 {
4017 if (i.seg[0] != NULL && i.seg[0] != &es)
4018 {
4019 as_bad (_("`%s' operand %d must use `%ses' segment"),
4020 i.tm.name,
4021 mem_op + 1,
4022 register_prefix);
4023 return 0;
4024 }
4025 /* There's only ever one segment override allowed per instruction.
4026 This instruction possibly has a legal segment override on the
4027 second operand, so copy the segment to where non-string
4028 instructions store it, allowing common code. */
4029 i.seg[0] = i.seg[1];
4030 }
4031 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4032 {
4033 if (i.seg[1] != NULL && i.seg[1] != &es)
4034 {
4035 as_bad (_("`%s' operand %d must use `%ses' segment"),
4036 i.tm.name,
4037 mem_op + 2,
4038 register_prefix);
4039 return 0;
4040 }
4041 }
4042 return 1;
4043 }
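/* Illustrative note: for string instructions the operand marked esseg
implicitly uses %es:(%edi), and that segment cannot be overridden.
E.g. "movsl %ds:(%esi), %es:(%edi)" is accepted (the %ds override is
the legal one), while "movsl %ds:(%esi), %fs:(%edi)" is rejected by
check_string above. */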
4044
4045 static int
4046 process_suffix (void)
4047 {
4048 /* If matched instruction specifies an explicit instruction mnemonic
4049 suffix, use it. */
4050 if (i.tm.opcode_modifier.size16)
4051 i.suffix = WORD_MNEM_SUFFIX;
4052 else if (i.tm.opcode_modifier.size32)
4053 i.suffix = LONG_MNEM_SUFFIX;
4054 else if (i.tm.opcode_modifier.size64)
4055 i.suffix = QWORD_MNEM_SUFFIX;
4056 else if (i.reg_operands)
4057 {
4058 /* If there's no instruction mnemonic suffix we try to invent one
4059 based on register operands. */
4060 if (!i.suffix)
4061 {
4062 /* We take i.suffix from the last register operand specified.
4063 Destination register type is more significant than source
4064 register type. crc32 in SSE4.2 prefers source register
4065 type. */
4066 if (i.tm.base_opcode == 0xf20f38f1)
4067 {
4068 if (i.types[0].bitfield.reg16)
4069 i.suffix = WORD_MNEM_SUFFIX;
4070 else if (i.types[0].bitfield.reg32)
4071 i.suffix = LONG_MNEM_SUFFIX;
4072 else if (i.types[0].bitfield.reg64)
4073 i.suffix = QWORD_MNEM_SUFFIX;
4074 }
4075 else if (i.tm.base_opcode == 0xf20f38f0)
4076 {
4077 if (i.types[0].bitfield.reg8)
4078 i.suffix = BYTE_MNEM_SUFFIX;
4079 }
4080
4081 if (!i.suffix)
4082 {
4083 int op;
4084
4085 if (i.tm.base_opcode == 0xf20f38f1
4086 || i.tm.base_opcode == 0xf20f38f0)
4087 {
4088 /* We have to know the operand size for crc32. */
4089 as_bad (_("ambiguous memory operand size for `%s'"),
4090 i.tm.name);
4091 return 0;
4092 }
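/* Example (illustrative): "crc32 (%ecx), %ebx" is rejected here because
the memory operand size is unknown; the user must write
"crc32l (%ecx), %ebx" (0xf20f38f1) or "crc32b (%ecx), %bl"
(0xf20f38f0) instead. */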
4093
4094 for (op = i.operands; --op >= 0;)
4095 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4096 {
4097 if (i.types[op].bitfield.reg8)
4098 {
4099 i.suffix = BYTE_MNEM_SUFFIX;
4100 break;
4101 }
4102 else if (i.types[op].bitfield.reg16)
4103 {
4104 i.suffix = WORD_MNEM_SUFFIX;
4105 break;
4106 }
4107 else if (i.types[op].bitfield.reg32)
4108 {
4109 i.suffix = LONG_MNEM_SUFFIX;
4110 break;
4111 }
4112 else if (i.types[op].bitfield.reg64)
4113 {
4114 i.suffix = QWORD_MNEM_SUFFIX;
4115 break;
4116 }
4117 }
4118 }
4119 }
4120 else if (i.suffix == BYTE_MNEM_SUFFIX)
4121 {
4122 if (!check_byte_reg ())
4123 return 0;
4124 }
4125 else if (i.suffix == LONG_MNEM_SUFFIX)
4126 {
4127 if (!check_long_reg ())
4128 return 0;
4129 }
4130 else if (i.suffix == QWORD_MNEM_SUFFIX)
4131 {
4132 if (intel_syntax
4133 && i.tm.opcode_modifier.ignoresize
4134 && i.tm.opcode_modifier.no_qsuf)
4135 i.suffix = 0;
4136 else if (!check_qword_reg ())
4137 return 0;
4138 }
4139 else if (i.suffix == WORD_MNEM_SUFFIX)
4140 {
4141 if (!check_word_reg ())
4142 return 0;
4143 }
4144 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4145 || i.suffix == YMMWORD_MNEM_SUFFIX)
4146 {
4147 /* Skip if the instruction has x/y suffix. match_template
4148 should check if it is a valid suffix. */
4149 }
4150 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4151 /* Do nothing if the instruction is going to ignore the prefix. */
4152 ;
4153 else
4154 abort ();
4155 }
4156 else if (i.tm.opcode_modifier.defaultsize
4157 && !i.suffix
4158 /* exclude fldenv/frstor/fsave/fstenv */
4159 && i.tm.opcode_modifier.no_ssuf)
4160 {
4161 i.suffix = stackop_size;
4162 }
4163 else if (intel_syntax
4164 && !i.suffix
4165 && (i.tm.operand_types[0].bitfield.jumpabsolute
4166 || i.tm.opcode_modifier.jumpbyte
4167 || i.tm.opcode_modifier.jumpintersegment
4168 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4169 && i.tm.extension_opcode <= 3)))
4170 {
4171 switch (flag_code)
4172 {
4173 case CODE_64BIT:
4174 if (!i.tm.opcode_modifier.no_qsuf)
4175 {
4176 i.suffix = QWORD_MNEM_SUFFIX;
4177 break;
4178 }
4179 case CODE_32BIT:
4180 if (!i.tm.opcode_modifier.no_lsuf)
4181 i.suffix = LONG_MNEM_SUFFIX;
4182 break;
4183 case CODE_16BIT:
4184 if (!i.tm.opcode_modifier.no_wsuf)
4185 i.suffix = WORD_MNEM_SUFFIX;
4186 break;
4187 }
4188 }
4189
4190 if (!i.suffix)
4191 {
4192 if (!intel_syntax)
4193 {
4194 if (i.tm.opcode_modifier.w)
4195 {
4196 as_bad (_("no instruction mnemonic suffix given and "
4197 "no register operands; can't size instruction"));
4198 return 0;
4199 }
4200 }
4201 else
4202 {
4203 unsigned int suffixes;
4204
4205 suffixes = !i.tm.opcode_modifier.no_bsuf;
4206 if (!i.tm.opcode_modifier.no_wsuf)
4207 suffixes |= 1 << 1;
4208 if (!i.tm.opcode_modifier.no_lsuf)
4209 suffixes |= 1 << 2;
4210 if (!i.tm.opcode_modifier.no_ldsuf)
4211 suffixes |= 1 << 3;
4212 if (!i.tm.opcode_modifier.no_ssuf)
4213 suffixes |= 1 << 4;
4214 if (!i.tm.opcode_modifier.no_qsuf)
4215 suffixes |= 1 << 5;
4216
4217 /* There is more than one possible suffix match. */
4218 if (i.tm.opcode_modifier.w
4219 || ((suffixes & (suffixes - 1))
4220 && !i.tm.opcode_modifier.defaultsize
4221 && !i.tm.opcode_modifier.ignoresize))
4222 {
4223 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4224 return 0;
4225 }
4226 }
4227 }
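/* Examples (illustrative): in AT&T mode "mov $1, (%eax)" reaches the
first branch above and is rejected because neither a suffix nor a
register operand pins the size. In Intel mode "mov [eax], 1" is
similarly ambiguous, while "mov dword ptr [eax], 1" supplies the size
and assembles cleanly. */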
4228
4229 /* Change the opcode based on the operand size given by i.suffix;
4230 We don't need to change things for byte insns. */
4231
4232 if (i.suffix
4233 && i.suffix != BYTE_MNEM_SUFFIX
4234 && i.suffix != XMMWORD_MNEM_SUFFIX
4235 && i.suffix != YMMWORD_MNEM_SUFFIX)
4236 {
4237 /* It's not a byte, select word/dword operation. */
4238 if (i.tm.opcode_modifier.w)
4239 {
4240 if (i.tm.opcode_modifier.shortform)
4241 i.tm.base_opcode |= 8;
4242 else
4243 i.tm.base_opcode |= 1;
4244 }
4245
4246 /* Now select between word & dword operations via the operand
4247 size prefix, except for instructions that will ignore this
4248 prefix anyway. */
4249 if (i.tm.opcode_modifier.addrprefixop0)
4250 {
4251 /* The address size override prefix changes the size of the
4252 first operand. */
4253 if ((flag_code == CODE_32BIT
4254 && i.op->regs[0].reg_type.bitfield.reg16)
4255 || (flag_code != CODE_32BIT
4256 && i.op->regs[0].reg_type.bitfield.reg32))
4257 if (!add_prefix (ADDR_PREFIX_OPCODE))
4258 return 0;
4259 }
4260 else if (i.suffix != QWORD_MNEM_SUFFIX
4261 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4262 && !i.tm.opcode_modifier.ignoresize
4263 && !i.tm.opcode_modifier.floatmf
4264 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4265 || (flag_code == CODE_64BIT
4266 && i.tm.opcode_modifier.jumpbyte)))
4267 {
4268 unsigned int prefix = DATA_PREFIX_OPCODE;
4269
4270 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4271 prefix = ADDR_PREFIX_OPCODE;
4272
4273 if (!add_prefix (prefix))
4274 return 0;
4275 }
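/* Example (illustrative): in 32-bit code "movw %ax, %bx" gets a 0x66
operand-size prefix (66 89 c3), and in 16-bit code "movl %eax, %ebx"
needs the same prefix to select the 32-bit operation. Byte-range
loop/jcxz style branches take the 0x67 address-size prefix instead,
as handled just above. */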
4276
4277 /* Set mode64 for an operand. */
4278 if (i.suffix == QWORD_MNEM_SUFFIX
4279 && flag_code == CODE_64BIT
4280 && !i.tm.opcode_modifier.norex64)
4281 {
4282 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4283 need rex64. cmpxchg8b is also a special case. */
4284 if (! (i.operands == 2
4285 && i.tm.base_opcode == 0x90
4286 && i.tm.extension_opcode == None
4287 && operand_type_equal (&i.types [0], &acc64)
4288 && operand_type_equal (&i.types [1], &acc64))
4289 && ! (i.operands == 1
4290 && i.tm.base_opcode == 0xfc7
4291 && i.tm.extension_opcode == 1
4292 && !operand_type_check (i.types [0], reg)
4293 && operand_type_check (i.types [0], anymem)))
4294 i.rex |= REX_W;
4295 }
4296
4297 /* Size floating point instruction. */
4298 if (i.suffix == LONG_MNEM_SUFFIX)
4299 if (i.tm.opcode_modifier.floatmf)
4300 i.tm.base_opcode ^= 4;
4301 }
4302
4303 return 1;
4304 }
4305
4306 static int
4307 check_byte_reg (void)
4308 {
4309 int op;
4310
4311 for (op = i.operands; --op >= 0;)
4312 {
4313 /* If this is an eight bit register, it's OK. If it's the 16 or
4314 32 bit version of an eight bit register, we will just use the
4315 low portion, and that's OK too. */
4316 if (i.types[op].bitfield.reg8)
4317 continue;
4318
4319 /* Don't generate this warning if not needed. */
4320 if (intel_syntax && i.tm.opcode_modifier.byteokintel)
4321 continue;
4322
4323 /* crc32 doesn't generate this warning. */
4324 if (i.tm.base_opcode == 0xf20f38f0)
4325 continue;
4326
4327 if ((i.types[op].bitfield.reg16
4328 || i.types[op].bitfield.reg32
4329 || i.types[op].bitfield.reg64)
4330 && i.op[op].regs->reg_num < 4)
4331 {
4332 /* Prohibit these changes in the 64bit mode, since the
4333 lowering is more complicated. */
4334 if (flag_code == CODE_64BIT
4335 && !i.tm.operand_types[op].bitfield.inoutportreg)
4336 {
4337 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4338 register_prefix, i.op[op].regs->reg_name,
4339 i.suffix);
4340 return 0;
4341 }
4342 #if REGISTER_WARNINGS
4343 if (!quiet_warnings
4344 && !i.tm.operand_types[op].bitfield.inoutportreg)
4345 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4346 register_prefix,
4347 (i.op[op].regs + (i.types[op].bitfield.reg16
4348 ? REGNAM_AL - REGNAM_AX
4349 : REGNAM_AL - REGNAM_EAX))->reg_name,
4350 register_prefix,
4351 i.op[op].regs->reg_name,
4352 i.suffix);
4353 #endif
4354 continue;
4355 }
4356 /* Any other register is bad. */
4357 if (i.types[op].bitfield.reg16
4358 || i.types[op].bitfield.reg32
4359 || i.types[op].bitfield.reg64
4360 || i.types[op].bitfield.regmmx
4361 || i.types[op].bitfield.regxmm
4362 || i.types[op].bitfield.regymm
4363 || i.types[op].bitfield.sreg2
4364 || i.types[op].bitfield.sreg3
4365 || i.types[op].bitfield.control
4366 || i.types[op].bitfield.debug
4367 || i.types[op].bitfield.test
4368 || i.types[op].bitfield.floatreg
4369 || i.types[op].bitfield.floatacc)
4370 {
4371 as_bad (_("`%s%s' not allowed with `%s%c'"),
4372 register_prefix,
4373 i.op[op].regs->reg_name,
4374 i.tm.name,
4375 i.suffix);
4376 return 0;
4377 }
4378 }
4379 return 1;
4380 }
4381
4382 static int
4383 check_long_reg (void)
4384 {
4385 int op;
4386
4387 for (op = i.operands; --op >= 0;)
4388 /* Reject eight bit registers, except where the template requires
4389 them. (eg. movzb) */
4390 if (i.types[op].bitfield.reg8
4391 && (i.tm.operand_types[op].bitfield.reg16
4392 || i.tm.operand_types[op].bitfield.reg32
4393 || i.tm.operand_types[op].bitfield.acc))
4394 {
4395 as_bad (_("`%s%s' not allowed with `%s%c'"),
4396 register_prefix,
4397 i.op[op].regs->reg_name,
4398 i.tm.name,
4399 i.suffix);
4400 return 0;
4401 }
4402 /* Warn if the e prefix on a general reg is missing. */
4403 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4404 && i.types[op].bitfield.reg16
4405 && (i.tm.operand_types[op].bitfield.reg32
4406 || i.tm.operand_types[op].bitfield.acc))
4407 {
4408 /* Prohibit these changes in the 64bit mode, since the
4409 lowering is more complicated. */
4410 if (flag_code == CODE_64BIT)
4411 {
4412 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4413 register_prefix, i.op[op].regs->reg_name,
4414 i.suffix);
4415 return 0;
4416 }
4417 #if REGISTER_WARNINGS
4418 else
4419 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4420 register_prefix,
4421 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4422 register_prefix,
4423 i.op[op].regs->reg_name,
4424 i.suffix);
4425 #endif
4426 }
4427 /* Warn if the r prefix on a general reg is missing. */
4428 else if (i.types[op].bitfield.reg64
4429 && (i.tm.operand_types[op].bitfield.reg32
4430 || i.tm.operand_types[op].bitfield.acc))
4431 {
4432 if (intel_syntax
4433 && i.tm.opcode_modifier.toqword
4434 && !i.types[0].bitfield.regxmm)
4435 {
4436 /* Convert to QWORD. We want REX byte. */
4437 i.suffix = QWORD_MNEM_SUFFIX;
4438 }
4439 else
4440 {
4441 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4442 register_prefix, i.op[op].regs->reg_name,
4443 i.suffix);
4444 return 0;
4445 }
4446 }
4447 return 1;
4448 }
4449
4450 static int
4451 check_qword_reg (void)
4452 {
4453 int op;
4454
4455 for (op = i.operands; --op >= 0; )
4456 /* Reject eight bit registers, except where the template requires
4457 them. (eg. movzb) */
4458 if (i.types[op].bitfield.reg8
4459 && (i.tm.operand_types[op].bitfield.reg16
4460 || i.tm.operand_types[op].bitfield.reg32
4461 || i.tm.operand_types[op].bitfield.acc))
4462 {
4463 as_bad (_("`%s%s' not allowed with `%s%c'"),
4464 register_prefix,
4465 i.op[op].regs->reg_name,
4466 i.tm.name,
4467 i.suffix);
4468 return 0;
4469 }
4470 /* Warn if the r prefix on a general reg is missing. */
4471 else if ((i.types[op].bitfield.reg16
4472 || i.types[op].bitfield.reg32)
4473 && (i.tm.operand_types[op].bitfield.reg32
4474 || i.tm.operand_types[op].bitfield.acc))
4475 {
4476 /* In Intel syntax the ToDword templates allow converting the
4477 operation to a 32-bit one; otherwise the register is rejected. */
4478 if (intel_syntax
4479 && i.tm.opcode_modifier.todword
4480 && !i.types[0].bitfield.regxmm)
4481 {
4482 /* Convert to DWORD. We don't want REX byte. */
4483 i.suffix = LONG_MNEM_SUFFIX;
4484 }
4485 else
4486 {
4487 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4488 register_prefix, i.op[op].regs->reg_name,
4489 i.suffix);
4490 return 0;
4491 }
4492 }
4493 return 1;
4494 }
4495
4496 static int
4497 check_word_reg (void)
4498 {
4499 int op;
4500 for (op = i.operands; --op >= 0;)
4501 /* Reject eight bit registers, except where the template requires
4502 them. (eg. movzb) */
4503 if (i.types[op].bitfield.reg8
4504 && (i.tm.operand_types[op].bitfield.reg16
4505 || i.tm.operand_types[op].bitfield.reg32
4506 || i.tm.operand_types[op].bitfield.acc))
4507 {
4508 as_bad (_("`%s%s' not allowed with `%s%c'"),
4509 register_prefix,
4510 i.op[op].regs->reg_name,
4511 i.tm.name,
4512 i.suffix);
4513 return 0;
4514 }
4515 /* Warn if the e prefix on a general reg is present. */
4516 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4517 && i.types[op].bitfield.reg32
4518 && (i.tm.operand_types[op].bitfield.reg16
4519 || i.tm.operand_types[op].bitfield.acc))
4520 {
4521 /* Prohibit these changes in the 64bit mode, since the
4522 lowering is more complicated. */
4523 if (flag_code == CODE_64BIT)
4524 {
4525 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4526 register_prefix, i.op[op].regs->reg_name,
4527 i.suffix);
4528 return 0;
4529 }
4530 else
4531 #if REGISTER_WARNINGS
4532 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4533 register_prefix,
4534 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4535 register_prefix,
4536 i.op[op].regs->reg_name,
4537 i.suffix);
4538 #endif
4539 }
4540 return 1;
4541 }
4542
4543 static int
4544 update_imm (unsigned int j)
4545 {
4546 i386_operand_type overlap = i.types[j];
4547 if ((overlap.bitfield.imm8
4548 || overlap.bitfield.imm8s
4549 || overlap.bitfield.imm16
4550 || overlap.bitfield.imm32
4551 || overlap.bitfield.imm32s
4552 || overlap.bitfield.imm64)
4553 && !operand_type_equal (&overlap, &imm8)
4554 && !operand_type_equal (&overlap, &imm8s)
4555 && !operand_type_equal (&overlap, &imm16)
4556 && !operand_type_equal (&overlap, &imm32)
4557 && !operand_type_equal (&overlap, &imm32s)
4558 && !operand_type_equal (&overlap, &imm64))
4559 {
4560 if (i.suffix)
4561 {
4562 i386_operand_type temp;
4563
4564 operand_type_set (&temp, 0);
4565 if (i.suffix == BYTE_MNEM_SUFFIX)
4566 {
4567 temp.bitfield.imm8 = overlap.bitfield.imm8;
4568 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4569 }
4570 else if (i.suffix == WORD_MNEM_SUFFIX)
4571 temp.bitfield.imm16 = overlap.bitfield.imm16;
4572 else if (i.suffix == QWORD_MNEM_SUFFIX)
4573 {
4574 temp.bitfield.imm64 = overlap.bitfield.imm64;
4575 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4576 }
4577 else
4578 temp.bitfield.imm32 = overlap.bitfield.imm32;
4579 overlap = temp;
4580 }
4581 else if (operand_type_equal (&overlap, &imm16_32_32s)
4582 || operand_type_equal (&overlap, &imm16_32)
4583 || operand_type_equal (&overlap, &imm16_32s))
4584 {
4585 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4586 overlap = imm16;
4587 else
4588 overlap = imm32s;
4589 }
4590 if (!operand_type_equal (&overlap, &imm8)
4591 && !operand_type_equal (&overlap, &imm8s)
4592 && !operand_type_equal (&overlap, &imm16)
4593 && !operand_type_equal (&overlap, &imm32)
4594 && !operand_type_equal (&overlap, &imm32s)
4595 && !operand_type_equal (&overlap, &imm64))
4596 {
4597 as_bad (_("no instruction mnemonic suffix given; "
4598 "can't determine immediate size"));
4599 return 0;
4600 }
4601 }
4602 i.types[j] = overlap;
4603
4604 return 1;
4605 }
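/* Worked example (illustrative): for "addw $0x1234, %bx" the 'w' suffix
narrows the matched immediate type to Imm16, so exactly two immediate
bytes (34 12) are emitted later by output_imm. Without a suffix and
with no sizing register, the imm16/imm32 ambiguity above is resolved
from the current code size and any data-size prefix. */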
4606
4607 static int
4608 finalize_imm (void)
4609 {
4610 unsigned int j, n;
4611
4612 /* Update the first 2 immediate operands. */
4613 n = i.operands > 2 ? 2 : i.operands;
4614 if (n)
4615 {
4616 for (j = 0; j < n; j++)
4617 if (update_imm (j) == 0)
4618 return 0;
4619
4620 /* The 3rd operand can't be immediate operand. */
4621 gas_assert (operand_type_check (i.types[2], imm) == 0);
4622 }
4623
4624 return 1;
4625 }
4626
4627 static int
4628 bad_implicit_operand (int xmm)
4629 {
4630 const char *reg = xmm ? "xmm0" : "ymm0";
4631 if (intel_syntax)
4632 as_bad (_("the last operand of `%s' must be `%s%s'"),
4633 i.tm.name, register_prefix, reg);
4634 else
4635 as_bad (_("the first operand of `%s' must be `%s%s'"),
4636 i.tm.name, register_prefix, reg);
4637 return 0;
4638 }
4639
4640 static int
4641 process_operands (void)
4642 {
4643 /* Default segment register this instruction will use for memory
4644 accesses. 0 means unknown. This is only for optimizing out
4645 unnecessary segment overrides. */
4646 const seg_entry *default_seg = 0;
4647
4648 if (i.tm.opcode_modifier.sse2avx
4649 && (i.tm.opcode_modifier.vexnds
4650 || i.tm.opcode_modifier.vexndd))
4651 {
4652 unsigned int dup = i.operands;
4653 unsigned int dest = dup - 1;
4654 unsigned int j;
4655
4656 /* The destination must be an xmm register. */
4657 gas_assert (i.reg_operands
4658 && MAX_OPERANDS > dup
4659 && operand_type_equal (&i.types[dest], &regxmm));
4660
4661 if (i.tm.opcode_modifier.firstxmm0)
4662 {
4663 /* The first operand is implicit and must be xmm0. */
4664 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4665 if (i.op[0].regs->reg_num != 0)
4666 return bad_implicit_operand (1);
4667
4668 if (i.tm.opcode_modifier.vex3sources)
4669 {
4670 /* Keep xmm0 for instructions with VEX prefix and 3
4671 sources. */
4672 goto duplicate;
4673 }
4674 else
4675 {
4676 /* We remove the first xmm0 and keep the number of
4677 operands unchanged, which in fact duplicates the
4678 destination. */
4679 for (j = 1; j < i.operands; j++)
4680 {
4681 i.op[j - 1] = i.op[j];
4682 i.types[j - 1] = i.types[j];
4683 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4684 }
4685 }
4686 }
4687 else if (i.tm.opcode_modifier.implicit1stxmm0)
4688 {
4689 gas_assert ((MAX_OPERANDS - 1) > dup
4690 && i.tm.opcode_modifier.vex3sources);
4691
4692 /* Add the implicit xmm0 for instructions with VEX prefix
4693 and 3 sources. */
4694 for (j = i.operands; j > 0; j--)
4695 {
4696 i.op[j] = i.op[j - 1];
4697 i.types[j] = i.types[j - 1];
4698 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4699 }
4700 i.op[0].regs
4701 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4702 i.types[0] = regxmm;
4703 i.tm.operand_types[0] = regxmm;
4704
4705 i.operands += 2;
4706 i.reg_operands += 2;
4707 i.tm.operands += 2;
4708
4709 dup++;
4710 dest++;
4711 i.op[dup] = i.op[dest];
4712 i.types[dup] = i.types[dest];
4713 i.tm.operand_types[dup] = i.tm.operand_types[dest];
4714 }
4715 else
4716 {
4717 duplicate:
4718 i.operands++;
4719 i.reg_operands++;
4720 i.tm.operands++;
4721
4722 i.op[dup] = i.op[dest];
4723 i.types[dup] = i.types[dest];
4724 i.tm.operand_types[dup] = i.tm.operand_types[dest];
4725 }
4726
4727 if (i.tm.opcode_modifier.immext)
4728 process_immext ();
4729 }
4730 else if (i.tm.opcode_modifier.firstxmm0)
4731 {
4732 unsigned int j;
4733
4734 /* The first operand is implicit and must be xmm0/ymm0. */
4735 gas_assert (i.reg_operands
4736 && (operand_type_equal (&i.types[0], &regxmm)
4737 || operand_type_equal (&i.types[0], &regymm)));
4738 if (i.op[0].regs->reg_num != 0)
4739 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4740
4741 for (j = 1; j < i.operands; j++)
4742 {
4743 i.op[j - 1] = i.op[j];
4744 i.types[j - 1] = i.types[j];
4745
4746 /* We need to adjust fields in i.tm since they are used by
4747 build_modrm_byte. */
4748 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
4749 }
4750
4751 i.operands--;
4752 i.reg_operands--;
4753 i.tm.operands--;
4754 }
4755 else if (i.tm.opcode_modifier.regkludge)
4756 {
4757 /* The imul $imm, %reg instruction is converted into
4758 imul $imm, %reg, %reg, and the clr %reg instruction
4759 is converted into xor %reg, %reg. */
4760
4761 unsigned int first_reg_op;
4762
4763 if (operand_type_check (i.types[0], reg))
4764 first_reg_op = 0;
4765 else
4766 first_reg_op = 1;
4767 /* Pretend we saw the extra register operand. */
4768 gas_assert (i.reg_operands == 1
4769 && i.op[first_reg_op + 1].regs == 0);
4770 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
4771 i.types[first_reg_op + 1] = i.types[first_reg_op];
4772 i.operands++;
4773 i.reg_operands++;
4774 }
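/* Examples (illustrative): "imul $10, %ecx" becomes
"imul $10, %ecx, %ecx" (6b c9 0a) and "clr %eax" becomes
"xor %eax, %eax" (31 c0); the duplicated register operand added above
is what lets build_modrm_byte encode them normally. */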
4775
4776 if (i.tm.opcode_modifier.shortform)
4777 {
4778 if (i.types[0].bitfield.sreg2
4779 || i.types[0].bitfield.sreg3)
4780 {
4781 if (i.tm.base_opcode == POP_SEG_SHORT
4782 && i.op[0].regs->reg_num == 1)
4783 {
4784 as_bad (_("you can't `pop %scs'"), register_prefix);
4785 return 0;
4786 }
4787 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
4788 if ((i.op[0].regs->reg_flags & RegRex) != 0)
4789 i.rex |= REX_B;
4790 }
4791 else
4792 {
4793 /* The register or float register operand is in operand
4794 0 or 1. */
4795 unsigned int op;
4796
4797 if (i.types[0].bitfield.floatreg
4798 || operand_type_check (i.types[0], reg))
4799 op = 0;
4800 else
4801 op = 1;
4802 /* Register goes in low 3 bits of opcode. */
4803 i.tm.base_opcode |= i.op[op].regs->reg_num;
4804 if ((i.op[op].regs->reg_flags & RegRex) != 0)
4805 i.rex |= REX_B;
4806 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
4807 {
4808 /* Warn about some common errors, but press on regardless.
4809 The first case can be generated by gcc (<= 2.8.1). */
4810 if (i.operands == 2)
4811 {
4812 /* Reversed arguments on faddp, fsubp, etc. */
4813 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
4814 register_prefix, i.op[!intel_syntax].regs->reg_name,
4815 register_prefix, i.op[intel_syntax].regs->reg_name);
4816 }
4817 else
4818 {
4819 /* Extraneous `l' suffix on fp insn. */
4820 as_warn (_("translating to `%s %s%s'"), i.tm.name,
4821 register_prefix, i.op[0].regs->reg_name);
4822 }
4823 }
4824 }
4825 }
4826 else if (i.tm.opcode_modifier.modrm)
4827 {
4828 /* The opcode is completed (modulo i.tm.extension_opcode which
4829 must be put into the modrm byte). Now, we make the modrm and
4830 index base bytes based on all the info we've collected. */
4831
4832 default_seg = build_modrm_byte ();
4833 }
4834 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
4835 {
4836 default_seg = &ds;
4837 }
4838 else if (i.tm.opcode_modifier.isstring)
4839 {
4840 /* For the string instructions that allow a segment override
4841 on one of their operands, the default segment is ds. */
4842 default_seg = &ds;
4843 }
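/* Example (illustrative): short-form encodings put the register in the
low three opcode bits, so "push %ebx" assembles to 0x53 (0x50 | 3)
and, in 64-bit code, "push %r11" to 41 53 with the REX.B bit
supplying the high register bit as set above. */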
4844
4845 if (i.tm.base_opcode == 0x8d /* lea */
4846 && i.seg[0]
4847 && !quiet_warnings)
4848 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
4849
4850 /* If a segment was explicitly specified, and the specified segment
4851 is not the default, use an opcode prefix to select it. If we
4852 never figured out what the default segment is, then default_seg
4853 will be zero at this point, and the specified segment prefix will
4854 always be used. */
4855 if ((i.seg[0]) && (i.seg[0] != default_seg))
4856 {
4857 if (!add_prefix (i.seg[0]->seg_prefix))
4858 return 0;
4859 }
4860 return 1;
4861 }
4862
4863 static const seg_entry *
4864 build_modrm_byte (void)
4865 {
4866 const seg_entry *default_seg = 0;
4867 unsigned int source, dest;
4868 int vex_3_sources;
4869
4870 /* The first operand of instructions with VEX prefix and 3 sources
4871 must be VEX_Imm4. */
4872 vex_3_sources = i.tm.opcode_modifier.vex3sources;
4873 if (vex_3_sources)
4874 {
4875 unsigned int nds, reg;
4876
4877 if (i.tm.opcode_modifier.veximmext
4878 && i.tm.opcode_modifier.immext)
4879 {
4880 dest = i.operands - 2;
4881 gas_assert (dest == 3);
4882 }
4883 else
4884 dest = i.operands - 1;
4885 nds = dest - 1;
4886
4887 /* This instruction must have 4 register operands
4888 or 3 register operands plus 1 memory operand.
4889 It must have VexNDS and VexImmExt. */
4890 gas_assert ((i.reg_operands == 4
4891 || (i.reg_operands == 3 && i.mem_operands == 1))
4892 && i.tm.opcode_modifier.vexnds
4893 && i.tm.opcode_modifier.veximmext
4894 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
4895 || operand_type_equal (&i.tm.operand_types[dest], &regymm)));
4896
4897 /* Generate an 8bit immediate operand to encode the register
4898 operand. */
4899 expressionS *exp = &im_expressions[i.imm_operands++];
4900 i.op[i.operands].imms = exp;
4901 i.types[i.operands] = imm8;
4902 i.operands++;
4903 /* If VexW1 is set, the first operand is the source and
4904 the second operand is encoded in the immediate operand. */
4905 if (i.tm.opcode_modifier.vexw1)
4906 {
4907 source = 0;
4908 reg = 1;
4909 }
4910 else
4911 {
4912 source = 1;
4913 reg = 0;
4914 }
4915 /* FMA4 swaps REG and NDS. */
4916 if (i.tm.cpu_flags.bitfield.cpufma4)
4917 {
4918 unsigned int tmp;
4919 tmp = reg;
4920 reg = nds;
4921 nds = tmp;
4922 }
4923 gas_assert ((operand_type_equal (&i.tm.operand_types[reg], &regxmm)
4924 || operand_type_equal (&i.tm.operand_types[reg],
4925 &regymm))
4926 && (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
4927 || operand_type_equal (&i.tm.operand_types[nds],
4928 &regymm)));
4929 exp->X_op = O_constant;
4930 exp->X_add_number
4931 = ((i.op[reg].regs->reg_num
4932 + ((i.op[reg].regs->reg_flags & RegRex) ? 8 : 0)) << 4);
4933 i.vex.register_specifier = i.op[nds].regs;
4934 }
4935 else
4936 source = dest = 0;
4937
4938 /* i.reg_operands MUST be the number of real register operands;
4939 implicit registers do not count. If there are 3 register
4940 operands, it must be an instruction with VexNDS. For an
4941 instruction with VexNDD, the destination register is encoded
4942 in the VEX prefix. If there are 4 register operands, it must be
4943 an instruction with a VEX prefix and 3 sources. */
4944 if (i.mem_operands == 0
4945 && ((i.reg_operands == 2
4946 && !i.tm.opcode_modifier.vexndd)
4947 || (i.reg_operands == 3
4948 && i.tm.opcode_modifier.vexnds)
4949 || (i.reg_operands == 4 && vex_3_sources)))
4950 {
4951 switch (i.operands)
4952 {
4953 case 2:
4954 source = 0;
4955 break;
4956 case 3:
4957 /* When there are 3 operands, one of them may be immediate,
4958 which may be the first or the last operand. Otherwise,
4959 the first operand must be the shift count register (cl) or it
4960 is an instruction with VexNDS. */
4961 gas_assert (i.imm_operands == 1
4962 || (i.imm_operands == 0
4963 && (i.tm.opcode_modifier.vexnds
4964 || i.types[0].bitfield.shiftcount)));
4965 if (operand_type_check (i.types[0], imm)
4966 || i.types[0].bitfield.shiftcount)
4967 source = 1;
4968 else
4969 source = 0;
4970 break;
4971 case 4:
4972 /* When there are 4 operands, the first two must be 8bit
4973 immediate operands. The source operand will be the 3rd
4974 one.
4975
4976 For instructions with VexNDS, if the first operand is
4977 an imm8, the source operand is the 2nd one. If the last
4978 operand is imm8, the source operand is the first one. */
4979 gas_assert ((i.imm_operands == 2
4980 && i.types[0].bitfield.imm8
4981 && i.types[1].bitfield.imm8)
4982 || (i.tm.opcode_modifier.vexnds
4983 && i.imm_operands == 1
4984 && (i.types[0].bitfield.imm8
4985 || i.types[i.operands - 1].bitfield.imm8)));
4986 if (i.tm.opcode_modifier.vexnds)
4987 {
4988 if (i.types[0].bitfield.imm8)
4989 source = 1;
4990 else
4991 source = 0;
4992 }
4993 else
4994 source = 2;
4995 break;
4996 case 5:
4997 break;
4998 default:
4999 abort ();
5000 }
5001
5002 if (!vex_3_sources)
5003 {
5004 dest = source + 1;
5005
5006 if (i.tm.opcode_modifier.vexnds)
5007 {
5008 /* For instructions with VexNDS, the register-only
5009 source operand must be XMM or YMM register. It is
5010 encoded in VEX prefix. We need to clear RegMem bit
5011 before calling operand_type_equal. */
5012 i386_operand_type op = i.tm.operand_types[dest];
5013 op.bitfield.regmem = 0;
5014 if ((dest + 1) >= i.operands
5015 || (!operand_type_equal (&op, &regxmm)
5016 && !operand_type_equal (&op, &regymm)))
5017 abort ();
5018 i.vex.register_specifier = i.op[dest].regs;
5019 dest++;
5020 }
5021 }
5022
5023 i.rm.mode = 3;
5024 /* One of the register operands will be encoded in the i.rm.reg
5025 field, the other in the combined i.rm.mode and i.rm.regmem
5026 fields. If no form of this instruction supports a memory
5027 destination operand, then we assume the source operand may
5028 sometimes be a memory operand and so we need to store the
5029 destination in the i.rm.reg field. */
5030 if (!i.tm.operand_types[dest].bitfield.regmem
5031 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5032 {
5033 i.rm.reg = i.op[dest].regs->reg_num;
5034 i.rm.regmem = i.op[source].regs->reg_num;
5035 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5036 i.rex |= REX_R;
5037 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5038 i.rex |= REX_B;
5039 }
5040 else
5041 {
5042 i.rm.reg = i.op[source].regs->reg_num;
5043 i.rm.regmem = i.op[dest].regs->reg_num;
5044 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5045 i.rex |= REX_B;
5046 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5047 i.rex |= REX_R;
5048 }
5049 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5050 {
5051 if (!i.types[0].bitfield.control
5052 && !i.types[1].bitfield.control)
5053 abort ();
5054 i.rex &= ~(REX_R | REX_B);
5055 add_prefix (LOCK_PREFIX_OPCODE);
5056 }
5057 }
5058 else
5059 { /* If it's not 2 reg operands... */
5060 unsigned int mem;
5061
5062 if (i.mem_operands)
5063 {
5064 unsigned int fake_zero_displacement = 0;
5065 unsigned int op;
5066
5067 for (op = 0; op < i.operands; op++)
5068 if (operand_type_check (i.types[op], anymem))
5069 break;
5070 gas_assert (op < i.operands);
5071
5072 default_seg = &ds;
5073
5074 if (i.base_reg == 0)
5075 {
5076 i.rm.mode = 0;
5077 if (!i.disp_operands)
5078 fake_zero_displacement = 1;
5079 if (i.index_reg == 0)
5080 {
5081 /* Operand is just <disp> */
5082 if (flag_code == CODE_64BIT)
5083 {
5084 /* 64bit mode overwrites the 32bit absolute
5085 addressing by RIP relative addressing and
5086 absolute addressing is encoded by one of the
5087 redundant SIB forms. */
5088 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5089 i.sib.base = NO_BASE_REGISTER;
5090 i.sib.index = NO_INDEX_REGISTER;
5091 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5092 ? disp32s : disp32);
5093 }
5094 else if ((flag_code == CODE_16BIT)
5095 ^ (i.prefix[ADDR_PREFIX] != 0))
5096 {
5097 i.rm.regmem = NO_BASE_REGISTER_16;
5098 i.types[op] = disp16;
5099 }
5100 else
5101 {
5102 i.rm.regmem = NO_BASE_REGISTER;
5103 i.types[op] = disp32;
5104 }
5105 }
5106 else /* !i.base_reg && i.index_reg */
5107 {
5108 if (i.index_reg->reg_num == RegEiz
5109 || i.index_reg->reg_num == RegRiz)
5110 i.sib.index = NO_INDEX_REGISTER;
5111 else
5112 i.sib.index = i.index_reg->reg_num;
5113 i.sib.base = NO_BASE_REGISTER;
5114 i.sib.scale = i.log2_scale_factor;
5115 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5116 i.types[op].bitfield.disp8 = 0;
5117 i.types[op].bitfield.disp16 = 0;
5118 i.types[op].bitfield.disp64 = 0;
5119 if (flag_code != CODE_64BIT)
5120 {
5121 /* Must be 32 bit */
5122 i.types[op].bitfield.disp32 = 1;
5123 i.types[op].bitfield.disp32s = 0;
5124 }
5125 else
5126 {
5127 i.types[op].bitfield.disp32 = 0;
5128 i.types[op].bitfield.disp32s = 1;
5129 }
5130 if ((i.index_reg->reg_flags & RegRex) != 0)
5131 i.rex |= REX_X;
5132 }
5133 }
5134 /* RIP addressing for 64bit mode. */
5135 else if (i.base_reg->reg_num == RegRip ||
5136 i.base_reg->reg_num == RegEip)
5137 {
5138 i.rm.regmem = NO_BASE_REGISTER;
5139 i.types[op].bitfield.disp8 = 0;
5140 i.types[op].bitfield.disp16 = 0;
5141 i.types[op].bitfield.disp32 = 0;
5142 i.types[op].bitfield.disp32s = 1;
5143 i.types[op].bitfield.disp64 = 0;
5144 i.flags[op] |= Operand_PCrel;
5145 if (! i.disp_operands)
5146 fake_zero_displacement = 1;
5147 }
5148 else if (i.base_reg->reg_type.bitfield.reg16)
5149 {
5150 switch (i.base_reg->reg_num)
5151 {
5152 case 3: /* (%bx) */
5153 if (i.index_reg == 0)
5154 i.rm.regmem = 7;
5155 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5156 i.rm.regmem = i.index_reg->reg_num - 6;
5157 break;
5158 case 5: /* (%bp) */
5159 default_seg = &ss;
5160 if (i.index_reg == 0)
5161 {
5162 i.rm.regmem = 6;
5163 if (operand_type_check (i.types[op], disp) == 0)
5164 {
5165 /* fake (%bp) into 0(%bp) */
5166 i.types[op].bitfield.disp8 = 1;
5167 fake_zero_displacement = 1;
5168 }
5169 }
5170 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5171 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5172 break;
5173 default: /* (%si) -> 4 or (%di) -> 5 */
5174 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5175 }
5176 i.rm.mode = mode_from_disp_size (i.types[op]);
5177 }
5178 else /* i.base_reg and 32/64 bit mode */
5179 {
5180 if (flag_code == CODE_64BIT
5181 && operand_type_check (i.types[op], disp))
5182 {
5183 i386_operand_type temp;
5184 operand_type_set (&temp, 0);
5185 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5186 i.types[op] = temp;
5187 if (i.prefix[ADDR_PREFIX] == 0)
5188 i.types[op].bitfield.disp32s = 1;
5189 else
5190 i.types[op].bitfield.disp32 = 1;
5191 }
5192
5193 i.rm.regmem = i.base_reg->reg_num;
5194 if ((i.base_reg->reg_flags & RegRex) != 0)
5195 i.rex |= REX_B;
5196 i.sib.base = i.base_reg->reg_num;
5197 /* x86-64 ignores REX prefix bit here to avoid decoder
5198 complications. */
5199 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5200 {
5201 default_seg = &ss;
5202 if (i.disp_operands == 0)
5203 {
5204 fake_zero_displacement = 1;
5205 i.types[op].bitfield.disp8 = 1;
5206 }
5207 }
5208 else if (i.base_reg->reg_num == ESP_REG_NUM)
5209 {
5210 default_seg = &ss;
5211 }
5212 i.sib.scale = i.log2_scale_factor;
5213 if (i.index_reg == 0)
5214 {
5215 /* <disp>(%esp) becomes two byte modrm with no index
5216 register. We've already stored the code for esp
5217 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5218 Any base register besides %esp will not use the
5219 extra modrm byte. */
5220 i.sib.index = NO_INDEX_REGISTER;
5221 }
5222 else
5223 {
5224 if (i.index_reg->reg_num == RegEiz
5225 || i.index_reg->reg_num == RegRiz)
5226 i.sib.index = NO_INDEX_REGISTER;
5227 else
5228 i.sib.index = i.index_reg->reg_num;
5229 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5230 if ((i.index_reg->reg_flags & RegRex) != 0)
5231 i.rex |= REX_X;
5232 }
5233
5234 if (i.disp_operands
5235 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5236 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5237 i.rm.mode = 0;
5238 else
5239 i.rm.mode = mode_from_disp_size (i.types[op]);
5240 }
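/* Worked example (illustrative): "movl %eax, 4(%ebx,%esi,2)" takes the
final branch above: mod=01 (disp8), rm=100 (SIB follows), SIB
scale=01/index=%esi/base=%ebx, giving the bytes 89 44 73 04. A plain
"movl %eax, foo(%rip)" instead uses mod=00, rm=101 with a 32-bit
PC-relative displacement, per the RIP branch earlier. */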
5241
5242 if (fake_zero_displacement)
5243 {
5244 /* Fakes a zero displacement assuming that i.types[op]
5245 holds the correct displacement size. */
5246 expressionS *exp;
5247
5248 gas_assert (i.op[op].disps == 0);
5249 exp = &disp_expressions[i.disp_operands++];
5250 i.op[op].disps = exp;
5251 exp->X_op = O_constant;
5252 exp->X_add_number = 0;
5253 exp->X_add_symbol = (symbolS *) 0;
5254 exp->X_op_symbol = (symbolS *) 0;
5255 }
5256
5257 mem = op;
5258 }
5259 else
5260 mem = ~0;
5261
5262 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5263 (if any) based on i.tm.extension_opcode. Again, we must be
5264 careful to make sure that segment/control/debug/test/MMX
5265 registers are coded into the i.rm.reg field. */
5266 if (i.reg_operands)
5267 {
5268 unsigned int op;
5269 unsigned int vex_reg = ~0;
5270
5271 for (op = 0; op < i.operands; op++)
5272 if (i.types[op].bitfield.reg8
5273 || i.types[op].bitfield.reg16
5274 || i.types[op].bitfield.reg32
5275 || i.types[op].bitfield.reg64
5276 || i.types[op].bitfield.regmmx
5277 || i.types[op].bitfield.regxmm
5278 || i.types[op].bitfield.regymm
5279 || i.types[op].bitfield.sreg2
5280 || i.types[op].bitfield.sreg3
5281 || i.types[op].bitfield.control
5282 || i.types[op].bitfield.debug
5283 || i.types[op].bitfield.test)
5284 break;
5285
5286 if (vex_3_sources)
5287 op = dest;
5288 else if (i.tm.opcode_modifier.vexnds)
5289 {
5290 /* For instructions with VexNDS, the register-only
5291 source operand is encoded in VEX prefix. */
5292 gas_assert (mem != (unsigned int) ~0);
5293
5294 if (op > mem)
5295 {
5296 vex_reg = op++;
5297 gas_assert (op < i.operands);
5298 }
5299 else
5300 {
5301 vex_reg = op + 1;
5302 gas_assert (vex_reg < i.operands);
5303 }
5304 }
5305 else if (i.tm.opcode_modifier.vexndd)
5306 {
5307 /* For instructions with VexNDD, there should be
5308 no memory operand and the register destination
5309 is encoded in VEX prefix. */
5310 gas_assert (i.mem_operands == 0
5311 && (op + 2) == i.operands);
5312 vex_reg = op + 1;
5313 }
5314 else
5315 gas_assert (op < i.operands);
5316
5317 if (vex_reg != (unsigned int) ~0)
5318 {
5319 gas_assert (i.reg_operands == 2);
5320
5321 if (!operand_type_equal (&i.tm.operand_types[vex_reg],
5322 &regxmm)
5323 && !operand_type_equal (&i.tm.operand_types[vex_reg],
5324 &regymm))
5325 abort ();
5326 i.vex.register_specifier = i.op[vex_reg].regs;
5327 }
5328
5329 /* If there is an extension opcode to put here, the
5330 register number must be put into the regmem field. */
5331 if (i.tm.extension_opcode != None)
5332 {
5333 i.rm.regmem = i.op[op].regs->reg_num;
5334 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5335 i.rex |= REX_B;
5336 }
5337 else
5338 {
5339 i.rm.reg = i.op[op].regs->reg_num;
5340 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5341 i.rex |= REX_R;
5342 }
5343
5344 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5345 must set it to 3 to indicate this is a register operand
5346 in the regmem field. */
5347 if (!i.mem_operands)
5348 i.rm.mode = 3;
5349 }
5350
5351 /* Fill in i.rm.reg field with extension opcode (if any). */
5352 if (i.tm.extension_opcode != None)
5353 i.rm.reg = i.tm.extension_opcode;
5354 }
5355 return default_seg;
5356 }
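/* Illustrative note: when the template carries an extension opcode, the
register operand lands in i.rm.regmem and the /digit goes into
i.rm.reg, e.g. "shll $1, %eax" (D1 /4) ends up as d1 e0 with mod=11,
reg=100, rm=000. */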
5357
5358 static void
5359 output_branch (void)
5360 {
5361 char *p;
5362 int code16;
5363 int prefix;
5364 relax_substateT subtype;
5365 symbolS *sym;
5366 offsetT off;
5367
5368 code16 = 0;
5369 if (flag_code == CODE_16BIT)
5370 code16 = CODE16;
5371
5372 prefix = 0;
5373 if (i.prefix[DATA_PREFIX] != 0)
5374 {
5375 prefix = 1;
5376 i.prefixes -= 1;
5377 code16 ^= CODE16;
5378 }
5379 /* Pentium4 branch hints. */
5380 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5381 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5382 {
5383 prefix++;
5384 i.prefixes--;
5385 }
5386 if (i.prefix[REX_PREFIX] != 0)
5387 {
5388 prefix++;
5389 i.prefixes--;
5390 }
5391
5392 if (i.prefixes != 0 && !intel_syntax)
5393 as_warn (_("skipping prefixes on this instruction"));
5394
5395 /* It's always a symbol; end the frag and set up for relaxation.
5396 Make sure there is enough room in this frag for the largest
5397 instruction we may generate in md_convert_frag. This is 2
5398 bytes for the opcode and room for the prefix and largest
5399 displacement. */
5400 frag_grow (prefix + 2 + 4);
5401 /* Prefix and 1 opcode byte go in fr_fix. */
5402 p = frag_more (prefix + 1);
5403 if (i.prefix[DATA_PREFIX] != 0)
5404 *p++ = DATA_PREFIX_OPCODE;
5405 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5406 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5407 *p++ = i.prefix[SEG_PREFIX];
5408 if (i.prefix[REX_PREFIX] != 0)
5409 *p++ = i.prefix[REX_PREFIX];
5410 *p = i.tm.base_opcode;
5411
5412 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5413 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, SMALL);
5414 else if (cpu_arch_flags.bitfield.cpui386)
5415 subtype = ENCODE_RELAX_STATE (COND_JUMP, SMALL);
5416 else
5417 subtype = ENCODE_RELAX_STATE (COND_JUMP86, SMALL);
5418 subtype |= code16;
5419
5420 sym = i.op[0].disps->X_add_symbol;
5421 off = i.op[0].disps->X_add_number;
5422
5423 if (i.op[0].disps->X_op != O_constant
5424 && i.op[0].disps->X_op != O_symbol)
5425 {
5426 /* Handle complex expressions. */
5427 sym = make_expr_symbol (i.op[0].disps);
5428 off = 0;
5429 }
5430
5431 /* 1 possible extra opcode + 4 byte displacement go in var part.
5432 Pass reloc in fr_var. */
5433 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5434 }
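/* Illustrative note: a conditional branch such as "jne target" is
emitted here as the short form (75 rel8) inside a relaxable frag; if
the target later proves out of byte range, md_convert_frag grows it
to the 0f 85 rel32 (or rel16) form using the subtype recorded above. */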
5435
5436 static void
5437 output_jump (void)
5438 {
5439 char *p;
5440 int size;
5441 fixS *fixP;
5442
5443 if (i.tm.opcode_modifier.jumpbyte)
5444 {
5445 /* This is a loop or jecxz type instruction. */
5446 size = 1;
5447 if (i.prefix[ADDR_PREFIX] != 0)
5448 {
5449 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5450 i.prefixes -= 1;
5451 }
5452 /* Pentium4 branch hints. */
5453 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5454 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5455 {
5456 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5457 i.prefixes--;
5458 }
5459 }
5460 else
5461 {
5462 int code16;
5463
5464 code16 = 0;
5465 if (flag_code == CODE_16BIT)
5466 code16 = CODE16;
5467
5468 if (i.prefix[DATA_PREFIX] != 0)
5469 {
5470 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5471 i.prefixes -= 1;
5472 code16 ^= CODE16;
5473 }
5474
5475 size = 4;
5476 if (code16)
5477 size = 2;
5478 }
5479
5480 if (i.prefix[REX_PREFIX] != 0)
5481 {
5482 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5483 i.prefixes -= 1;
5484 }
5485
5486 if (i.prefixes != 0 && !intel_syntax)
5487 as_warn (_("skipping prefixes on this instruction"));
5488
5489 p = frag_more (1 + size);
5490 *p++ = i.tm.base_opcode;
5491
5492 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5493 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5494
5495 /* All jumps handled here are signed, but don't use a signed limit
5496 check for 32 and 16 bit jumps as we want to allow wrap around at
5497 4G and 64k respectively. */
5498 if (size == 1)
5499 fixP->fx_signed = 1;
5500 }
5501
5502 static void
5503 output_interseg_jump (void)
5504 {
5505 char *p;
5506 int size;
5507 int prefix;
5508 int code16;
5509
5510 code16 = 0;
5511 if (flag_code == CODE_16BIT)
5512 code16 = CODE16;
5513
5514 prefix = 0;
5515 if (i.prefix[DATA_PREFIX] != 0)
5516 {
5517 prefix = 1;
5518 i.prefixes -= 1;
5519 code16 ^= CODE16;
5520 }
5521 if (i.prefix[REX_PREFIX] != 0)
5522 {
5523 prefix++;
5524 i.prefixes -= 1;
5525 }
5526
5527 size = 4;
5528 if (code16)
5529 size = 2;
5530
5531 if (i.prefixes != 0 && !intel_syntax)
5532 as_warn (_("skipping prefixes on this instruction"));
5533
5534 /* 1 opcode; 2 segment; offset */
5535 p = frag_more (prefix + 1 + 2 + size);
5536
5537 if (i.prefix[DATA_PREFIX] != 0)
5538 *p++ = DATA_PREFIX_OPCODE;
5539
5540 if (i.prefix[REX_PREFIX] != 0)
5541 *p++ = i.prefix[REX_PREFIX];
5542
5543 *p++ = i.tm.base_opcode;
5544 if (i.op[1].imms->X_op == O_constant)
5545 {
5546 offsetT n = i.op[1].imms->X_add_number;
5547
5548 if (size == 2
5549 && !fits_in_unsigned_word (n)
5550 && !fits_in_signed_word (n))
5551 {
5552 as_bad (_("16-bit jump out of range"));
5553 return;
5554 }
5555 md_number_to_chars (p, n, size);
5556 }
5557 else
5558 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5559 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5560 if (i.op[0].imms->X_op != O_constant)
5561 as_bad (_("can't handle non absolute segment in `%s'"),
5562 i.tm.name);
5563 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5564 }
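/* Example (illustrative): in 32-bit code "ljmp $0x10, $0x100000"
assembles to ea 00 00 10 00 10 00 -- the 0xea opcode, the 4-byte
offset written first, then the 2-byte segment selector appended at
p + size as above. */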
5565
5566 static void
5567 output_insn (void)
5568 {
5569 fragS *insn_start_frag;
5570 offsetT insn_start_off;
5571
5572 /* Tie dwarf2 debug info to the address at the start of the insn.
5573 We can't do this after the insn has been output as the current
5574 frag may have been closed off. eg. by frag_var. */
5575 dwarf2_emit_insn (0);
5576
5577 insn_start_frag = frag_now;
5578 insn_start_off = frag_now_fix ();
5579
5580 /* Output jumps. */
5581 if (i.tm.opcode_modifier.jump)
5582 output_branch ();
5583 else if (i.tm.opcode_modifier.jumpbyte
5584 || i.tm.opcode_modifier.jumpdword)
5585 output_jump ();
5586 else if (i.tm.opcode_modifier.jumpintersegment)
5587 output_interseg_jump ();
5588 else
5589 {
5590 /* Output normal instructions here. */
5591 char *p;
5592 unsigned char *q;
5593 unsigned int j;
5594 unsigned int prefix;
5595
5596 /* Since the VEX prefix contains the implicit prefix, we don't
5597 need the explicit prefix. */
5598 if (!i.tm.opcode_modifier.vex)
5599 {
5600 switch (i.tm.opcode_length)
5601 {
5602 case 3:
5603 if (i.tm.base_opcode & 0xff000000)
5604 {
5605 prefix = (i.tm.base_opcode >> 24) & 0xff;
5606 goto check_prefix;
5607 }
5608 break;
5609 case 2:
5610 if ((i.tm.base_opcode & 0xff0000) != 0)
5611 {
5612 prefix = (i.tm.base_opcode >> 16) & 0xff;
5613 if (i.tm.cpu_flags.bitfield.cpupadlock)
5614 {
5615 check_prefix:
5616 if (prefix != REPE_PREFIX_OPCODE
5617 || (i.prefix[LOCKREP_PREFIX]
5618 != REPE_PREFIX_OPCODE))
5619 add_prefix (prefix);
5620 }
5621 else
5622 add_prefix (prefix);
5623 }
5624 break;
5625 case 1:
5626 break;
5627 default:
5628 abort ();
5629 }
5630
5631 /* The prefix bytes. */
5632 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
5633 if (*q)
5634 FRAG_APPEND_1_CHAR (*q);
5635 }
5636
5637 if (i.tm.opcode_modifier.vex)
5638 {
5639 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
5640 if (*q)
5641 switch (j)
5642 {
5643 case REX_PREFIX:
5644 /* REX byte is encoded in VEX prefix. */
5645 break;
5646 case SEG_PREFIX:
5647 case ADDR_PREFIX:
5648 FRAG_APPEND_1_CHAR (*q);
5649 break;
5650 default:
5651 /* There should be no other prefixes for instructions
5652 with VEX prefix. */
5653 abort ();
5654 }
5655
5656 /* Now the VEX prefix. */
5657 p = frag_more (i.vex.length);
5658 for (j = 0; j < i.vex.length; j++)
5659 p[j] = i.vex.bytes[j];
5660 }
5661
5662 /* Now the opcode; be careful about word order here! */
5663 if (i.tm.opcode_length == 1)
5664 {
5665 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
5666 }
5667 else
5668 {
5669 switch (i.tm.opcode_length)
5670 {
5671 case 3:
5672 p = frag_more (3);
5673 *p++ = (i.tm.base_opcode >> 16) & 0xff;
5674 break;
5675 case 2:
5676 p = frag_more (2);
5677 break;
5678 default:
5679 abort ();
5680 break;
5681 }
5682
5683 /* Put out high byte first: can't use md_number_to_chars! */
5684 *p++ = (i.tm.base_opcode >> 8) & 0xff;
5685 *p = i.tm.base_opcode & 0xff;
5686 }
5687
5688 /* Now the modrm byte and sib byte (if present). */
5689 if (i.tm.opcode_modifier.modrm)
5690 {
5691 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
5692 | i.rm.reg << 3
5693 | i.rm.mode << 6));
5694 /* If i.rm.regmem == ESP (4)
5695 && i.rm.mode != (Register mode)
5696 && not 16 bit
5697 ==> need second modrm byte. */
5698 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
5699 && i.rm.mode != 3
5700 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
5701 FRAG_APPEND_1_CHAR ((i.sib.base << 0
5702 | i.sib.index << 3
5703 | i.sib.scale << 6));
5704 }
5705
5706 if (i.disp_operands)
5707 output_disp (insn_start_frag, insn_start_off);
5708
5709 if (i.imm_operands)
5710 output_imm (insn_start_frag, insn_start_off);
5711 }
5712
5713 #ifdef DEBUG386
5714 if (flag_debug)
5715 {
5716 pi ("" /*line*/, &i);
5717 }
5718 #endif /* DEBUG386 */
5719 }
5720
5721 /* Return the size of the displacement operand N. */
5722
5723 static int
5724 disp_size (unsigned int n)
5725 {
5726 int size = 4;
5727 if (i.types[n].bitfield.disp64)
5728 size = 8;
5729 else if (i.types[n].bitfield.disp8)
5730 size = 1;
5731 else if (i.types[n].bitfield.disp16)
5732 size = 2;
5733 return size;
5734 }
5735
5736 /* Return the size of the immediate operand N. */
5737
5738 static int
5739 imm_size (unsigned int n)
5740 {
5741 int size = 4;
5742 if (i.types[n].bitfield.imm64)
5743 size = 8;
5744 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
5745 size = 1;
5746 else if (i.types[n].bitfield.imm16)
5747 size = 2;
5748 return size;
5749 }
5750
5751 static void
5752 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
5753 {
5754 char *p;
5755 unsigned int n;
5756
5757 for (n = 0; n < i.operands; n++)
5758 {
5759 if (operand_type_check (i.types[n], disp))
5760 {
5761 if (i.op[n].disps->X_op == O_constant)
5762 {
5763 int size = disp_size (n);
5764 offsetT val;
5765
5766 val = offset_in_range (i.op[n].disps->X_add_number,
5767 size);
5768 p = frag_more (size);
5769 md_number_to_chars (p, val, size);
5770 }
5771 else
5772 {
5773 enum bfd_reloc_code_real reloc_type;
5774 int size = disp_size (n);
5775 int sign = i.types[n].bitfield.disp32s;
5776 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
5777
5778 /* We can't have 8 bit displacement here. */
5779 gas_assert (!i.types[n].bitfield.disp8);
5780
5781 /* The PC relative address is computed relative
5782 to the instruction boundary, so if immediate
5783 fields follow, we need to adjust the value. */
5784 if (pcrel && i.imm_operands)
5785 {
5786 unsigned int n1;
5787 int sz = 0;
5788
5789 for (n1 = 0; n1 < i.operands; n1++)
5790 if (operand_type_check (i.types[n1], imm))
5791 {
5792 /* Only one immediate is allowed for PC
5793 relative address. */
5794 gas_assert (sz == 0);
5795 sz = imm_size (n1);
5796 i.op[n].disps->X_add_number -= sz;
5797 }
5798 /* We should find the immediate. */
5799 gas_assert (sz != 0);
5800 }
5801
5802 p = frag_more (size);
5803 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
5804 if (GOT_symbol
5805 && GOT_symbol == i.op[n].disps->X_add_symbol
5806 && (((reloc_type == BFD_RELOC_32
5807 || reloc_type == BFD_RELOC_X86_64_32S
5808 || (reloc_type == BFD_RELOC_64
5809 && object_64bit))
5810 && (i.op[n].disps->X_op == O_symbol
5811 || (i.op[n].disps->X_op == O_add
5812 && ((symbol_get_value_expression
5813 (i.op[n].disps->X_op_symbol)->X_op)
5814 == O_subtract))))
5815 || reloc_type == BFD_RELOC_32_PCREL))
5816 {
5817 offsetT add;
5818
5819 if (insn_start_frag == frag_now)
5820 add = (p - frag_now->fr_literal) - insn_start_off;
5821 else
5822 {
5823 fragS *fr;
5824
5825 add = insn_start_frag->fr_fix - insn_start_off;
5826 for (fr = insn_start_frag->fr_next;
5827 fr && fr != frag_now; fr = fr->fr_next)
5828 add += fr->fr_fix;
5829 add += p - frag_now->fr_literal;
5830 }
5831
5832 if (!object_64bit)
5833 {
5834 reloc_type = BFD_RELOC_386_GOTPC;
5835 i.op[n].imms->X_add_number += add;
5836 }
5837 else if (reloc_type == BFD_RELOC_64)
5838 reloc_type = BFD_RELOC_X86_64_GOTPC64;
5839 else
5840 /* Don't do the adjustment for x86-64, as there
5841 the pcrel addressing is relative to the _next_
5842 insn, and that is taken care of in other code. */
5843 reloc_type = BFD_RELOC_X86_64_GOTPC32;
5844 }
5845 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5846 i.op[n].disps, pcrel, reloc_type);
5847 }
5848 }
5849 }
5850 }
5851
5852 static void
5853 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
5854 {
5855 char *p;
5856 unsigned int n;
5857
5858 for (n = 0; n < i.operands; n++)
5859 {
5860 if (operand_type_check (i.types[n], imm))
5861 {
5862 if (i.op[n].imms->X_op == O_constant)
5863 {
5864 int size = imm_size (n);
5865 offsetT val;
5866
5867 val = offset_in_range (i.op[n].imms->X_add_number,
5868 size);
5869 p = frag_more (size);
5870 md_number_to_chars (p, val, size);
5871 }
5872 else
5873 {
5874 /* Not absolute_section.
5875 Need a 32-bit fixup (don't support 8bit
5876 non-absolute imms). Try to support other
5877 sizes ... */
5878 enum bfd_reloc_code_real reloc_type;
5879 int size = imm_size (n);
5880 int sign;
5881
5882 if (i.types[n].bitfield.imm32s
5883 && (i.suffix == QWORD_MNEM_SUFFIX
5884 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
5885 sign = 1;
5886 else
5887 sign = 0;
5888
5889 p = frag_more (size);
5890 reloc_type = reloc (size, 0, sign, i.reloc[n]);
5891
5892 /* This is tough to explain. We end up with this one if we
5893 * have operands that look like
5894 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
5895 * obtain the absolute address of the GOT, and it is strongly
5896 * preferable from a performance point of view to avoid using
5897 * a runtime relocation for this. The actual sequence of
5898 * instructions often looks something like:
5899 *
5900 * call .L66
5901 * .L66:
5902 * popl %ebx
5903 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
5904 *
5905 * The call and pop essentially return the absolute address
5906 * of the label .L66 and store it in %ebx. The linker itself
5907 * will ultimately change the first operand of the addl so
5908 * that %ebx points to the GOT, but to keep things simple, the
5909 * .o file must have this operand set so that it generates not
5910 * the absolute address of .L66, but the absolute address of
5911 * itself. This allows the linker itself to simply treat a GOTPC
5912 * relocation as asking for a pcrel offset to the GOT to be
5913 * added in, and the addend of the relocation is stored in the
5914 * operand field for the instruction itself.
5915 *
5916 * Our job here is to fix the operand so that it would add
5917 * the correct offset so that %ebx would point to itself. The
5918 * thing that is tricky is that .-.L66 will point to the
5919 * beginning of the instruction, so we need to further modify
5920 * the operand so that it will point to itself. There are
5921 * other cases where you have something like:
5922 *
5923 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
5924 *
5925 * and here no correction would be required. Internally in
5926 * the assembler we treat operands of this form as not being
5927 * pcrel since the '.' is explicitly mentioned, and I wonder
5928 * whether it would simplify matters to do it this way. Who
5929 * knows. In earlier versions of the PIC patches, the
5930 * pcrel_adjust field was used to store the correction, but
5931 * since the expression is not pcrel, I felt it would be
5932 * confusing to do it this way. */
5933
5934 if ((reloc_type == BFD_RELOC_32
5935 || reloc_type == BFD_RELOC_X86_64_32S
5936 || reloc_type == BFD_RELOC_64)
5937 && GOT_symbol
5938 && GOT_symbol == i.op[n].imms->X_add_symbol
5939 && (i.op[n].imms->X_op == O_symbol
5940 || (i.op[n].imms->X_op == O_add
5941 && ((symbol_get_value_expression
5942 (i.op[n].imms->X_op_symbol)->X_op)
5943 == O_subtract))))
5944 {
5945 offsetT add;
5946
5947 if (insn_start_frag == frag_now)
5948 add = (p - frag_now->fr_literal) - insn_start_off;
5949 else
5950 {
5951 fragS *fr;
5952
5953 add = insn_start_frag->fr_fix - insn_start_off;
5954 for (fr = insn_start_frag->fr_next;
5955 fr && fr != frag_now; fr = fr->fr_next)
5956 add += fr->fr_fix;
5957 add += p - frag_now->fr_literal;
5958 }
5959
5960 if (!object_64bit)
5961 reloc_type = BFD_RELOC_386_GOTPC;
5962 else if (size == 4)
5963 reloc_type = BFD_RELOC_X86_64_GOTPC32;
5964 else if (size == 8)
5965 reloc_type = BFD_RELOC_X86_64_GOTPC64;
5966 i.op[n].imms->X_add_number += add;
5967 }
5968 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5969 i.op[n].imms, 0, reloc_type);
5970 }
5971 }
5972 }
5973 }
5974 \f
5975 /* x86_cons_fix_new is called via the expression parsing code when a
5976 reloc is needed. We use this hook to get the correct .got reloc. */
5977 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
5978 static int cons_sign = -1;
5979
5980 void
5981 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
5982 expressionS *exp)
5983 {
5984 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
5985
5986 got_reloc = NO_RELOC;
5987
5988 #ifdef TE_PE
5989 if (exp->X_op == O_secrel)
5990 {
5991 exp->X_op = O_symbol;
5992 r = BFD_RELOC_32_SECREL;
5993 }
5994 #endif
5995
5996 fix_new_exp (frag, off, len, exp, 0, r);
5997 }
5998
5999 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6000 # define lex_got(reloc, adjust, types) NULL
6001 #else
6002 /* Parse operands of the form
6003 <symbol>@GOTOFF+<nnn>
6004 and similar .plt or .got references.
6005
6006 If we find one, set up the correct relocation in RELOC and copy the
6007 input string, minus the `@GOTOFF' into a malloc'd buffer for
6008 parsing by the calling routine. Return this buffer, and if ADJUST
6009 is non-null set it to the length of the string we removed from the
6010 input line. Otherwise return NULL. */
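/* Example (illustrative): for "movl foo@GOTOFF(%ebx), %eax" this routine
selects BFD_RELOC_386_GOTOFF and hands back the operand text with
"@GOTOFF" replaced by a space, while for 64-bit code
"movq foo@GOTPCREL(%rip), %rax" it selects BFD_RELOC_X86_64_GOTPCREL
instead. */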
6011 static char *
6012 lex_got (enum bfd_reloc_code_real *reloc,
6013 int *adjust,
6014 i386_operand_type *types)
6015 {
6016 /* Some of the relocations depend on the size of what field is to
6017 be relocated. But in our callers i386_immediate and i386_displacement
6018 we don't yet know the operand size (this will be set by insn
6019 matching). Hence we record the word32 relocation here,
6020 and adjust the reloc according to the real size in reloc(). */
6021 static const struct {
6022 const char *str;
6023 const enum bfd_reloc_code_real rel[2];
6024 const i386_operand_type types64;
6025 } gotrel[] = {
6026 { "PLTOFF", { _dummy_first_bfd_reloc_code_real,
6027 BFD_RELOC_X86_64_PLTOFF64 },
6028 OPERAND_TYPE_IMM64 },
6029 { "PLT", { BFD_RELOC_386_PLT32,
6030 BFD_RELOC_X86_64_PLT32 },
6031 OPERAND_TYPE_IMM32_32S_DISP32 },
6032 { "GOTPLT", { _dummy_first_bfd_reloc_code_real,
6033 BFD_RELOC_X86_64_GOTPLT64 },
6034 OPERAND_TYPE_IMM64_DISP64 },
6035 { "GOTOFF", { BFD_RELOC_386_GOTOFF,
6036 BFD_RELOC_X86_64_GOTOFF64 },
6037 OPERAND_TYPE_IMM64_DISP64 },
6038 { "GOTPCREL", { _dummy_first_bfd_reloc_code_real,
6039 BFD_RELOC_X86_64_GOTPCREL },
6040 OPERAND_TYPE_IMM32_32S_DISP32 },
6041 { "TLSGD", { BFD_RELOC_386_TLS_GD,
6042 BFD_RELOC_X86_64_TLSGD },
6043 OPERAND_TYPE_IMM32_32S_DISP32 },
6044 { "TLSLDM", { BFD_RELOC_386_TLS_LDM,
6045 _dummy_first_bfd_reloc_code_real },
6046 OPERAND_TYPE_NONE },
6047 { "TLSLD", { _dummy_first_bfd_reloc_code_real,
6048 BFD_RELOC_X86_64_TLSLD },
6049 OPERAND_TYPE_IMM32_32S_DISP32 },
6050 { "GOTTPOFF", { BFD_RELOC_386_TLS_IE_32,
6051 BFD_RELOC_X86_64_GOTTPOFF },
6052 OPERAND_TYPE_IMM32_32S_DISP32 },
6053 { "TPOFF", { BFD_RELOC_386_TLS_LE_32,
6054 BFD_RELOC_X86_64_TPOFF32 },
6055 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6056 { "NTPOFF", { BFD_RELOC_386_TLS_LE,
6057 _dummy_first_bfd_reloc_code_real },
6058 OPERAND_TYPE_NONE },
6059 { "DTPOFF", { BFD_RELOC_386_TLS_LDO_32,
6060 BFD_RELOC_X86_64_DTPOFF32 },
6061
6062 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6063 { "GOTNTPOFF",{ BFD_RELOC_386_TLS_GOTIE,
6064 _dummy_first_bfd_reloc_code_real },
6065 OPERAND_TYPE_NONE },
6066 { "INDNTPOFF",{ BFD_RELOC_386_TLS_IE,
6067 _dummy_first_bfd_reloc_code_real },
6068 OPERAND_TYPE_NONE },
6069 { "GOT", { BFD_RELOC_386_GOT32,
6070 BFD_RELOC_X86_64_GOT32 },
6071 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6072 { "TLSDESC", { BFD_RELOC_386_TLS_GOTDESC,
6073 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6074 OPERAND_TYPE_IMM32_32S_DISP32 },
6075 { "TLSCALL", { BFD_RELOC_386_TLS_DESC_CALL,
6076 BFD_RELOC_X86_64_TLSDESC_CALL },
6077 OPERAND_TYPE_IMM32_32S_DISP32 },
6078 };
6079 char *cp;
6080 unsigned int j;
6081
6082 if (!IS_ELF)
6083 return NULL;
6084
6085 for (cp = input_line_pointer; *cp != '@'; cp++)
6086 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6087 return NULL;
6088
6089 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6090 {
6091 int len;
6092
6093 len = strlen (gotrel[j].str);
6094 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6095 {
6096 if (gotrel[j].rel[object_64bit] != 0)
6097 {
6098 int first, second;
6099 char *tmpbuf, *past_reloc;
6100
6101 *reloc = gotrel[j].rel[object_64bit];
6102 if (adjust)
6103 *adjust = len;
6104
6105 if (types)
6106 {
6107 if (flag_code != CODE_64BIT)
6108 {
6109 types->bitfield.imm32 = 1;
6110 types->bitfield.disp32 = 1;
6111 }
6112 else
6113 *types = gotrel[j].types64;
6114 }
6115
6116 if (GOT_symbol == NULL)
6117 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6118
6119 /* The length of the first part of our input line. */
6120 first = cp - input_line_pointer;
6121
6122 /* The second part goes from after the reloc token until
6123 (and including) an end_of_line char or comma. */
6124 past_reloc = cp + 1 + len;
6125 cp = past_reloc;
6126 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6127 ++cp;
6128 second = cp + 1 - past_reloc;
6129
6130 /* Allocate and copy string. The trailing NUL shouldn't
6131 be necessary, but be safe. */
6132 tmpbuf = (char *) xmalloc (first + second + 2);
6133 memcpy (tmpbuf, input_line_pointer, first);
6134 if (second != 0 && *past_reloc != ' ')
6135 /* Replace the relocation token with ' ', so that
6136 errors like foo@GOTOFF1 will be detected. */
6137 tmpbuf[first++] = ' ';
6138 memcpy (tmpbuf + first, past_reloc, second);
6139 tmpbuf[first + second] = '\0';
6140 return tmpbuf;
6141 }
6142
6143 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6144 gotrel[j].str, 1 << (5 + object_64bit));
6145 return NULL;
6146 }
6147 }
6148
6149 /* Might be a symbol version string. Don't as_bad here. */
6150 return NULL;
6151 }
6152
6153 void
6154 x86_cons (expressionS *exp, int size)
6155 {
6156 intel_syntax = -intel_syntax;
6157
6158 if (size == 4 || (object_64bit && size == 8))
6159 {
6160 /* Handle @GOTOFF and the like in an expression. */
6161 char *save;
6162 char *gotfree_input_line;
6163 int adjust;
6164
6165 save = input_line_pointer;
6166 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6167 if (gotfree_input_line)
6168 input_line_pointer = gotfree_input_line;
6169
6170 expression (exp);
6171
6172 if (gotfree_input_line)
6173 {
6174 /* expression () has merrily parsed up to the end of line,
6175 or a comma - in the wrong buffer. Transfer how far
6176 input_line_pointer has moved to the right buffer. */
6177 input_line_pointer = (save
6178 + (input_line_pointer - gotfree_input_line)
6179 + adjust);
6180 free (gotfree_input_line);
6181 if (exp->X_op == O_constant
6182 || exp->X_op == O_absent
6183 || exp->X_op == O_illegal
6184 || exp->X_op == O_register
6185 || exp->X_op == O_big)
6186 {
6187 char c = *input_line_pointer;
6188 *input_line_pointer = 0;
6189 as_bad (_("missing or invalid expression `%s'"), save);
6190 *input_line_pointer = c;
6191 }
6192 }
6193 }
6194 else
6195 expression (exp);
6196
6197 intel_syntax = -intel_syntax;
6198
6199 if (intel_syntax)
6200 i386_intel_simplify (exp);
6201 }
6202 #endif
6203
6204 static void signed_cons (int size)
6205 {
6206 if (flag_code == CODE_64BIT)
6207 cons_sign = 1;
6208 cons (size);
6209 cons_sign = -1;
6210 }
6211
6212 #ifdef TE_PE
6213 static void
6214 pe_directive_secrel (dummy)
6215 int dummy ATTRIBUTE_UNUSED;
6216 {
6217 expressionS exp;
6218
6219 do
6220 {
6221 expression (&exp);
6222 if (exp.X_op == O_symbol)
6223 exp.X_op = O_secrel;
6224
6225 emit_expr (&exp, 4);
6226 }
6227 while (*input_line_pointer++ == ',');
6228
6229 input_line_pointer--;
6230 demand_empty_rest_of_line ();
6231 }
6232 #endif
6233
6234 static int
6235 i386_immediate (char *imm_start)
6236 {
6237 char *save_input_line_pointer;
6238 char *gotfree_input_line;
6239 segT exp_seg = 0;
6240 expressionS *exp;
6241 i386_operand_type types;
6242
6243 operand_type_set (&types, ~0);
6244
6245 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6246 {
6247 as_bad (_("at most %d immediate operands are allowed"),
6248 MAX_IMMEDIATE_OPERANDS);
6249 return 0;
6250 }
6251
6252 exp = &im_expressions[i.imm_operands++];
6253 i.op[this_operand].imms = exp;
6254
6255 if (is_space_char (*imm_start))
6256 ++imm_start;
6257
6258 save_input_line_pointer = input_line_pointer;
6259 input_line_pointer = imm_start;
6260
6261 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6262 if (gotfree_input_line)
6263 input_line_pointer = gotfree_input_line;
6264
6265 exp_seg = expression (exp);
6266
6267 SKIP_WHITESPACE ();
6268 if (*input_line_pointer)
6269 as_bad (_("junk `%s' after expression"), input_line_pointer);
6270
6271 input_line_pointer = save_input_line_pointer;
6272 if (gotfree_input_line)
6273 {
6274 free (gotfree_input_line);
6275
6276 if (exp->X_op == O_constant || exp->X_op == O_register)
6277 exp->X_op = O_illegal;
6278 }
6279
6280 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6281 }
6282
6283 static int
6284 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6285 i386_operand_type types, const char *imm_start)
6286 {
6287 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6288 {
6289 as_bad (_("missing or invalid immediate expression `%s'"),
6290 imm_start);
6291 return 0;
6292 }
6293 else if (exp->X_op == O_constant)
6294 {
6295 /* Size it properly later. */
6296 i.types[this_operand].bitfield.imm64 = 1;
6297 /* If BFD64, sign extend val. */
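      /* The xor/subtract below is the usual branch-free sign-extension
	 idiom: e.g. an X_add_number of 0xfffffffe becomes -2 once it is
	 widened to addressT.  */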
6298 if (!use_rela_relocations
6299 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6300 exp->X_add_number
6301 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6302 }
6303 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6304 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6305 && exp_seg != absolute_section
6306 && exp_seg != text_section
6307 && exp_seg != data_section
6308 && exp_seg != bss_section
6309 && exp_seg != undefined_section
6310 && !bfd_is_com_section (exp_seg))
6311 {
6312 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6313 return 0;
6314 }
6315 #endif
6316 else if (!intel_syntax && exp->X_op == O_register)
6317 {
6318 as_bad (_("illegal immediate register operand %s"), imm_start);
6319 return 0;
6320 }
6321 else
6322 {
6323 /* This is an address. The size of the address will be
6324 determined later, depending on destination register,
6325 suffix, or the default for the section. */
6326 i.types[this_operand].bitfield.imm8 = 1;
6327 i.types[this_operand].bitfield.imm16 = 1;
6328 i.types[this_operand].bitfield.imm32 = 1;
6329 i.types[this_operand].bitfield.imm32s = 1;
6330 i.types[this_operand].bitfield.imm64 = 1;
6331 i.types[this_operand] = operand_type_and (i.types[this_operand],
6332 types);
6333 }
6334
6335 return 1;
6336 }
6337
6338 static char *
6339 i386_scale (char *scale)
6340 {
6341 offsetT val;
6342 char *save = input_line_pointer;
6343
6344 input_line_pointer = scale;
6345 val = get_absolute_expression ();
6346
6347 switch (val)
6348 {
6349 case 1:
6350 i.log2_scale_factor = 0;
6351 break;
6352 case 2:
6353 i.log2_scale_factor = 1;
6354 break;
6355 case 4:
6356 i.log2_scale_factor = 2;
6357 break;
6358 case 8:
6359 i.log2_scale_factor = 3;
6360 break;
6361 default:
6362 {
6363 char sep = *input_line_pointer;
6364
6365 *input_line_pointer = '\0';
6366 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6367 scale);
6368 *input_line_pointer = sep;
6369 input_line_pointer = save;
6370 return NULL;
6371 }
6372 }
6373 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6374 {
6375 as_warn (_("scale factor of %d without an index register"),
6376 1 << i.log2_scale_factor);
6377 i.log2_scale_factor = 0;
6378 }
6379 scale = input_line_pointer;
6380 input_line_pointer = save;
6381 return scale;
6382 }
6383
6384 static int
6385 i386_displacement (char *disp_start, char *disp_end)
6386 {
6387 expressionS *exp;
6388 segT exp_seg = 0;
6389 char *save_input_line_pointer;
6390 char *gotfree_input_line;
6391 int override;
6392 i386_operand_type bigdisp, types = anydisp;
6393 int ret;
6394
6395 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6396 {
6397 as_bad (_("at most %d displacement operands are allowed"),
6398 MAX_MEMORY_OPERANDS);
6399 return 0;
6400 }
6401
6402 operand_type_set (&bigdisp, 0);
6403 if ((i.types[this_operand].bitfield.jumpabsolute)
6404 || (!current_templates->start->opcode_modifier.jump
6405 && !current_templates->start->opcode_modifier.jumpdword))
6406 {
6407 bigdisp.bitfield.disp32 = 1;
6408 override = (i.prefix[ADDR_PREFIX] != 0);
6409 if (flag_code == CODE_64BIT)
6410 {
6411 if (!override)
6412 {
6413 bigdisp.bitfield.disp32s = 1;
6414 bigdisp.bitfield.disp64 = 1;
6415 }
6416 }
6417 else if ((flag_code == CODE_16BIT) ^ override)
6418 {
6419 bigdisp.bitfield.disp32 = 0;
6420 bigdisp.bitfield.disp16 = 1;
6421 }
6422 }
6423 else
6424 {
6425 /* For PC-relative branches, the width of the displacement
6426 is dependent upon data size, not address size. */
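      /* E.g. in 32-bit code a "w" suffix, or an explicit data-size
	 prefix, selects a 16-bit branch displacement here.  */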
6427 override = (i.prefix[DATA_PREFIX] != 0);
6428 if (flag_code == CODE_64BIT)
6429 {
6430 if (override || i.suffix == WORD_MNEM_SUFFIX)
6431 bigdisp.bitfield.disp16 = 1;
6432 else
6433 {
6434 bigdisp.bitfield.disp32 = 1;
6435 bigdisp.bitfield.disp32s = 1;
6436 }
6437 }
6438 else
6439 {
6440 if (!override)
6441 override = (i.suffix == (flag_code != CODE_16BIT
6442 ? WORD_MNEM_SUFFIX
6443 : LONG_MNEM_SUFFIX));
6444 bigdisp.bitfield.disp32 = 1;
6445 if ((flag_code == CODE_16BIT) ^ override)
6446 {
6447 bigdisp.bitfield.disp32 = 0;
6448 bigdisp.bitfield.disp16 = 1;
6449 }
6450 }
6451 }
6452 i.types[this_operand] = operand_type_or (i.types[this_operand],
6453 bigdisp);
6454
6455 exp = &disp_expressions[i.disp_operands];
6456 i.op[this_operand].disps = exp;
6457 i.disp_operands++;
6458 save_input_line_pointer = input_line_pointer;
6459 input_line_pointer = disp_start;
6460 END_STRING_AND_SAVE (disp_end);
6461
6462 #ifndef GCC_ASM_O_HACK
6463 #define GCC_ASM_O_HACK 0
6464 #endif
6465 #if GCC_ASM_O_HACK
6466 END_STRING_AND_SAVE (disp_end + 1);
6467 if (i.types[this_operand].bitfield.baseIndex
6468 && displacement_string_end[-1] == '+')
6469 {
6470 /* This hack is to avoid a warning when using the "o"
6471 constraint within gcc asm statements.
6472 For instance:
6473
6474 #define _set_tssldt_desc(n,addr,limit,type) \
6475 __asm__ __volatile__ ( \
6476 "movw %w2,%0\n\t" \
6477 "movw %w1,2+%0\n\t" \
6478 "rorl $16,%1\n\t" \
6479 "movb %b1,4+%0\n\t" \
6480 "movb %4,5+%0\n\t" \
6481 "movb $0,6+%0\n\t" \
6482 "movb %h1,7+%0\n\t" \
6483 "rorl $16,%1" \
6484 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6485
6486 This works great except that the output assembler ends
6487 up looking a bit weird if it turns out that there is
6488 no offset. You end up producing code that looks like:
6489
6490 #APP
6491 movw $235,(%eax)
6492 movw %dx,2+(%eax)
6493 rorl $16,%edx
6494 movb %dl,4+(%eax)
6495 movb $137,5+(%eax)
6496 movb $0,6+(%eax)
6497 movb %dh,7+(%eax)
6498 rorl $16,%edx
6499 #NO_APP
6500
6501 So here we provide the missing zero. */
6502
6503 *displacement_string_end = '0';
6504 }
6505 #endif
6506 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6507 if (gotfree_input_line)
6508 input_line_pointer = gotfree_input_line;
6509
6510 exp_seg = expression (exp);
6511
6512 SKIP_WHITESPACE ();
6513 if (*input_line_pointer)
6514 as_bad (_("junk `%s' after expression"), input_line_pointer);
6515 #if GCC_ASM_O_HACK
6516 RESTORE_END_STRING (disp_end + 1);
6517 #endif
6518 input_line_pointer = save_input_line_pointer;
6519 if (gotfree_input_line)
6520 {
6521 free (gotfree_input_line);
6522
6523 if (exp->X_op == O_constant || exp->X_op == O_register)
6524 exp->X_op = O_illegal;
6525 }
6526
6527 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6528
6529 RESTORE_END_STRING (disp_end);
6530
6531 return ret;
6532 }
6533
6534 static int
6535 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6536 i386_operand_type types, const char *disp_start)
6537 {
6538 i386_operand_type bigdisp;
6539 int ret = 1;
6540
6541 /* We do this to make sure that the section symbol is in
6542 the symbol table. We will ultimately change the relocation
6543 to be relative to the beginning of the section. */
6544 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6545 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6546 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6547 {
6548 if (exp->X_op != O_symbol)
6549 goto inv_disp;
6550
6551 if (S_IS_LOCAL (exp->X_add_symbol)
6552 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section)
6553 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6554 exp->X_op = O_subtract;
6555 exp->X_op_symbol = GOT_symbol;
6556 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6557 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6558 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6559 i.reloc[this_operand] = BFD_RELOC_64;
6560 else
6561 i.reloc[this_operand] = BFD_RELOC_32;
6562 }
6563
6564 else if (exp->X_op == O_absent
6565 || exp->X_op == O_illegal
6566 || exp->X_op == O_big)
6567 {
6568 inv_disp:
6569 as_bad (_("missing or invalid displacement expression `%s'"),
6570 disp_start);
6571 ret = 0;
6572 }
6573
6574 else if (flag_code == CODE_64BIT
6575 && !i.prefix[ADDR_PREFIX]
6576 && exp->X_op == O_constant)
6577 {
6578       /* Since the displacement is sign-extended to 64 bits, don't allow
6579 	 disp32, and turn off disp32s if it is out of range. */
6580 i.types[this_operand].bitfield.disp32 = 0;
6581 if (!fits_in_signed_long (exp->X_add_number))
6582 {
6583 i.types[this_operand].bitfield.disp32s = 0;
6584 if (i.types[this_operand].bitfield.baseindex)
6585 {
6586 	      as_bad (_("0x%lx out of range of signed 32bit displacement"),
6587 (long) exp->X_add_number);
6588 ret = 0;
6589 }
6590 }
6591 }
6592
6593 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6594 else if (exp->X_op != O_constant
6595 && OUTPUT_FLAVOR == bfd_target_aout_flavour
6596 && exp_seg != absolute_section
6597 && exp_seg != text_section
6598 && exp_seg != data_section
6599 && exp_seg != bss_section
6600 && exp_seg != undefined_section
6601 && !bfd_is_com_section (exp_seg))
6602 {
6603 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6604 ret = 0;
6605 }
6606 #endif
6607
6608 /* Check if this is a displacement only operand. */
6609 bigdisp = i.types[this_operand];
6610 bigdisp.bitfield.disp8 = 0;
6611 bigdisp.bitfield.disp16 = 0;
6612 bigdisp.bitfield.disp32 = 0;
6613 bigdisp.bitfield.disp32s = 0;
6614 bigdisp.bitfield.disp64 = 0;
6615 if (operand_type_all_zero (&bigdisp))
6616 i.types[this_operand] = operand_type_and (i.types[this_operand],
6617 types);
6618
6619 return ret;
6620 }
6621
6622 /* Make sure the memory operand we've been dealt is valid.
6623 Return 1 on success, 0 on a failure. */
6624
6625 static int
6626 i386_index_check (const char *operand_string)
6627 {
6628 int ok;
6629 const char *kind = "base/index";
6630 #if INFER_ADDR_PREFIX
6631 int fudged = 0;
6632
6633 tryprefix:
6634 #endif
6635 ok = 1;
6636 if (current_templates->start->opcode_modifier.isstring
6637 && !current_templates->start->opcode_modifier.immext
6638 && (current_templates->end[-1].opcode_modifier.isstring
6639 || i.mem_operands))
6640 {
6641 /* Memory operands of string insns are special in that they only allow
6642 a single register (rDI, rSI, or rBX) as their memory address. */
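      /* For instance "xlat" only takes an rBX-based address; the other
	 string insns expect rSI or rDI, depending on the operand
	 template checked below.  */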
6643 unsigned int expected;
6644
6645 kind = "string address";
6646
6647 if (current_templates->start->opcode_modifier.w)
6648 {
6649 i386_operand_type type = current_templates->end[-1].operand_types[0];
6650
6651 if (!type.bitfield.baseindex
6652 || ((!i.mem_operands != !intel_syntax)
6653 && current_templates->end[-1].operand_types[1]
6654 .bitfield.baseindex))
6655 type = current_templates->end[-1].operand_types[1];
6656 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
6657 }
6658 else
6659 expected = 3 /* rBX */;
6660
6661 if (!i.base_reg || i.index_reg
6662 || operand_type_check (i.types[this_operand], disp))
6663 ok = -1;
6664 else if (!(flag_code == CODE_64BIT
6665 ? i.prefix[ADDR_PREFIX]
6666 ? i.base_reg->reg_type.bitfield.reg32
6667 : i.base_reg->reg_type.bitfield.reg64
6668 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6669 ? i.base_reg->reg_type.bitfield.reg32
6670 : i.base_reg->reg_type.bitfield.reg16))
6671 ok = 0;
6672 else if (i.base_reg->reg_num != expected)
6673 ok = -1;
6674
6675 if (ok < 0)
6676 {
6677 unsigned int j;
6678
6679 for (j = 0; j < i386_regtab_size; ++j)
6680 if ((flag_code == CODE_64BIT
6681 ? i.prefix[ADDR_PREFIX]
6682 ? i386_regtab[j].reg_type.bitfield.reg32
6683 : i386_regtab[j].reg_type.bitfield.reg64
6684 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
6685 ? i386_regtab[j].reg_type.bitfield.reg32
6686 : i386_regtab[j].reg_type.bitfield.reg16)
6687 && i386_regtab[j].reg_num == expected)
6688 break;
6689 gas_assert (j < i386_regtab_size);
6690 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
6691 operand_string,
6692 intel_syntax ? '[' : '(',
6693 register_prefix,
6694 i386_regtab[j].reg_name,
6695 intel_syntax ? ']' : ')');
6696 ok = 1;
6697 }
6698 }
6699 else if (flag_code == CODE_64BIT)
6700 {
6701 if ((i.base_reg
6702 && ((i.prefix[ADDR_PREFIX] == 0
6703 && !i.base_reg->reg_type.bitfield.reg64)
6704 || (i.prefix[ADDR_PREFIX]
6705 && !i.base_reg->reg_type.bitfield.reg32))
6706 && (i.index_reg
6707 || i.base_reg->reg_num !=
6708 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
6709 || (i.index_reg
6710 && (!i.index_reg->reg_type.bitfield.baseindex
6711 || (i.prefix[ADDR_PREFIX] == 0
6712 && i.index_reg->reg_num != RegRiz
6713 && !i.index_reg->reg_type.bitfield.reg64
6714 )
6715 || (i.prefix[ADDR_PREFIX]
6716 && i.index_reg->reg_num != RegEiz
6717 && !i.index_reg->reg_type.bitfield.reg32))))
6718 ok = 0;
6719 }
6720 else
6721 {
6722 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
6723 {
6724 /* 16bit checks. */
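	  /* In 16-bit addressing only %bx, %bp, %si and %di may appear on
	     their own; a base/index pair must be %bx or %bp plus %si or
	     %di, with no scale factor.  */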
6725 if ((i.base_reg
6726 && (!i.base_reg->reg_type.bitfield.reg16
6727 || !i.base_reg->reg_type.bitfield.baseindex))
6728 || (i.index_reg
6729 && (!i.index_reg->reg_type.bitfield.reg16
6730 || !i.index_reg->reg_type.bitfield.baseindex
6731 || !(i.base_reg
6732 && i.base_reg->reg_num < 6
6733 && i.index_reg->reg_num >= 6
6734 && i.log2_scale_factor == 0))))
6735 ok = 0;
6736 }
6737 else
6738 {
6739 /* 32bit checks. */
6740 if ((i.base_reg
6741 && !i.base_reg->reg_type.bitfield.reg32)
6742 || (i.index_reg
6743 && ((!i.index_reg->reg_type.bitfield.reg32
6744 && i.index_reg->reg_num != RegEiz)
6745 || !i.index_reg->reg_type.bitfield.baseindex)))
6746 ok = 0;
6747 }
6748 }
6749 if (!ok)
6750 {
6751 #if INFER_ADDR_PREFIX
6752 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
6753 {
6754 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
6755 i.prefixes += 1;
6756 /* Change the size of any displacement too. At most one of
6757 Disp16 or Disp32 is set.
6758 FIXME. There doesn't seem to be any real need for separate
6759 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
6760 Removing them would probably clean up the code quite a lot. */
6761 if (flag_code != CODE_64BIT
6762 && (i.types[this_operand].bitfield.disp16
6763 || i.types[this_operand].bitfield.disp32))
6764 i.types[this_operand]
6765 = operand_type_xor (i.types[this_operand], disp16_32);
6766 fudged = 1;
6767 goto tryprefix;
6768 }
6769 if (fudged)
6770 as_bad (_("`%s' is not a valid %s expression"),
6771 operand_string,
6772 kind);
6773 else
6774 #endif
6775 as_bad (_("`%s' is not a valid %s-bit %s expression"),
6776 operand_string,
6777 flag_code_names[i.prefix[ADDR_PREFIX]
6778 ? flag_code == CODE_32BIT
6779 ? CODE_16BIT
6780 : CODE_32BIT
6781 : flag_code],
6782 kind);
6783 }
6784 return ok;
6785 }
6786
6787 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
6788 on error. */
6789
6790 static int
6791 i386_att_operand (char *operand_string)
6792 {
6793 const reg_entry *r;
6794 char *end_op;
6795 char *op_string = operand_string;
6796
6797 if (is_space_char (*op_string))
6798 ++op_string;
6799
6800 /* We check for an absolute prefix (differentiating,
6801      for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
6802 if (*op_string == ABSOLUTE_PREFIX)
6803 {
6804 ++op_string;
6805 if (is_space_char (*op_string))
6806 ++op_string;
6807 i.types[this_operand].bitfield.jumpabsolute = 1;
6808 }
6809
6810 /* Check if operand is a register. */
6811 if ((r = parse_register (op_string, &end_op)) != NULL)
6812 {
6813 i386_operand_type temp;
6814
6815 /* Check for a segment override by searching for ':' after a
6816 segment register. */
6817 op_string = end_op;
6818 if (is_space_char (*op_string))
6819 ++op_string;
6820 if (*op_string == ':'
6821 && (r->reg_type.bitfield.sreg2
6822 || r->reg_type.bitfield.sreg3))
6823 {
6824 switch (r->reg_num)
6825 {
6826 case 0:
6827 i.seg[i.mem_operands] = &es;
6828 break;
6829 case 1:
6830 i.seg[i.mem_operands] = &cs;
6831 break;
6832 case 2:
6833 i.seg[i.mem_operands] = &ss;
6834 break;
6835 case 3:
6836 i.seg[i.mem_operands] = &ds;
6837 break;
6838 case 4:
6839 i.seg[i.mem_operands] = &fs;
6840 break;
6841 case 5:
6842 i.seg[i.mem_operands] = &gs;
6843 break;
6844 }
6845
6846 /* Skip the ':' and whitespace. */
6847 ++op_string;
6848 if (is_space_char (*op_string))
6849 ++op_string;
6850
6851 if (!is_digit_char (*op_string)
6852 && !is_identifier_char (*op_string)
6853 && *op_string != '('
6854 && *op_string != ABSOLUTE_PREFIX)
6855 {
6856 as_bad (_("bad memory operand `%s'"), op_string);
6857 return 0;
6858 }
6859 /* Handle case of %es:*foo. */
6860 if (*op_string == ABSOLUTE_PREFIX)
6861 {
6862 ++op_string;
6863 if (is_space_char (*op_string))
6864 ++op_string;
6865 i.types[this_operand].bitfield.jumpabsolute = 1;
6866 }
6867 goto do_memory_reference;
6868 }
6869 if (*op_string)
6870 {
6871 as_bad (_("junk `%s' after register"), op_string);
6872 return 0;
6873 }
6874 temp = r->reg_type;
6875 temp.bitfield.baseindex = 0;
6876 i.types[this_operand] = operand_type_or (i.types[this_operand],
6877 temp);
6878 i.types[this_operand].bitfield.unspecified = 0;
6879 i.op[this_operand].regs = r;
6880 i.reg_operands++;
6881 }
6882 else if (*op_string == REGISTER_PREFIX)
6883 {
6884 as_bad (_("bad register name `%s'"), op_string);
6885 return 0;
6886 }
6887 else if (*op_string == IMMEDIATE_PREFIX)
6888 {
6889 ++op_string;
6890 if (i.types[this_operand].bitfield.jumpabsolute)
6891 {
6892 as_bad (_("immediate operand illegal with absolute jump"));
6893 return 0;
6894 }
6895 if (!i386_immediate (op_string))
6896 return 0;
6897 }
6898 else if (is_digit_char (*op_string)
6899 || is_identifier_char (*op_string)
6900 || *op_string == '(')
6901 {
6902 /* This is a memory reference of some sort. */
6903 char *base_string;
6904
6905 /* Start and end of displacement string expression (if found). */
6906 char *displacement_string_start;
6907 char *displacement_string_end;
6908
6909 do_memory_reference:
6910 if ((i.mem_operands == 1
6911 && !current_templates->start->opcode_modifier.isstring)
6912 || i.mem_operands == 2)
6913 {
6914 as_bad (_("too many memory references for `%s'"),
6915 current_templates->start->name);
6916 return 0;
6917 }
6918
6919 /* Check for base index form. We detect the base index form by
6920 looking for an ')' at the end of the operand, searching
6921 for the '(' matching it, and finding a REGISTER_PREFIX or ','
6922 after the '('. */
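      /* E.g. in "-4(%ebp,%esi,4)" the displacement expression is "-4",
	 the base register %ebp, the index register %esi and the scale
	 factor 4.  */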
6923 base_string = op_string + strlen (op_string);
6924
6925 --base_string;
6926 if (is_space_char (*base_string))
6927 --base_string;
6928
6929 	  /* If we only have a displacement, set up for it to be parsed later. */
6930 displacement_string_start = op_string;
6931 displacement_string_end = base_string + 1;
6932
6933 if (*base_string == ')')
6934 {
6935 char *temp_string;
6936 unsigned int parens_balanced = 1;
6937 	      /* We've already checked that the number of left & right ()'s is
6938 equal, so this loop will not be infinite. */
6939 do
6940 {
6941 base_string--;
6942 if (*base_string == ')')
6943 parens_balanced++;
6944 if (*base_string == '(')
6945 parens_balanced--;
6946 }
6947 while (parens_balanced);
6948
6949 temp_string = base_string;
6950
6951 /* Skip past '(' and whitespace. */
6952 ++base_string;
6953 if (is_space_char (*base_string))
6954 ++base_string;
6955
6956 if (*base_string == ','
6957 || ((i.base_reg = parse_register (base_string, &end_op))
6958 != NULL))
6959 {
6960 displacement_string_end = temp_string;
6961
6962 i.types[this_operand].bitfield.baseindex = 1;
6963
6964 if (i.base_reg)
6965 {
6966 base_string = end_op;
6967 if (is_space_char (*base_string))
6968 ++base_string;
6969 }
6970
6971 /* There may be an index reg or scale factor here. */
6972 if (*base_string == ',')
6973 {
6974 ++base_string;
6975 if (is_space_char (*base_string))
6976 ++base_string;
6977
6978 if ((i.index_reg = parse_register (base_string, &end_op))
6979 != NULL)
6980 {
6981 base_string = end_op;
6982 if (is_space_char (*base_string))
6983 ++base_string;
6984 if (*base_string == ',')
6985 {
6986 ++base_string;
6987 if (is_space_char (*base_string))
6988 ++base_string;
6989 }
6990 else if (*base_string != ')')
6991 {
6992 as_bad (_("expecting `,' or `)' "
6993 "after index register in `%s'"),
6994 operand_string);
6995 return 0;
6996 }
6997 }
6998 else if (*base_string == REGISTER_PREFIX)
6999 {
7000 as_bad (_("bad register name `%s'"), base_string);
7001 return 0;
7002 }
7003
7004 /* Check for scale factor. */
7005 if (*base_string != ')')
7006 {
7007 char *end_scale = i386_scale (base_string);
7008
7009 if (!end_scale)
7010 return 0;
7011
7012 base_string = end_scale;
7013 if (is_space_char (*base_string))
7014 ++base_string;
7015 if (*base_string != ')')
7016 {
7017 as_bad (_("expecting `)' "
7018 "after scale factor in `%s'"),
7019 operand_string);
7020 return 0;
7021 }
7022 }
7023 else if (!i.index_reg)
7024 {
7025 as_bad (_("expecting index register or scale factor "
7026 "after `,'; got '%c'"),
7027 *base_string);
7028 return 0;
7029 }
7030 }
7031 else if (*base_string != ')')
7032 {
7033 as_bad (_("expecting `,' or `)' "
7034 "after base register in `%s'"),
7035 operand_string);
7036 return 0;
7037 }
7038 }
7039 else if (*base_string == REGISTER_PREFIX)
7040 {
7041 as_bad (_("bad register name `%s'"), base_string);
7042 return 0;
7043 }
7044 }
7045
7046 /* If there's an expression beginning the operand, parse it,
7047 assuming displacement_string_start and
7048 displacement_string_end are meaningful. */
7049 if (displacement_string_start != displacement_string_end)
7050 {
7051 if (!i386_displacement (displacement_string_start,
7052 displacement_string_end))
7053 return 0;
7054 }
7055
7056 /* Special case for (%dx) while doing input/output op. */
7057 if (i.base_reg
7058 && operand_type_equal (&i.base_reg->reg_type,
7059 &reg16_inoutportreg)
7060 && i.index_reg == 0
7061 && i.log2_scale_factor == 0
7062 && i.seg[i.mem_operands] == 0
7063 && !operand_type_check (i.types[this_operand], disp))
7064 {
7065 i.types[this_operand] = inoutportreg;
7066 return 1;
7067 }
7068
7069 if (i386_index_check (operand_string) == 0)
7070 return 0;
7071 i.types[this_operand].bitfield.mem = 1;
7072 i.mem_operands++;
7073 }
7074 else
7075 {
7076 /* It's not a memory operand; argh! */
7077 as_bad (_("invalid char %s beginning operand %d `%s'"),
7078 output_invalid (*op_string),
7079 this_operand + 1,
7080 op_string);
7081 return 0;
7082 }
7083 return 1; /* Normal return. */
7084 }
7085 \f
7086 /* md_estimate_size_before_relax()
7087
7088 Called just before relax() for rs_machine_dependent frags. The x86
7089 assembler uses these frags to handle variable size jump
7090 instructions.
7091
7092 Any symbol that is now undefined will not become defined.
7093 Return the correct fr_subtype in the frag.
7094 Return the initial "guess for variable size of frag" to caller.
7095 The guess is actually the growth beyond the fixed part. Whatever
7096 we do to grow the fixed or variable part contributes to our
7097 returned value. */
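/* For example, a short "jmp" (0xeb) to an ELF global symbol cannot be
   relaxed here; the UNCOND_JUMP case below rewrites it as the 0xe9 form
   and emits a (typically BFD_RELOC_32_PCREL) fix against the symbol.  */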
7098
7099 int
7100 md_estimate_size_before_relax (fragP, segment)
7101 fragS *fragP;
7102 segT segment;
7103 {
7104 /* We've already got fragP->fr_subtype right; all we have to do is
7105 check for un-relaxable symbols. On an ELF system, we can't relax
7106 an externally visible symbol, because it may be overridden by a
7107 shared library. */
7108 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7109 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7110 || (IS_ELF
7111 && (S_IS_EXTERNAL (fragP->fr_symbol)
7112 || S_IS_WEAK (fragP->fr_symbol)
7113 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7114 & BSF_GNU_INDIRECT_FUNCTION))))
7115 #endif
7116 #if defined (OBJ_COFF) && defined (TE_PE)
7117 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7118 && S_IS_WEAK (fragP->fr_symbol))
7119 #endif
7120 )
7121 {
7122 /* Symbol is undefined in this segment, or we need to keep a
7123 reloc so that weak symbols can be overridden. */
7124 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7125 enum bfd_reloc_code_real reloc_type;
7126 unsigned char *opcode;
7127 int old_fr_fix;
7128
7129 if (fragP->fr_var != NO_RELOC)
7130 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7131 else if (size == 2)
7132 reloc_type = BFD_RELOC_16_PCREL;
7133 else
7134 reloc_type = BFD_RELOC_32_PCREL;
7135
7136 old_fr_fix = fragP->fr_fix;
7137 opcode = (unsigned char *) fragP->fr_opcode;
7138
7139 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7140 {
7141 case UNCOND_JUMP:
7142 /* Make jmp (0xeb) a (d)word displacement jump. */
7143 opcode[0] = 0xe9;
7144 fragP->fr_fix += size;
7145 fix_new (fragP, old_fr_fix, size,
7146 fragP->fr_symbol,
7147 fragP->fr_offset, 1,
7148 reloc_type);
7149 break;
7150
7151 case COND_JUMP86:
7152 if (size == 2
7153 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7154 {
7155 /* Negate the condition, and branch past an
7156 unconditional jump. */
7157 opcode[0] ^= 1;
7158 opcode[1] = 3;
7159 /* Insert an unconditional jump. */
7160 opcode[2] = 0xe9;
7161 /* We added two extra opcode bytes, and have a two byte
7162 offset. */
7163 fragP->fr_fix += 2 + 2;
7164 fix_new (fragP, old_fr_fix + 2, 2,
7165 fragP->fr_symbol,
7166 fragP->fr_offset, 1,
7167 reloc_type);
7168 break;
7169 }
7170 /* Fall through. */
7171
7172 case COND_JUMP:
7173 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7174 {
7175 fixS *fixP;
7176
7177 fragP->fr_fix += 1;
7178 fixP = fix_new (fragP, old_fr_fix, 1,
7179 fragP->fr_symbol,
7180 fragP->fr_offset, 1,
7181 BFD_RELOC_8_PCREL);
7182 fixP->fx_signed = 1;
7183 break;
7184 }
7185
7186 /* This changes the byte-displacement jump 0x7N
7187 to the (d)word-displacement jump 0x0f,0x8N. */
7188 opcode[1] = opcode[0] + 0x10;
7189 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7190 /* We've added an opcode byte. */
7191 fragP->fr_fix += 1 + size;
7192 fix_new (fragP, old_fr_fix + 1, size,
7193 fragP->fr_symbol,
7194 fragP->fr_offset, 1,
7195 reloc_type);
7196 break;
7197
7198 default:
7199 BAD_CASE (fragP->fr_subtype);
7200 break;
7201 }
7202 frag_wane (fragP);
7203 return fragP->fr_fix - old_fr_fix;
7204 }
7205
7206 /* Guess size depending on current relax state. Initially the relax
7207 state will correspond to a short jump and we return 1, because
7208 the variable part of the frag (the branch offset) is one byte
7209 long. However, we can relax a section more than once and in that
7210 case we must either set fr_subtype back to the unrelaxed state,
7211 or return the value for the appropriate branch. */
7212 return md_relax_table[fragP->fr_subtype].rlx_length;
7213 }
7214
7215 /* Called after relax() is finished.
7216
7217 In: Address of frag.
7218 fr_type == rs_machine_dependent.
7219 fr_subtype is what the address relaxed to.
7220
7221 Out: Any fixSs and constants are set up.
7222 Caller will turn frag into a ".space 0". */
7223
7224 void
7225 md_convert_frag (abfd, sec, fragP)
7226 bfd *abfd ATTRIBUTE_UNUSED;
7227 segT sec ATTRIBUTE_UNUSED;
7228 fragS *fragP;
7229 {
7230 unsigned char *opcode;
7231 unsigned char *where_to_put_displacement = NULL;
7232 offsetT target_address;
7233 offsetT opcode_address;
7234 unsigned int extension = 0;
7235 offsetT displacement_from_opcode_start;
7236
7237 opcode = (unsigned char *) fragP->fr_opcode;
7238
7239 /* Address we want to reach in file space. */
7240 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7241
7242 /* Address opcode resides at in file space. */
7243 opcode_address = fragP->fr_address + fragP->fr_fix;
7244
7245 /* Displacement from opcode start to fill into instruction. */
7246 displacement_from_opcode_start = target_address - opcode_address;
7247
7248 if ((fragP->fr_subtype & BIG) == 0)
7249 {
7250 /* Don't have to change opcode. */
7251 extension = 1; /* 1 opcode + 1 displacement */
7252 where_to_put_displacement = &opcode[1];
7253 }
7254 else
7255 {
7256 if (no_cond_jump_promotion
7257 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7258 as_warn_where (fragP->fr_file, fragP->fr_line,
7259 _("long jump required"));
7260
7261 switch (fragP->fr_subtype)
7262 {
7263 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7264 extension = 4; /* 1 opcode + 4 displacement */
7265 opcode[0] = 0xe9;
7266 where_to_put_displacement = &opcode[1];
7267 break;
7268
7269 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7270 extension = 2; /* 1 opcode + 2 displacement */
7271 opcode[0] = 0xe9;
7272 where_to_put_displacement = &opcode[1];
7273 break;
7274
7275 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7276 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7277 extension = 5; /* 2 opcode + 4 displacement */
7278 opcode[1] = opcode[0] + 0x10;
7279 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7280 where_to_put_displacement = &opcode[2];
7281 break;
7282
7283 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7284 extension = 3; /* 2 opcode + 2 displacement */
7285 opcode[1] = opcode[0] + 0x10;
7286 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7287 where_to_put_displacement = &opcode[2];
7288 break;
7289
7290 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7291 extension = 4;
7292 opcode[0] ^= 1;
7293 opcode[1] = 3;
7294 opcode[2] = 0xe9;
7295 where_to_put_displacement = &opcode[3];
7296 break;
7297
7298 default:
7299 BAD_CASE (fragP->fr_subtype);
7300 break;
7301 }
7302 }
7303
7304   /* If the size is less than four we are sure that the operand fits,
7305      but if it's 4, then it could be that the displacement is larger
7306      than +/- 2GB. */
7307 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7308 && object_64bit
7309 && ((addressT) (displacement_from_opcode_start - extension
7310 + ((addressT) 1 << 31))
7311 > (((addressT) 2 << 31) - 1)))
7312 {
7313 as_bad_where (fragP->fr_file, fragP->fr_line,
7314 _("jump target out of range"));
7315 /* Make us emit 0. */
7316 displacement_from_opcode_start = extension;
7317 }
7318 /* Now put displacement after opcode. */
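  /* Subtracting EXTENSION yields a value relative to the end of the
     (possibly rewritten) jump instruction, which is what the CPU
     expects for these PC-relative displacements.  */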
7319 md_number_to_chars ((char *) where_to_put_displacement,
7320 (valueT) (displacement_from_opcode_start - extension),
7321 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7322 fragP->fr_fix += extension;
7323 }
7324 \f
7325 /* Apply a fixup (fixS) to segment data, once it has been determined
7326 by our caller that we have all the info we need to fix it up.
7327
7328 On the 386, immediates, displacements, and data pointers are all in
7329 the same (little-endian) format, so we don't need to care about which
7330 we are handling. */
7331
7332 void
7333 md_apply_fix (fixP, valP, seg)
7334 /* The fix we're to put in. */
7335 fixS *fixP;
7336 /* Pointer to the value of the bits. */
7337 valueT *valP;
7338 /* Segment fix is from. */
7339 segT seg ATTRIBUTE_UNUSED;
7340 {
7341 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7342 valueT value = *valP;
7343
7344 #if !defined (TE_Mach)
7345 if (fixP->fx_pcrel)
7346 {
7347 switch (fixP->fx_r_type)
7348 {
7349 default:
7350 break;
7351
7352 case BFD_RELOC_64:
7353 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7354 break;
7355 case BFD_RELOC_32:
7356 case BFD_RELOC_X86_64_32S:
7357 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7358 break;
7359 case BFD_RELOC_16:
7360 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7361 break;
7362 case BFD_RELOC_8:
7363 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7364 break;
7365 }
7366 }
7367
7368 if (fixP->fx_addsy != NULL
7369 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7370 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7371 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7372 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7373 && !use_rela_relocations)
7374 {
7375 /* This is a hack. There should be a better way to handle this.
7376 This covers for the fact that bfd_install_relocation will
7377 subtract the current location (for partial_inplace, PC relative
7378 relocations); see more below. */
7379 #ifndef OBJ_AOUT
7380 if (IS_ELF
7381 #ifdef TE_PE
7382 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7383 #endif
7384 )
7385 value += fixP->fx_where + fixP->fx_frag->fr_address;
7386 #endif
7387 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7388 if (IS_ELF)
7389 {
7390 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7391
7392 if ((sym_seg == seg
7393 || (symbol_section_p (fixP->fx_addsy)
7394 && sym_seg != absolute_section))
7395 && !generic_force_reloc (fixP))
7396 {
7397 /* Yes, we add the values in twice. This is because
7398 bfd_install_relocation subtracts them out again. I think
7399 bfd_install_relocation is broken, but I don't dare change
7400 it. FIXME. */
7401 value += fixP->fx_where + fixP->fx_frag->fr_address;
7402 }
7403 }
7404 #endif
7405 #if defined (OBJ_COFF) && defined (TE_PE)
7406 /* For some reason, the PE format does not store a
7407 section address offset for a PC relative symbol. */
7408 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7409 || S_IS_WEAK (fixP->fx_addsy))
7410 value += md_pcrel_from (fixP);
7411 #endif
7412 }
7413 #if defined (OBJ_COFF) && defined (TE_PE)
7414 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7415 {
7416 value -= S_GET_VALUE (fixP->fx_addsy);
7417 }
7418 #endif
7419
7420 /* Fix a few things - the dynamic linker expects certain values here,
7421 and we must not disappoint it. */
7422 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7423 if (IS_ELF && fixP->fx_addsy)
7424 switch (fixP->fx_r_type)
7425 {
7426 case BFD_RELOC_386_PLT32:
7427 case BFD_RELOC_X86_64_PLT32:
7428 /* Make the jump instruction point to the address of the operand. At
7429 runtime we merely add the offset to the actual PLT entry. */
7430 value = -4;
7431 break;
7432
7433 case BFD_RELOC_386_TLS_GD:
7434 case BFD_RELOC_386_TLS_LDM:
7435 case BFD_RELOC_386_TLS_IE_32:
7436 case BFD_RELOC_386_TLS_IE:
7437 case BFD_RELOC_386_TLS_GOTIE:
7438 case BFD_RELOC_386_TLS_GOTDESC:
7439 case BFD_RELOC_X86_64_TLSGD:
7440 case BFD_RELOC_X86_64_TLSLD:
7441 case BFD_RELOC_X86_64_GOTTPOFF:
7442 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7443 value = 0; /* Fully resolved at runtime. No addend. */
7444 /* Fallthrough */
7445 case BFD_RELOC_386_TLS_LE:
7446 case BFD_RELOC_386_TLS_LDO_32:
7447 case BFD_RELOC_386_TLS_LE_32:
7448 case BFD_RELOC_X86_64_DTPOFF32:
7449 case BFD_RELOC_X86_64_DTPOFF64:
7450 case BFD_RELOC_X86_64_TPOFF32:
7451 case BFD_RELOC_X86_64_TPOFF64:
7452 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7453 break;
7454
7455 case BFD_RELOC_386_TLS_DESC_CALL:
7456 case BFD_RELOC_X86_64_TLSDESC_CALL:
7457 value = 0; /* Fully resolved at runtime. No addend. */
7458 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7459 fixP->fx_done = 0;
7460 return;
7461
7462 case BFD_RELOC_386_GOT32:
7463 case BFD_RELOC_X86_64_GOT32:
7464 value = 0; /* Fully resolved at runtime. No addend. */
7465 break;
7466
7467 case BFD_RELOC_VTABLE_INHERIT:
7468 case BFD_RELOC_VTABLE_ENTRY:
7469 fixP->fx_done = 0;
7470 return;
7471
7472 default:
7473 break;
7474 }
7475 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7476 *valP = value;
7477 #endif /* !defined (TE_Mach) */
7478
7479 /* Are we finished with this relocation now? */
7480 if (fixP->fx_addsy == NULL)
7481 fixP->fx_done = 1;
7482 #if defined (OBJ_COFF) && defined (TE_PE)
7483 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7484 {
7485 fixP->fx_done = 0;
7486 /* Remember value for tc_gen_reloc. */
7487 fixP->fx_addnumber = value;
7488 /* Clear out the frag for now. */
7489 value = 0;
7490 }
7491 #endif
7492 else if (use_rela_relocations)
7493 {
7494 fixP->fx_no_overflow = 1;
7495 /* Remember value for tc_gen_reloc. */
7496 fixP->fx_addnumber = value;
7497 value = 0;
7498 }
7499
7500 md_number_to_chars (p, value, fixP->fx_size);
7501 }
7502 \f
7503 char *
7504 md_atof (int type, char *litP, int *sizeP)
7505 {
7506 /* This outputs the LITTLENUMs in REVERSE order;
7507 in accord with the bigendian 386. */
7508      in accord with the little-endian 386. */
7509 }
7510 \f
7511 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7512
7513 static char *
7514 output_invalid (int c)
7515 {
7516 if (ISPRINT (c))
7517 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7518 "'%c'", c);
7519 else
7520 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7521 "(0x%x)", (unsigned char) c);
7522 return output_invalid_buf;
7523 }
7524
7525 /* REG_STRING starts *before* REGISTER_PREFIX. */
7526
7527 static const reg_entry *
7528 parse_real_register (char *reg_string, char **end_op)
7529 {
7530 char *s = reg_string;
7531 char *p;
7532 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7533 const reg_entry *r;
7534
7535 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7536 if (*s == REGISTER_PREFIX)
7537 ++s;
7538
7539 if (is_space_char (*s))
7540 ++s;
7541
7542 p = reg_name_given;
7543 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7544 {
7545 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7546 return (const reg_entry *) NULL;
7547 s++;
7548 }
7549
7550 /* For naked regs, make sure that we are not dealing with an identifier.
7551 This prevents confusing an identifier like `eax_var' with register
7552 `eax'. */
7553 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7554 return (const reg_entry *) NULL;
7555
7556 *end_op = s;
7557
7558 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7559
7560 /* Handle floating point regs, allowing spaces in the (i) part. */
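  /* E.g. "%st ( 2 )" is accepted here and resolves to st(2).  */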
7561 if (r == i386_regtab /* %st is first entry of table */)
7562 {
7563 if (is_space_char (*s))
7564 ++s;
7565 if (*s == '(')
7566 {
7567 ++s;
7568 if (is_space_char (*s))
7569 ++s;
7570 if (*s >= '0' && *s <= '7')
7571 {
7572 int fpr = *s - '0';
7573 ++s;
7574 if (is_space_char (*s))
7575 ++s;
7576 if (*s == ')')
7577 {
7578 *end_op = s + 1;
7579 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7580 know (r);
7581 return r + fpr;
7582 }
7583 }
7584 /* We have "%st(" then garbage. */
7585 return (const reg_entry *) NULL;
7586 }
7587 }
7588
7589 if (r == NULL || allow_pseudo_reg)
7590 return r;
7591
7592 if (operand_type_all_zero (&r->reg_type))
7593 return (const reg_entry *) NULL;
7594
7595 if ((r->reg_type.bitfield.reg32
7596 || r->reg_type.bitfield.sreg3
7597 || r->reg_type.bitfield.control
7598 || r->reg_type.bitfield.debug
7599 || r->reg_type.bitfield.test)
7600 && !cpu_arch_flags.bitfield.cpui386)
7601 return (const reg_entry *) NULL;
7602
7603 if (r->reg_type.bitfield.floatreg
7604 && !cpu_arch_flags.bitfield.cpu8087
7605 && !cpu_arch_flags.bitfield.cpu287
7606 && !cpu_arch_flags.bitfield.cpu387)
7607 return (const reg_entry *) NULL;
7608
7609 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
7610 return (const reg_entry *) NULL;
7611
7612 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
7613 return (const reg_entry *) NULL;
7614
7615 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
7616 return (const reg_entry *) NULL;
7617
7618   /* Don't allow the fake index registers unless allow_index_reg is non-zero. */
7619 if (!allow_index_reg
7620 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
7621 return (const reg_entry *) NULL;
7622
7623 if (((r->reg_flags & (RegRex64 | RegRex))
7624 || r->reg_type.bitfield.reg64)
7625 && (!cpu_arch_flags.bitfield.cpulm
7626 || !operand_type_equal (&r->reg_type, &control))
7627 && flag_code != CODE_64BIT)
7628 return (const reg_entry *) NULL;
7629
7630 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
7631 return (const reg_entry *) NULL;
7632
7633 return r;
7634 }
7635
7636 /* REG_STRING starts *before* REGISTER_PREFIX. */
7637
7638 static const reg_entry *
7639 parse_register (char *reg_string, char **end_op)
7640 {
7641 const reg_entry *r;
7642
7643 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
7644 r = parse_real_register (reg_string, end_op);
7645 else
7646 r = NULL;
7647 if (!r)
7648 {
7649 char *save = input_line_pointer;
7650 char c;
7651 symbolS *symbolP;
7652
7653 input_line_pointer = reg_string;
7654 c = get_symbol_end ();
7655 symbolP = symbol_find (reg_string);
7656 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
7657 {
7658 const expressionS *e = symbol_get_value_expression (symbolP);
7659
7660 know (e->X_op == O_register);
7661 know (e->X_add_number >= 0
7662 && (valueT) e->X_add_number < i386_regtab_size);
7663 r = i386_regtab + e->X_add_number;
7664 *end_op = input_line_pointer;
7665 }
7666 *input_line_pointer = c;
7667 input_line_pointer = save;
7668 }
7669 return r;
7670 }
7671
7672 int
7673 i386_parse_name (char *name, expressionS *e, char *nextcharP)
7674 {
7675 const reg_entry *r;
7676 char *end = input_line_pointer;
7677
7678 *end = *nextcharP;
7679 r = parse_register (name, &input_line_pointer);
7680 if (r && end <= input_line_pointer)
7681 {
7682 *nextcharP = *input_line_pointer;
7683 *input_line_pointer = 0;
7684 e->X_op = O_register;
7685 e->X_add_number = r - i386_regtab;
7686 return 1;
7687 }
7688 input_line_pointer = end;
7689 *end = 0;
7690 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
7691 }
7692
7693 void
7694 md_operand (expressionS *e)
7695 {
7696 char *end;
7697 const reg_entry *r;
7698
7699 switch (*input_line_pointer)
7700 {
7701 case REGISTER_PREFIX:
7702 r = parse_real_register (input_line_pointer, &end);
7703 if (r)
7704 {
7705 e->X_op = O_register;
7706 e->X_add_number = r - i386_regtab;
7707 input_line_pointer = end;
7708 }
7709 break;
7710
7711 case '[':
7712 gas_assert (intel_syntax);
7713 end = input_line_pointer++;
7714 expression (e);
7715 if (*input_line_pointer == ']')
7716 {
7717 ++input_line_pointer;
7718 e->X_op_symbol = make_expr_symbol (e);
7719 e->X_add_symbol = NULL;
7720 e->X_add_number = 0;
7721 e->X_op = O_index;
7722 }
7723 else
7724 {
7725 e->X_op = O_absent;
7726 input_line_pointer = end;
7727 }
7728 break;
7729 }
7730 }
7731
7732 \f
7733 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7734 const char *md_shortopts = "kVQ:sqn";
7735 #else
7736 const char *md_shortopts = "qn";
7737 #endif
7738
7739 #define OPTION_32 (OPTION_MD_BASE + 0)
7740 #define OPTION_64 (OPTION_MD_BASE + 1)
7741 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
7742 #define OPTION_MARCH (OPTION_MD_BASE + 3)
7743 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
7744 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
7745 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
7746 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
7747 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
7748 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
7749 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
7750 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
7751
7752 struct option md_longopts[] =
7753 {
7754 {"32", no_argument, NULL, OPTION_32},
7755 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7756 || defined (TE_PE) || defined (TE_PEP))
7757 {"64", no_argument, NULL, OPTION_64},
7758 #endif
7759 {"divide", no_argument, NULL, OPTION_DIVIDE},
7760 {"march", required_argument, NULL, OPTION_MARCH},
7761 {"mtune", required_argument, NULL, OPTION_MTUNE},
7762 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
7763 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
7764 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
7765 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
7766 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
7767 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
7768 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
7769 {NULL, no_argument, NULL, 0}
7770 };
7771 size_t md_longopts_size = sizeof (md_longopts);
7772
7773 int
7774 md_parse_option (int c, char *arg)
7775 {
7776 unsigned int i;
7777 char *arch, *next;
7778
7779 switch (c)
7780 {
7781 case 'n':
7782 optimize_align_code = 0;
7783 break;
7784
7785 case 'q':
7786 quiet_warnings = 1;
7787 break;
7788
7789 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7790 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
7791 should be emitted or not. FIXME: Not implemented. */
7792 case 'Q':
7793 break;
7794
7795 /* -V: SVR4 argument to print version ID. */
7796 case 'V':
7797 print_version_id ();
7798 break;
7799
7800 /* -k: Ignore for FreeBSD compatibility. */
7801 case 'k':
7802 break;
7803
7804 case 's':
7805 /* -s: On i386 Solaris, this tells the native assembler to use
7806 .stab instead of .stab.excl. We always use .stab anyhow. */
7807 break;
7808 #endif
7809 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
7810 || defined (TE_PE) || defined (TE_PEP))
7811 case OPTION_64:
7812 {
7813 const char **list, **l;
7814
7815 list = bfd_target_list ();
7816 for (l = list; *l != NULL; l++)
7817 if (CONST_STRNEQ (*l, "elf64-x86-64")
7818 || strcmp (*l, "coff-x86-64") == 0
7819 || strcmp (*l, "pe-x86-64") == 0
7820 || strcmp (*l, "pei-x86-64") == 0)
7821 {
7822 default_arch = "x86_64";
7823 break;
7824 }
7825 if (*l == NULL)
7826 	  as_fatal (_("No compiled-in support for x86_64"));
7827 free (list);
7828 }
7829 break;
7830 #endif
7831
7832 case OPTION_32:
7833 default_arch = "i386";
7834 break;
7835
7836 case OPTION_DIVIDE:
7837 #ifdef SVR4_COMMENT_CHARS
7838 {
7839 char *n, *t;
7840 const char *s;
7841
7842 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
7843 t = n;
7844 for (s = i386_comment_chars; *s != '\0'; s++)
7845 if (*s != '/')
7846 *t++ = *s;
7847 *t = '\0';
7848 i386_comment_chars = n;
7849 }
7850 #endif
7851 break;
7852
7853 case OPTION_MARCH:
7854 arch = xstrdup (arg);
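      /* ARG is a '+'-separated list, e.g. "core2+avx" or "k8+nommx";
	 each piece names either a processor or an (optionally "no"
	 prefixed) ISA extension from the cpu_arch table.  */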
7855 do
7856 {
7857 if (*arch == '.')
7858 as_fatal (_("Invalid -march= option: `%s'"), arg);
7859 next = strchr (arch, '+');
7860 if (next)
7861 *next++ = '\0';
7862 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
7863 {
7864 if (strcmp (arch, cpu_arch [i].name) == 0)
7865 {
7866 /* Processor. */
7867 cpu_arch_name = cpu_arch[i].name;
7868 cpu_sub_arch_name = NULL;
7869 cpu_arch_flags = cpu_arch[i].flags;
7870 cpu_arch_isa = cpu_arch[i].type;
7871 cpu_arch_isa_flags = cpu_arch[i].flags;
7872 if (!cpu_arch_tune_set)
7873 {
7874 cpu_arch_tune = cpu_arch_isa;
7875 cpu_arch_tune_flags = cpu_arch_isa_flags;
7876 }
7877 break;
7878 }
7879 else if (*cpu_arch [i].name == '.'
7880 && strcmp (arch, cpu_arch [i].name + 1) == 0)
7881 {
7882 		  /* ISA extension. */
7883 i386_cpu_flags flags;
7884
7885 if (strncmp (arch, "no", 2))
7886 flags = cpu_flags_or (cpu_arch_flags,
7887 cpu_arch[i].flags);
7888 else
7889 flags = cpu_flags_and_not (cpu_arch_flags,
7890 cpu_arch[i].flags);
7891 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
7892 {
7893 if (cpu_sub_arch_name)
7894 {
7895 char *name = cpu_sub_arch_name;
7896 cpu_sub_arch_name = concat (name,
7897 cpu_arch[i].name,
7898 (const char *) NULL);
7899 free (name);
7900 }
7901 else
7902 cpu_sub_arch_name = xstrdup (cpu_arch[i].name);
7903 cpu_arch_flags = flags;
7904 }
7905 break;
7906 }
7907 }
7908
7909 if (i >= ARRAY_SIZE (cpu_arch))
7910 as_fatal (_("Invalid -march= option: `%s'"), arg);
7911
7912 arch = next;
7913 }
7914       while (next != NULL);
7915 break;
7916
7917 case OPTION_MTUNE:
7918 if (*arg == '.')
7919 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
7920 for (i = 0; i < ARRAY_SIZE (cpu_arch); i++)
7921 {
7922 if (strcmp (arg, cpu_arch [i].name) == 0)
7923 {
7924 cpu_arch_tune_set = 1;
7925 cpu_arch_tune = cpu_arch [i].type;
7926 cpu_arch_tune_flags = cpu_arch[i].flags;
7927 break;
7928 }
7929 }
7930 if (i >= ARRAY_SIZE (cpu_arch))
7931 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
7932 break;
7933
7934 case OPTION_MMNEMONIC:
7935 if (strcasecmp (arg, "att") == 0)
7936 intel_mnemonic = 0;
7937 else if (strcasecmp (arg, "intel") == 0)
7938 intel_mnemonic = 1;
7939 else
7940 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
7941 break;
7942
7943 case OPTION_MSYNTAX:
7944 if (strcasecmp (arg, "att") == 0)
7945 intel_syntax = 0;
7946 else if (strcasecmp (arg, "intel") == 0)
7947 intel_syntax = 1;
7948 else
7949 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
7950 break;
7951
7952 case OPTION_MINDEX_REG:
7953 allow_index_reg = 1;
7954 break;
7955
7956 case OPTION_MNAKED_REG:
7957 allow_naked_reg = 1;
7958 break;
7959
7960 case OPTION_MOLD_GCC:
7961 old_gcc = 1;
7962 break;
7963
7964 case OPTION_MSSE2AVX:
7965 sse2avx = 1;
7966 break;
7967
7968 case OPTION_MSSE_CHECK:
7969 if (strcasecmp (arg, "error") == 0)
7970 sse_check = sse_check_error;
7971 else if (strcasecmp (arg, "warning") == 0)
7972 sse_check = sse_check_warning;
7973 else if (strcasecmp (arg, "none") == 0)
7974 sse_check = sse_check_none;
7975 else
7976 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
7977 break;
7978
7979 default:
7980 return 0;
7981 }
7982 return 1;
7983 }
7984
7985 void
7986 md_show_usage (stream)
7987 FILE *stream;
7988 {
7989 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7990 fprintf (stream, _("\
7991 -Q ignored\n\
7992 -V print assembler version number\n\
7993 -k ignored\n"));
7994 #endif
7995 fprintf (stream, _("\
7996 -n Do not optimize code alignment\n\
7997 -q quieten some warnings\n"));
7998 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7999 fprintf (stream, _("\
8000 -s ignored\n"));
8001 #endif
8002 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8003 || defined (TE_PE) || defined (TE_PEP))
8004 fprintf (stream, _("\
8005 --32/--64 generate 32bit/64bit code\n"));
8006 #endif
8007 #ifdef SVR4_COMMENT_CHARS
8008 fprintf (stream, _("\
8009 --divide do not treat `/' as a comment character\n"));
8010 #else
8011 fprintf (stream, _("\
8012 --divide ignored\n"));
8013 #endif
8014 fprintf (stream, _("\
8015 -march=CPU[,+EXTENSION...]\n\
8016 generate code for CPU and EXTENSION, CPU is one of:\n\
8017 i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
8018 pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
8019 core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
8020 amdfam10, generic32, generic64\n\
8021 EXTENSION is combination of:\n\
8022 8087, 287, 387, no87, mmx, nommx, sse, sse2, sse3,\n\
8023 ssse3, sse4.1, sse4.2, sse4, nosse, avx, noavx,\n\
8024 vmx, smx, xsave, movbe, ept, aes, pclmul, fma,\n\
8025 clflush, syscall, rdtscp, 3dnow, 3dnowa, sse4a,\n\
8026 svme, abm, padlock, fma4\n"));
8027 fprintf (stream, _("\
8028 -mtune=CPU optimize for CPU, CPU is one of:\n\
8029 i8086, i186, i286, i386, i486, pentium, pentiumpro,\n\
8030 pentiumii, pentiumiii, pentium4, prescott, nocona,\n\
8031 core, core2, corei7, l1om, k6, k6_2, athlon, k8,\n\
8032 amdfam10, generic32, generic64\n"));
8033 fprintf (stream, _("\
8034 -msse2avx encode SSE instructions with VEX prefix\n"));
8035 fprintf (stream, _("\
8036 -msse-check=[none|error|warning]\n\
8037 check SSE instructions\n"));
8038 fprintf (stream, _("\
8039 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8040 fprintf (stream, _("\
8041 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8042 fprintf (stream, _("\
8043 -mindex-reg support pseudo index registers\n"));
8044 fprintf (stream, _("\
8045 -mnaked-reg don't require `%%' prefix for registers\n"));
8046 fprintf (stream, _("\
8047 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8048 }
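/* Illustrative command lines only (not taken from the original sources),
   built from the option strings printed above; exact acceptance is decided
   by md_parse_option:

       as --64 -march=core2,+sse4.1 -mtune=corei7 foo.s
       as --32 -msyntax=intel -mnaked-reg -mindex-reg bar.s
       as -march=k8,+sse4a,+3dnowa -msse-check=warning baz.s  */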
8049
8050 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8051 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8052 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8053
8054 /* Pick the target format to use. */
8055
8056 const char *
8057 i386_target_format (void)
8058 {
8059 if (!strcmp (default_arch, "x86_64"))
8060 {
8061 set_code_flag (CODE_64BIT);
8062 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8063 {
8064 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8065 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8066 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8067 cpu_arch_isa_flags.bitfield.cpui486 = 1;
8068 cpu_arch_isa_flags.bitfield.cpui586 = 1;
8069 cpu_arch_isa_flags.bitfield.cpui686 = 1;
8070 cpu_arch_isa_flags.bitfield.cpuclflush = 1;
8071       cpu_arch_isa_flags.bitfield.cpummx = 1;
8072 cpu_arch_isa_flags.bitfield.cpusse = 1;
8073 cpu_arch_isa_flags.bitfield.cpusse2 = 1;
8074 cpu_arch_isa_flags.bitfield.cpulm = 1;
8075 }
8076 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8077 {
8078 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8079 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8080 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8081 cpu_arch_tune_flags.bitfield.cpui486 = 1;
8082 cpu_arch_tune_flags.bitfield.cpui586 = 1;
8083 cpu_arch_tune_flags.bitfield.cpui686 = 1;
8084 cpu_arch_tune_flags.bitfield.cpuclflush = 1;
8085       cpu_arch_tune_flags.bitfield.cpummx = 1;
8086 cpu_arch_tune_flags.bitfield.cpusse = 1;
8087 cpu_arch_tune_flags.bitfield.cpusse2 = 1;
8088 }
8089 }
8090 else if (!strcmp (default_arch, "i386"))
8091 {
8092 set_code_flag (CODE_32BIT);
8093 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8094 {
8095 cpu_arch_isa_flags.bitfield.cpui186 = 1;
8096 cpu_arch_isa_flags.bitfield.cpui286 = 1;
8097 cpu_arch_isa_flags.bitfield.cpui386 = 1;
8098 }
8099 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8100 {
8101 cpu_arch_tune_flags.bitfield.cpui186 = 1;
8102 cpu_arch_tune_flags.bitfield.cpui286 = 1;
8103 cpu_arch_tune_flags.bitfield.cpui386 = 1;
8104 }
8105 }
8106 else
8107 as_fatal (_("Unknown architecture"));
8108 switch (OUTPUT_FLAVOR)
8109 {
8110 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8111 case bfd_target_aout_flavour:
8112 return AOUT_TARGET_FORMAT;
8113 #endif
8114 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8115 # if defined (TE_PE) || defined (TE_PEP)
8116 case bfd_target_coff_flavour:
8117 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8118 # elif defined (TE_GO32)
8119 case bfd_target_coff_flavour:
8120 return "coff-go32";
8121 # else
8122 case bfd_target_coff_flavour:
8123 return "coff-i386";
8124 # endif
8125 #endif
8126 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8127 case bfd_target_elf_flavour:
8128 {
8129 if (flag_code == CODE_64BIT)
8130 {
8131 object_64bit = 1;
8132 use_rela_relocations = 1;
8133 }
8134 if (cpu_arch_isa == PROCESSOR_L1OM)
8135 {
8136 if (flag_code != CODE_64BIT)
8137 as_fatal (_("Intel L1OM is 64bit only"));
8138 return ELF_TARGET_L1OM_FORMAT;
8139 }
8140 else
8141 return (flag_code == CODE_64BIT
8142 ? ELF_TARGET_FORMAT64 : ELF_TARGET_FORMAT);
8143 }
8144 #endif
8145 #if defined (OBJ_MACH_O)
8146 case bfd_target_mach_o_flavour:
8147 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8148 #endif
8149 default:
8150 abort ();
8151 return NULL;
8152 }
8153 }
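/* Summary of the selection above: the default_arch string ("x86_64" vs
   "i386") fixes the code size and seeds the ISA/tune flag defaults, then
   the BFD output flavour picks the target name; 64-bit ELF additionally
   switches to RELA relocations, and L1OM is accepted only as 64-bit ELF.  */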
8154
8155 #endif /* OBJ_MAYBE_ more than one */
8156
8157 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8158 void
8159 i386_elf_emit_arch_note (void)
8160 {
8161 if (IS_ELF && cpu_arch_name != NULL)
8162 {
8163 char *p;
8164 asection *seg = now_seg;
8165 subsegT subseg = now_subseg;
8166 Elf_Internal_Note i_note;
8167 Elf_External_Note e_note;
8168 asection *note_secp;
8169 int len;
8170
8171 /* Create the .note section. */
8172 note_secp = subseg_new (".note", 0);
8173 bfd_set_section_flags (stdoutput,
8174 note_secp,
8175 SEC_HAS_CONTENTS | SEC_READONLY);
8176
8177 /* Process the arch string. */
8178 len = strlen (cpu_arch_name);
8179
8180 i_note.namesz = len + 1;
8181 i_note.descsz = 0;
8182 i_note.type = NT_ARCH;
8183 p = frag_more (sizeof (e_note.namesz));
8184 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8185 p = frag_more (sizeof (e_note.descsz));
8186 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8187 p = frag_more (sizeof (e_note.type));
8188 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8189 p = frag_more (len + 1);
8190 strcpy (p, cpu_arch_name);
8191
8192 frag_align (2, 0, 0);
8193
8194 subseg_set (seg, subseg);
8195 }
8196 }
8197 #endif
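/* Sketch of the note emitted above (derived from the code, not normative):
   a namesz of strlen (cpu_arch_name) + 1, a descsz of 0, a type of NT_ARCH,
   then the NUL-terminated architecture name, padded by
   frag_align (2, 0, 0) to a 4-byte boundary.  */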
8198 \f
8199 symbolS *
8200 md_undefined_symbol (name)
8201 char *name;
8202 {
8203 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8204 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8205 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8206 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8207 {
8208 if (!GOT_symbol)
8209 {
8210 if (symbol_find (name))
8211 as_bad (_("GOT already in symbol table"));
8212 GOT_symbol = symbol_new (name, undefined_section,
8213 (valueT) 0, &zero_address_frag);
8214         }
8215 return GOT_symbol;
8216 }
8217 return 0;
8218 }
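/* The three explicit character compares above are just a cheap pre-filter
   so that strcmp is rarely reached; on a real match the GOT symbol is
   created lazily, once, in the undefined section.  */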
8219
8220 /* Round up a section size to the appropriate boundary. */
8221
8222 valueT
8223 md_section_align (segment, size)
8224 segT segment ATTRIBUTE_UNUSED;
8225 valueT size;
8226 {
8227 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8228 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8229 {
8230 /* For a.out, force the section size to be aligned. If we don't do
8231 this, BFD will align it for us, but it will not write out the
8232 final bytes of the section. This may be a bug in BFD, but it is
8233 easier to fix it here since that is how the other a.out targets
8234 work. */
8235 int align;
8236
8237 align = bfd_get_section_alignment (stdoutput, segment);
8238 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8239 }
8240 #endif
8241
8242 return size;
8243 }
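/* Worked example of the a.out rounding above (illustrative values only):
   with align == 4, (1 << align) - 1 == 0xf, so a size of 0x101 becomes
   (0x101 + 0xf) & ~0xf == 0x110.  */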
8244
8245 /* On the i386, PC-relative offsets are relative to the start of the
8246 next instruction. That is, the address of the offset, plus its
8247 size, since the offset is always the last part of the insn. */
8248
8249 long
8250 md_pcrel_from (fixS *fixP)
8251 {
8252 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8253 }
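/* Example (illustrative): for a 4-byte displacement whose fixup sits at
   fx_where == 1 in a frag at fr_address == 0x100, this returns
   0x100 + 1 + 4 == 0x105, the address of the following instruction.  */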
8254
8255 #ifndef I386COFF
8256
8257 static void
8258 s_bss (int ignore ATTRIBUTE_UNUSED)
8259 {
8260 int temp;
8261
8262 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8263 if (IS_ELF)
8264 obj_elf_section_change_hook ();
8265 #endif
8266 temp = get_absolute_expression ();
8267 subseg_set (bss_section, (subsegT) temp);
8268 demand_empty_rest_of_line ();
8269 }
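/* I.e. the `.bss' pseudo-op handled above simply switches output to the
   subsection of the bss section named by the absolute expression on the
   line; no space is allocated by the directive itself.  */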
8270
8271 #endif
8272
8273 void
8274 i386_validate_fix (fixS *fixp)
8275 {
8276 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8277 {
8278 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8279 {
8280 if (!object_64bit)
8281 abort ();
8282 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8283 }
8284 else
8285 {
8286 if (!object_64bit)
8287 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8288 else
8289 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8290 }
8291 fixp->fx_subsy = 0;
8292 }
8293 }
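/* In other words, an expression that subtracts the GOT symbol
   (fx_subsy == GOT_symbol) is rewritten here: a pc-relative 32-bit fix in
   a 64-bit object becomes GOTPCREL, otherwise it becomes GOTOFF (32-bit
   objects) or GOTOFF64 (64-bit objects), and the subtracted symbol is
   dropped.  */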
8294
8295 arelent *
8296 tc_gen_reloc (section, fixp)
8297 asection *section ATTRIBUTE_UNUSED;
8298 fixS *fixp;
8299 {
8300 arelent *rel;
8301 bfd_reloc_code_real_type code;
8302
8303 switch (fixp->fx_r_type)
8304 {
8305 case BFD_RELOC_X86_64_PLT32:
8306 case BFD_RELOC_X86_64_GOT32:
8307 case BFD_RELOC_X86_64_GOTPCREL:
8308 case BFD_RELOC_386_PLT32:
8309 case BFD_RELOC_386_GOT32:
8310 case BFD_RELOC_386_GOTOFF:
8311 case BFD_RELOC_386_GOTPC:
8312 case BFD_RELOC_386_TLS_GD:
8313 case BFD_RELOC_386_TLS_LDM:
8314 case BFD_RELOC_386_TLS_LDO_32:
8315 case BFD_RELOC_386_TLS_IE_32:
8316 case BFD_RELOC_386_TLS_IE:
8317 case BFD_RELOC_386_TLS_GOTIE:
8318 case BFD_RELOC_386_TLS_LE_32:
8319 case BFD_RELOC_386_TLS_LE:
8320 case BFD_RELOC_386_TLS_GOTDESC:
8321 case BFD_RELOC_386_TLS_DESC_CALL:
8322 case BFD_RELOC_X86_64_TLSGD:
8323 case BFD_RELOC_X86_64_TLSLD:
8324 case BFD_RELOC_X86_64_DTPOFF32:
8325 case BFD_RELOC_X86_64_DTPOFF64:
8326 case BFD_RELOC_X86_64_GOTTPOFF:
8327 case BFD_RELOC_X86_64_TPOFF32:
8328 case BFD_RELOC_X86_64_TPOFF64:
8329 case BFD_RELOC_X86_64_GOTOFF64:
8330 case BFD_RELOC_X86_64_GOTPC32:
8331 case BFD_RELOC_X86_64_GOT64:
8332 case BFD_RELOC_X86_64_GOTPCREL64:
8333 case BFD_RELOC_X86_64_GOTPC64:
8334 case BFD_RELOC_X86_64_GOTPLT64:
8335 case BFD_RELOC_X86_64_PLTOFF64:
8336 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8337 case BFD_RELOC_X86_64_TLSDESC_CALL:
8338 case BFD_RELOC_RVA:
8339 case BFD_RELOC_VTABLE_ENTRY:
8340 case BFD_RELOC_VTABLE_INHERIT:
8341 #ifdef TE_PE
8342 case BFD_RELOC_32_SECREL:
8343 #endif
8344 code = fixp->fx_r_type;
8345 break;
8346 case BFD_RELOC_X86_64_32S:
8347 if (!fixp->fx_pcrel)
8348 {
8349 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8350 code = fixp->fx_r_type;
8351 break;
8352 }
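          /* Fall through for the pc-relative case.  */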
8353 default:
8354 if (fixp->fx_pcrel)
8355 {
8356 switch (fixp->fx_size)
8357 {
8358 default:
8359 as_bad_where (fixp->fx_file, fixp->fx_line,
8360 _("can not do %d byte pc-relative relocation"),
8361 fixp->fx_size);
8362 code = BFD_RELOC_32_PCREL;
8363 break;
8364 case 1: code = BFD_RELOC_8_PCREL; break;
8365 case 2: code = BFD_RELOC_16_PCREL; break;
8366 case 4: code = BFD_RELOC_32_PCREL; break;
8367 #ifdef BFD64
8368 case 8: code = BFD_RELOC_64_PCREL; break;
8369 #endif
8370 }
8371 }
8372 else
8373 {
8374 switch (fixp->fx_size)
8375 {
8376 default:
8377 as_bad_where (fixp->fx_file, fixp->fx_line,
8378 _("can not do %d byte relocation"),
8379 fixp->fx_size);
8380 code = BFD_RELOC_32;
8381 break;
8382 case 1: code = BFD_RELOC_8; break;
8383 case 2: code = BFD_RELOC_16; break;
8384 case 4: code = BFD_RELOC_32; break;
8385 #ifdef BFD64
8386 case 8: code = BFD_RELOC_64; break;
8387 #endif
8388 }
8389 }
8390 break;
8391 }
8392
8393 if ((code == BFD_RELOC_32
8394 || code == BFD_RELOC_32_PCREL
8395 || code == BFD_RELOC_X86_64_32S)
8396 && GOT_symbol
8397 && fixp->fx_addsy == GOT_symbol)
8398 {
8399 if (!object_64bit)
8400 code = BFD_RELOC_386_GOTPC;
8401 else
8402 code = BFD_RELOC_X86_64_GOTPC32;
8403 }
8404 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8405 && GOT_symbol
8406 && fixp->fx_addsy == GOT_symbol)
8407 {
8408 code = BFD_RELOC_X86_64_GOTPC64;
8409 }
8410
8411 rel = (arelent *) xmalloc (sizeof (arelent));
8412 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8413 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8414
8415 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8416
8417 if (!use_rela_relocations)
8418 {
8419 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8420 vtable entry to be used in the relocation's section offset. */
8421 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8422 rel->address = fixp->fx_offset;
8423 #if defined (OBJ_COFF) && defined (TE_PE)
8424 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8425 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8426 else
8427 #endif
8428 rel->addend = 0;
8429 }
8430   /* Use rela relocations in 64bit mode.  */
8431 else
8432 {
8433 if (!fixp->fx_pcrel)
8434 rel->addend = fixp->fx_offset;
8435 else
8436 switch (code)
8437 {
8438 case BFD_RELOC_X86_64_PLT32:
8439 case BFD_RELOC_X86_64_GOT32:
8440 case BFD_RELOC_X86_64_GOTPCREL:
8441 case BFD_RELOC_X86_64_TLSGD:
8442 case BFD_RELOC_X86_64_TLSLD:
8443 case BFD_RELOC_X86_64_GOTTPOFF:
8444 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8445 case BFD_RELOC_X86_64_TLSDESC_CALL:
8446 rel->addend = fixp->fx_offset - fixp->fx_size;
8447 break;
8448 default:
8449 rel->addend = (section->vma
8450 - fixp->fx_size
8451 + fixp->fx_addnumber
8452 + md_pcrel_from (fixp));
8453 break;
8454 }
8455 }
8456
8457 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8458 if (rel->howto == NULL)
8459 {
8460 as_bad_where (fixp->fx_file, fixp->fx_line,
8461 _("cannot represent relocation type %s"),
8462 bfd_get_reloc_code_name (code));
8463 /* Set howto to a garbage value so that we can keep going. */
8464 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8465 gas_assert (rel->howto != NULL);
8466 }
8467
8468 return rel;
8469 }
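/* Note on the GOT_symbol cases above: a 32-bit fix (BFD_RELOC_32, _32S or
   32-bit pc-relative) whose added symbol is the GOT symbol itself is
   rewritten as GOTPC / GOTPC32, and a 64-bit one as GOTPC64, so that loads
   of the GOT address get the proper GOT-pointer relocation.  */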
8470
8471 #include "tc-i386-intel.c"
8472
8473 void
8474 tc_x86_parse_to_dw2regnum (expressionS *exp)
8475 {
8476 int saved_naked_reg;
8477 char saved_register_dot;
8478
8479 saved_naked_reg = allow_naked_reg;
8480 allow_naked_reg = 1;
8481 saved_register_dot = register_chars['.'];
8482 register_chars['.'] = '.';
8483 allow_pseudo_reg = 1;
8484 expression_and_evaluate (exp);
8485 allow_pseudo_reg = 0;
8486 register_chars['.'] = saved_register_dot;
8487 allow_naked_reg = saved_naked_reg;
8488
8489 if (exp->X_op == O_register && exp->X_add_number >= 0)
8490 {
8491 if ((addressT) exp->X_add_number < i386_regtab_size)
8492 {
8493 exp->X_op = O_constant;
8494 exp->X_add_number = i386_regtab[exp->X_add_number]
8495 .dw2_regnum[flag_code >> 1];
8496 }
8497 else
8498 exp->X_op = O_illegal;
8499 }
8500 }
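/* This is used when parsing register names in .cfi_* directives, e.g.
   `.cfi_offset %ebp, -8' (a hypothetical example); naked and pseudo
   register names are temporarily allowed while the expression is read,
   and the result is the register's DWARF number from i386_regtab.  */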
8501
8502 void
8503 tc_x86_frame_initial_instructions (void)
8504 {
8505 static unsigned int sp_regno[2];
8506
8507 if (!sp_regno[flag_code >> 1])
8508 {
8509 char *saved_input = input_line_pointer;
8510 char sp[][4] = {"esp", "rsp"};
8511 expressionS exp;
8512
8513 input_line_pointer = sp[flag_code >> 1];
8514 tc_x86_parse_to_dw2regnum (&exp);
8515 gas_assert (exp.X_op == O_constant);
8516 sp_regno[flag_code >> 1] = exp.X_add_number;
8517 input_line_pointer = saved_input;
8518 }
8519
8520 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
8521 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
8522 }
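/* Initial CFI state set here: the CFA is the stack pointer (%esp or %rsp,
   chosen via flag_code) plus -x86_cie_data_alignment, with the return
   address column recorded at x86_cie_data_alignment from the CFA.  With
   the usual data alignment of -4/-8 (an assumption; those values are set
   up elsewhere in this file) that is CFA == sp + 4 (or + 8) and the return
   address at CFA - 4 (or - 8).  */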
8523
8524 int
8525 i386_elf_section_type (const char *str, size_t len)
8526 {
8527 if (flag_code == CODE_64BIT
8528 && len == sizeof ("unwind") - 1
8529 && strncmp (str, "unwind", 6) == 0)
8530 return SHT_X86_64_UNWIND;
8531
8532 return -1;
8533 }
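/* Illustrative only: a directive along the lines of
       .section .foo, "a", @unwind
   assembled in 64-bit mode should get section type SHT_X86_64_UNWIND from
   this hook (the `@unwind' spelling is an assumption about the generic
   .section syntax, not taken from this file).  */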
8534
8535 #ifdef TE_SOLARIS
8536 void
8537 i386_solaris_fix_up_eh_frame (segT sec)
8538 {
8539 if (flag_code == CODE_64BIT)
8540 elf_section_type (sec) = SHT_X86_64_UNWIND;
8541 }
8542 #endif
8543
8544 #ifdef TE_PE
8545 void
8546 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
8547 {
8548 expressionS expr;
8549
8550 expr.X_op = O_secrel;
8551 expr.X_add_symbol = symbol;
8552 expr.X_add_number = 0;
8553 emit_expr (&expr, size);
8554 }
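/* I.e. for PE targets, DWARF offsets are emitted as section-relative
   (O_secrel) expressions of the requested size rather than as absolute
   addresses.  */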
8555 #endif
8556
8557 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8558 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
8559
8560 bfd_vma
8561 x86_64_section_letter (int letter, char **ptr_msg)
8562 {
8563 if (flag_code == CODE_64BIT)
8564 {
8565 if (letter == 'l')
8566 return SHF_X86_64_LARGE;
8567
8568 *ptr_msg = _("Bad .section directive: want a,l,w,x,M,S,G,T in string");
8569 }
8570 else
8571 *ptr_msg = _("Bad .section directive: want a,w,x,M,S,G,T in string");
8572 return -1;
8573 }
8574
8575 bfd_vma
8576 x86_64_section_word (char *str, size_t len)
8577 {
8578 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
8579 return SHF_X86_64_LARGE;
8580
8581 return -1;
8582 }
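/* Illustrative directives only (exact spelling is decided by the generic
   .section parser): in 64-bit mode something like
       .section .lbss, "awl", @nobits
   or a bare `large' section word should map to SHF_X86_64_LARGE through
   the two hooks above.  */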
8583
8584 static void
8585 handle_large_common (int small ATTRIBUTE_UNUSED)
8586 {
8587 if (flag_code != CODE_64BIT)
8588 {
8589 s_comm_internal (0, elf_common_parse);
8590 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
8591 }
8592 else
8593 {
8594 static segT lbss_section;
8595 asection *saved_com_section_ptr = elf_com_section_ptr;
8596 asection *saved_bss_section = bss_section;
8597
8598 if (lbss_section == NULL)
8599 {
8600 flagword applicable;
8601 segT seg = now_seg;
8602 subsegT subseg = now_subseg;
8603
8604 /* The .lbss section is for local .largecomm symbols. */
8605 lbss_section = subseg_new (".lbss", 0);
8606 applicable = bfd_applicable_section_flags (stdoutput);
8607 bfd_set_section_flags (stdoutput, lbss_section,
8608 applicable & SEC_ALLOC);
8609 seg_info (lbss_section)->bss = 1;
8610
8611 subseg_set (seg, subseg);
8612 }
8613
8614 elf_com_section_ptr = &_bfd_elf_large_com_section;
8615 bss_section = lbss_section;
8616
8617 s_comm_internal (0, elf_common_parse);
8618
8619 elf_com_section_ptr = saved_com_section_ptr;
8620 bss_section = saved_bss_section;
8621 }
8622 }
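/* Illustrative use of the directive handled above (names and values are
   made up):
       .largecomm big_buffer, 0x200000, 32
   In 64-bit mode this allocates common storage against the large common
   section / .lbss; in 32-bit mode it degrades to a plain .comm with a
   warning, as coded above.  */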
8623 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */