1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GAS, the GNU Assembler.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GAS; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23 /* Intel 80386 machine specific gas.
24 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
25 x86_64 support by Jan Hubicka (jh@suse.cz)
26 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
27 Bugs & suggestions are completely welcome. This is free software.
28 Please help us make it better. */
29
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "dwarf2dbg.h"
34 #include "dw2gencfi.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37
38 #ifndef REGISTER_WARNINGS
39 #define REGISTER_WARNINGS 1
40 #endif
41
42 #ifndef INFER_ADDR_PREFIX
43 #define INFER_ADDR_PREFIX 1
44 #endif
45
46 #ifndef DEFAULT_ARCH
47 #define DEFAULT_ARCH "i386"
48 #endif
49
50 #ifndef INLINE
51 #if __GNUC__ >= 2
52 #define INLINE __inline__
53 #else
54 #define INLINE
55 #endif
56 #endif
57
58 /* Prefixes will be emitted in the order defined below.
59 WAIT_PREFIX must be the first prefix since FWAIT really is an
60 instruction, and so must come before any prefixes.
61 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
62 REP_PREFIX, LOCK_PREFIX. */
63 #define WAIT_PREFIX 0
64 #define SEG_PREFIX 1
65 #define ADDR_PREFIX 2
66 #define DATA_PREFIX 3
67 #define REP_PREFIX 4
68 #define LOCK_PREFIX 5
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
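/* Worked example (added for illustration, not from the original source):
   for "lock addw %ax, %fs:(%ebx)" assembled in 32-bit mode, the FS
   override 0x64 lands in the SEG_PREFIX slot, the operand-size prefix
   0x66 in DATA_PREFIX and 0xf0 in LOCK_PREFIX; output_insn () emits the
   slots in index order, so the bytes appear as 64 66 f0 <opcode>.  */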
71
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 /* Intel Syntax. Use a non-ascii letter since it never appears
87 in instructions. */
88 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
89
90 #define END_OF_INSN '\0'
91
92 /*
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
97 END.
98 */
99 typedef struct
100 {
101 const insn_template *start;
102 const insn_template *end;
103 }
104 templates;
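/* For instance (illustrative only), looking up "mov" in the mnemonic hash
   table yields a templates record whose [start, end) range covers every
   insn_template named "mov"; match_template () later walks that range to
   find the variant whose operands fit.  */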
105
106 /* 386 operand encoding bytes: see 386 book for details of this. */
107 typedef struct
108 {
109 unsigned int regmem; /* codes register or memory operand */
110 unsigned int reg; /* codes register operand (or extended opcode) */
111 unsigned int mode; /* how to interpret regmem & reg */
112 }
113 modrm_byte;
114
115 /* x86-64 extension prefix. */
116 typedef int rex_byte;
117
118 /* 386 opcode byte to code indirect addressing. */
119 typedef struct
120 {
121 unsigned base;
122 unsigned index;
123 unsigned scale;
124 }
125 sib_byte;
126
127 /* x86 arch names, types and features */
128 typedef struct
129 {
130 const char *name; /* arch name */
131 unsigned int len; /* arch string length */
132 enum processor_type type; /* arch type */
133 i386_cpu_flags flags; /* cpu feature flags */
134 unsigned int skip; /* show_arch should skip this. */
135 unsigned int negated; /* turn off indicated flags. */
136 }
137 arch_entry;
138
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_sse_check (int);
146 static void set_cpu_arch (int);
147 #ifdef TE_PE
148 static void pe_directive_secrel (int);
149 #endif
150 static void signed_cons (int);
151 static char *output_invalid (int c);
152 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
153 const char *);
154 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
155 const char *);
156 static int i386_att_operand (char *);
157 static int i386_intel_operand (char *, int);
158 static int i386_intel_simplify (expressionS *);
159 static int i386_intel_parse_name (const char *, expressionS *);
160 static const reg_entry *parse_register (char *, char **);
161 static char *parse_insn (char *, char *);
162 static char *parse_operands (char *, const char *);
163 static void swap_operands (void);
164 static void swap_2_operands (int, int);
165 static void optimize_imm (void);
166 static void optimize_disp (void);
167 static const insn_template *match_template (void);
168 static int check_string (void);
169 static int process_suffix (void);
170 static int check_byte_reg (void);
171 static int check_long_reg (void);
172 static int check_qword_reg (void);
173 static int check_word_reg (void);
174 static int finalize_imm (void);
175 static int process_operands (void);
176 static const seg_entry *build_modrm_byte (void);
177 static void output_insn (void);
178 static void output_imm (fragS *, offsetT);
179 static void output_disp (fragS *, offsetT);
180 #ifndef I386COFF
181 static void s_bss (int);
182 #endif
183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
184 static void handle_large_common (int small ATTRIBUTE_UNUSED);
185 #endif
186
187 static const char *default_arch = DEFAULT_ARCH;
188
189 /* VEX prefix. */
190 typedef struct
191 {
192 /* VEX prefix is either 2 byte or 3 byte. */
193 unsigned char bytes[3];
194 unsigned int length;
195 /* Destination or source register specifier. */
196 const reg_entry *register_specifier;
197 } vex_prefix;
198
199 /* 'md_assemble ()' gathers together information and puts it into an
200 i386_insn. */
201
202 union i386_op
203 {
204 expressionS *disps;
205 expressionS *imms;
206 const reg_entry *regs;
207 };
208
209 enum i386_error
210 {
211 operand_size_mismatch,
212 operand_type_mismatch,
213 register_type_mismatch,
214 number_of_operands_mismatch,
215 invalid_instruction_suffix,
216 bad_imm4,
217 old_gcc_only,
218 unsupported_with_intel_mnemonic,
219 unsupported_syntax,
220 unsupported
221 };
222
223 struct _i386_insn
224 {
225 /* TM holds the template for the insn we're currently assembling. */
226 insn_template tm;
227
228 /* SUFFIX holds the instruction size suffix for byte, word, dword
229 or qword, if given. */
230 char suffix;
231
232 /* OPERANDS gives the number of given operands. */
233 unsigned int operands;
234
235 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
236 of given register, displacement, memory operands and immediate
237 operands. */
238 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
239
240 /* TYPES [i] is the type (see above #defines) which tells us how to
241 use OP[i] for the corresponding operand. */
242 i386_operand_type types[MAX_OPERANDS];
243
244 /* Displacement expression, immediate expression, or register for each
245 operand. */
246 union i386_op op[MAX_OPERANDS];
247
248 /* Flags for operands. */
249 unsigned int flags[MAX_OPERANDS];
250 #define Operand_PCrel 1
251
252 /* Relocation type for operand */
253 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
254
255 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
256 the base index byte below. */
257 const reg_entry *base_reg;
258 const reg_entry *index_reg;
259 unsigned int log2_scale_factor;
260
261 /* SEG gives the seg_entries of this insn. They are zero unless
262 explicit segment overrides are given. */
263 const seg_entry *seg[2];
264
265 /* PREFIX holds all the given prefix opcodes (usually null).
266 PREFIXES is the number of prefix opcodes. */
267 unsigned int prefixes;
268 unsigned char prefix[MAX_PREFIXES];
269
270 /* RM and SIB are the modrm byte and the sib byte where the
271 addressing modes of this insn are encoded. */
272 modrm_byte rm;
273 rex_byte rex;
274 sib_byte sib;
275 vex_prefix vex;
276
277 /* Swap operand in encoding. */
278 unsigned int swap_operand;
279
280 /* Force 32bit displacement in encoding. */
281 unsigned int disp32_encoding;
282
283 /* Error message. */
284 enum i386_error error;
285 };
286
287 typedef struct _i386_insn i386_insn;
288
289 /* List of chars besides those in app.c:symbol_chars that can start an
290 operand. Used to prevent the scrubber eating vital white-space. */
291 const char extra_symbol_chars[] = "*%-(["
292 #ifdef LEX_AT
293 "@"
294 #endif
295 #ifdef LEX_QM
296 "?"
297 #endif
298 ;
299
300 #if (defined (TE_I386AIX) \
301 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
302 && !defined (TE_GNU) \
303 && !defined (TE_LINUX) \
304 && !defined (TE_NETWARE) \
305 && !defined (TE_FreeBSD) \
306 && !defined (TE_NetBSD)))
307 /* This array holds the chars that always start a comment. If the
308 pre-processor is disabled, these aren't very useful. The option
309 --divide will remove '/' from this list. */
310 const char *i386_comment_chars = "#/";
311 #define SVR4_COMMENT_CHARS 1
312 #define PREFIX_SEPARATOR '\\'
313
314 #else
315 const char *i386_comment_chars = "#";
316 #define PREFIX_SEPARATOR '/'
317 #endif
318
319 /* This array holds the chars that only start a comment at the beginning of
320 a line. If the line seems to have the form '# 123 filename',
321 .line and .file directives will appear in the pre-processed output.
322 Note that input_file.c hand checks for '#' at the beginning of the
323 first line of the input file. This is because the compiler outputs
324 #NO_APP at the beginning of its output.
325 Also note that comments started like this one will always work if
326 '/' isn't otherwise defined. */
327 const char line_comment_chars[] = "#/";
328
329 const char line_separator_chars[] = ";";
330
331 /* Chars that can be used to separate mant from exp in floating point
332 nums. */
333 const char EXP_CHARS[] = "eE";
334
335 /* Chars that mean this number is a floating point constant
336 As in 0f12.456
337 or 0d1.2345e12. */
338 const char FLT_CHARS[] = "fFdDxX";
339
340 /* Tables for lexical analysis. */
341 static char mnemonic_chars[256];
342 static char register_chars[256];
343 static char operand_chars[256];
344 static char identifier_chars[256];
345 static char digit_chars[256];
346
347 /* Lexical macros. */
348 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
349 #define is_operand_char(x) (operand_chars[(unsigned char) x])
350 #define is_register_char(x) (register_chars[(unsigned char) x])
351 #define is_space_char(x) ((x) == ' ')
352 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
353 #define is_digit_char(x) (digit_chars[(unsigned char) x])
354
355 /* All non-digit non-letter characters that may occur in an operand. */
356 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
357
358 /* md_assemble() always leaves the strings it's passed unaltered. To
359 effect this we maintain a stack of saved characters that we've smashed
360 with '\0's (indicating end of strings for various sub-fields of the
361 assembler instruction). */
362 static char save_stack[32];
363 static char *save_stack_p;
364 #define END_STRING_AND_SAVE(s) \
365 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
366 #define RESTORE_END_STRING(s) \
367 do { *(s) = *--save_stack_p; } while (0)
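/* Intended pairing, sketched for illustration: END_STRING_AND_SAVE (l)
   pushes the character at L onto save_stack and overwrites it with '\0'
   so the token ending there can be used as a C string;
   RESTORE_END_STRING (l) pops it back.  Calls must therefore nest in
   LIFO order and stay within the 32-entry stack.  */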
368
369 /* The instruction we're assembling. */
370 static i386_insn i;
371
372 /* Possible templates for current insn. */
373 static const templates *current_templates;
374
375 /* Per instruction expressionS buffers: max displacements & immediates. */
376 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
377 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
378
379 /* Current operand we are working on. */
380 static int this_operand = -1;
381
382 /* We support four different modes. FLAG_CODE variable is used to distinguish
383 these. */
384
385 enum flag_code {
386 CODE_32BIT,
387 CODE_16BIT,
388 CODE_64BIT };
389
390 static enum flag_code flag_code;
391 static unsigned int object_64bit;
392 static unsigned int disallow_64bit_reloc;
393 static int use_rela_relocations = 0;
394
395 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
396 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
397 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
398
399 /* The ELF ABI to use. */
400 enum x86_elf_abi
401 {
402 I386_ABI,
403 X86_64_ABI,
404 X86_64_X32_ABI
405 };
406
407 static enum x86_elf_abi x86_elf_abi = I386_ABI;
408 #endif
409
410 /* The names used to print error messages. */
411 static const char *flag_code_names[] =
412 {
413 "32",
414 "16",
415 "64"
416 };
417
418 /* 1 for intel syntax,
419 0 if att syntax. */
420 static int intel_syntax = 0;
421
422 /* 1 for intel mnemonic,
423 0 if att mnemonic. */
424 static int intel_mnemonic = !SYSV386_COMPAT;
425
426 /* 1 if support old (<= 2.8.1) versions of gcc. */
427 static int old_gcc = OLDGCC_COMPAT;
428
429 /* 1 if pseudo registers are permitted. */
430 static int allow_pseudo_reg = 0;
431
432 /* 1 if register prefix % not required. */
433 static int allow_naked_reg = 0;
434
435 /* 1 if pseudo index register, eiz/riz, is allowed. */
436 static int allow_index_reg = 0;
437
438 static enum
439 {
440 sse_check_none = 0,
441 sse_check_warning,
442 sse_check_error
443 }
444 sse_check;
445
446 /* Register prefix used for error message. */
447 static const char *register_prefix = "%";
448
449 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
450 leave, push, and pop instructions so that gcc has the same stack
451 frame as in 32 bit mode. */
452 static char stackop_size = '\0';
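/* Illustration (not part of the original source): after ".code16gcc",
   stackop_size is set to 'l', so call/ret/enter/leave/push/pop pick up a
   0x66 operand-size prefix in 16-bit mode and move 32-bit quantities,
   keeping the stack layout identical to 32-bit code.  */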
453
454 /* Non-zero to optimize code alignment. */
455 int optimize_align_code = 1;
456
457 /* Non-zero to quieten some warnings. */
458 static int quiet_warnings = 0;
459
460 /* CPU name. */
461 static const char *cpu_arch_name = NULL;
462 static char *cpu_sub_arch_name = NULL;
463
464 /* CPU feature flags. */
465 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
466
467 /* If we have selected a cpu we are generating instructions for. */
468 static int cpu_arch_tune_set = 0;
469
470 /* Cpu we are generating instructions for. */
471 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
472
473 /* CPU feature flags of cpu we are generating instructions for. */
474 static i386_cpu_flags cpu_arch_tune_flags;
475
476 /* CPU instruction set architecture used. */
477 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
478
479 /* CPU feature flags of instruction set architecture used. */
480 i386_cpu_flags cpu_arch_isa_flags;
481
482 /* If set, conditional jumps are not automatically promoted to handle
483 offsets larger than a byte. */
484 static unsigned int no_cond_jump_promotion = 0;
485
486 /* Encode SSE instructions with VEX prefix. */
487 static unsigned int sse2avx;
488
489 /* Encode scalar AVX instructions with specific vector length. */
490 static enum
491 {
492 vex128 = 0,
493 vex256
494 } avxscalar;
495
496 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
497 static symbolS *GOT_symbol;
498
499 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
500 unsigned int x86_dwarf2_return_column;
501
502 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
503 int x86_cie_data_alignment;
504
505 /* Interface to relax_segment.
506 There are 3 major relax states for 386 jump insns because the
507 different types of jumps add different sizes to frags when we're
508 figuring out what sort of jump to choose to reach a given label. */
509
510 /* Types. */
511 #define UNCOND_JUMP 0
512 #define COND_JUMP 1
513 #define COND_JUMP86 2
514
515 /* Sizes. */
516 #define CODE16 1
517 #define SMALL 0
518 #define SMALL16 (SMALL | CODE16)
519 #define BIG 2
520 #define BIG16 (BIG | CODE16)
521
522 #ifndef INLINE
523 #ifdef __GNUC__
524 #define INLINE __inline__
525 #else
526 #define INLINE
527 #endif
528 #endif
529
530 #define ENCODE_RELAX_STATE(type, size) \
531 ((relax_substateT) (((type) << 2) | (size)))
532 #define TYPE_FROM_RELAX_STATE(s) \
533 ((s) >> 2)
534 #define DISP_SIZE_FROM_RELAX_STATE(s) \
535 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
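/* Worked example (added for illustration): ENCODE_RELAX_STATE (COND_JUMP, BIG)
   is (1 << 2) | 2 == 6; TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (6) yields 4, the size of the 32-bit
   displacement.  For a BIG16 state the displacement size is 2, and for
   SMALL/SMALL16 it is 1.  */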
536
537 /* This table is used by relax_frag to promote short jumps to long
538 ones where necessary. SMALL (short) jumps may be promoted to BIG
539 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
540 don't allow a short jump in a 32 bit code segment to be promoted to
541 a 16 bit offset jump because it's slower (requires data size
542 prefix), and doesn't work unless the destination is in the bottom
543 64k of the code segment (the top 16 bits of eip are zeroed). */
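/* Example of how the table is consulted (illustrative): a conditional
   jump in 32-bit code starts in state ENCODE_RELAX_STATE (COND_JUMP,
   SMALL), whose entry below reaches -128+1..127+1 with 1 byte of
   displacement; if the target is out of range, relax_frag moves to the
   state named in the fourth field, ENCODE_RELAX_STATE (COND_JUMP, BIG),
   which adds 5 bytes (1 extra opcode byte plus a 4-byte displacement).  */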
544
545 const relax_typeS md_relax_table[] =
546 {
547 /* The fields are:
548 1) most positive reach of this state,
549 2) most negative reach of this state,
550 3) how many bytes this mode will have in the variable part of the frag
551 4) which index into the table to try if we can't fit into this one. */
552
553 /* UNCOND_JUMP states. */
554 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
555 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
556 /* dword jmp adds 4 bytes to frag:
557 0 extra opcode bytes, 4 displacement bytes. */
558 {0, 0, 4, 0},
559 /* word jmp adds 2 bytes to frag:
560 0 extra opcode bytes, 2 displacement bytes. */
561 {0, 0, 2, 0},
562
563 /* COND_JUMP states. */
564 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
565 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
566 /* dword conditionals add 5 bytes to frag:
567 1 extra opcode byte, 4 displacement bytes. */
568 {0, 0, 5, 0},
569 /* word conditionals add 3 bytes to frag:
570 1 extra opcode byte, 2 displacement bytes. */
571 {0, 0, 3, 0},
572
573 /* COND_JUMP86 states. */
574 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
575 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
576 /* dword conditionals add 5 bytes to frag:
577 1 extra opcode byte, 4 displacement bytes. */
578 {0, 0, 5, 0},
579 /* word conditionals add 4 bytes to frag:
580 1 displacement byte and a 3 byte long branch insn. */
581 {0, 0, 4, 0}
582 };
583
584 static const arch_entry cpu_arch[] =
585 {
586 /* Do not replace the first two entries - i386_target_format()
587 relies on them being there in this order. */
588 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
589 CPU_GENERIC32_FLAGS, 0, 0 },
590 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
591 CPU_GENERIC64_FLAGS, 0, 0 },
592 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
593 CPU_NONE_FLAGS, 0, 0 },
594 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
595 CPU_I186_FLAGS, 0, 0 },
596 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
597 CPU_I286_FLAGS, 0, 0 },
598 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
599 CPU_I386_FLAGS, 0, 0 },
600 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
601 CPU_I486_FLAGS, 0, 0 },
602 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
603 CPU_I586_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
605 CPU_I686_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
607 CPU_I586_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
609 CPU_PENTIUMPRO_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
611 CPU_P2_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
613 CPU_P3_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
615 CPU_P4_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
617 CPU_CORE_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
619 CPU_NOCONA_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
621 CPU_CORE_FLAGS, 1, 0 },
622 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
623 CPU_CORE_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
625 CPU_CORE2_FLAGS, 1, 0 },
626 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
627 CPU_CORE2_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
629 CPU_COREI7_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
631 CPU_L1OM_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
633 CPU_K6_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
635 CPU_K6_2_FLAGS, 0, 0 },
636 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
637 CPU_ATHLON_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
639 CPU_K8_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
641 CPU_K8_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
643 CPU_K8_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
645 CPU_AMDFAM10_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BDVER1,
647 CPU_BDVER1_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
649 CPU_8087_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
651 CPU_287_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
653 CPU_387_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
655 CPU_ANY87_FLAGS, 0, 1 },
656 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
657 CPU_MMX_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
659 CPU_3DNOWA_FLAGS, 0, 1 },
660 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
661 CPU_SSE_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
663 CPU_SSE2_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
665 CPU_SSE3_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
667 CPU_SSSE3_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
669 CPU_SSE4_1_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
671 CPU_SSE4_2_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
673 CPU_SSE4_2_FLAGS, 0, 0 },
674 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
675 CPU_ANY_SSE_FLAGS, 0, 1 },
676 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
677 CPU_AVX_FLAGS, 0, 0 },
678 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
679 CPU_ANY_AVX_FLAGS, 0, 1 },
680 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
681 CPU_VMX_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
683 CPU_SMX_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
685 CPU_XSAVE_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
687 CPU_XSAVEOPT_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
689 CPU_AES_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
691 CPU_PCLMUL_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
693 CPU_PCLMUL_FLAGS, 1, 0 },
694 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
695 CPU_FSGSBASE_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
697 CPU_RDRND_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
699 CPU_F16C_FLAGS, 0, 0 },
700 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
701 CPU_FMA_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
703 CPU_FMA4_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
705 CPU_XOP_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
707 CPU_LWP_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
709 CPU_MOVBE_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
711 CPU_EPT_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
713 CPU_CLFLUSH_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
715 CPU_NOP_FLAGS, 0, 0 },
716 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
717 CPU_SYSCALL_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
719 CPU_RDTSCP_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
721 CPU_3DNOW_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
723 CPU_3DNOWA_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
725 CPU_PADLOCK_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
727 CPU_SVME_FLAGS, 1, 0 },
728 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
729 CPU_SVME_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
731 CPU_SSE4A_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
733 CPU_ABM_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
735 CPU_BMI_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
737 CPU_TBM_FLAGS, 0, 0 },
738 };
739
740 #ifdef I386COFF
741 /* Like s_lcomm_internal in gas/read.c but the alignment string
742 is allowed to be optional. */
743
744 static symbolS *
745 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
746 {
747 addressT align = 0;
748
749 SKIP_WHITESPACE ();
750
751 if (needs_align
752 && *input_line_pointer == ',')
753 {
754 align = parse_align (needs_align - 1);
755
756 if (align == (addressT) -1)
757 return NULL;
758 }
759 else
760 {
761 if (size >= 8)
762 align = 3;
763 else if (size >= 4)
764 align = 2;
765 else if (size >= 2)
766 align = 1;
767 else
768 align = 0;
769 }
770
771 bss_alloc (symbolP, size, align);
772 return symbolP;
773 }
774
775 static void
776 pe_lcomm (int needs_align)
777 {
778 s_comm_internal (needs_align * 2, pe_lcomm_internal);
779 }
780 #endif
781
782 const pseudo_typeS md_pseudo_table[] =
783 {
784 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
785 {"align", s_align_bytes, 0},
786 #else
787 {"align", s_align_ptwo, 0},
788 #endif
789 {"arch", set_cpu_arch, 0},
790 #ifndef I386COFF
791 {"bss", s_bss, 0},
792 #else
793 {"lcomm", pe_lcomm, 1},
794 #endif
795 {"ffloat", float_cons, 'f'},
796 {"dfloat", float_cons, 'd'},
797 {"tfloat", float_cons, 'x'},
798 {"value", cons, 2},
799 {"slong", signed_cons, 4},
800 {"noopt", s_ignore, 0},
801 {"optim", s_ignore, 0},
802 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
803 {"code16", set_code_flag, CODE_16BIT},
804 {"code32", set_code_flag, CODE_32BIT},
805 {"code64", set_code_flag, CODE_64BIT},
806 {"intel_syntax", set_intel_syntax, 1},
807 {"att_syntax", set_intel_syntax, 0},
808 {"intel_mnemonic", set_intel_mnemonic, 1},
809 {"att_mnemonic", set_intel_mnemonic, 0},
810 {"allow_index_reg", set_allow_index_reg, 1},
811 {"disallow_index_reg", set_allow_index_reg, 0},
812 {"sse_check", set_sse_check, 0},
813 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
814 {"largecomm", handle_large_common, 0},
815 #else
816 {"file", (void (*) (int)) dwarf2_directive_file, 0},
817 {"loc", dwarf2_directive_loc, 0},
818 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
819 #endif
820 #ifdef TE_PE
821 {"secrel32", pe_directive_secrel, 0},
822 #endif
823 {0, 0, 0}
824 };
825
826 /* For interface with expression (). */
827 extern char *input_line_pointer;
828
829 /* Hash table for instruction mnemonic lookup. */
830 static struct hash_control *op_hash;
831
832 /* Hash table for register lookup. */
833 static struct hash_control *reg_hash;
834 \f
835 void
836 i386_align_code (fragS *fragP, int count)
837 {
838 /* Various efficient no-op patterns for aligning code labels.
839 Note: Don't try to assemble the instructions in the comments.
840 0L and 0w are not legal. */
841 static const char f32_1[] =
842 {0x90}; /* nop */
843 static const char f32_2[] =
844 {0x66,0x90}; /* xchg %ax,%ax */
845 static const char f32_3[] =
846 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
847 static const char f32_4[] =
848 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
849 static const char f32_5[] =
850 {0x90, /* nop */
851 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
852 static const char f32_6[] =
853 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
854 static const char f32_7[] =
855 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
856 static const char f32_8[] =
857 {0x90, /* nop */
858 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
859 static const char f32_9[] =
860 {0x89,0xf6, /* movl %esi,%esi */
861 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
862 static const char f32_10[] =
863 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
864 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
865 static const char f32_11[] =
866 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
867 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
868 static const char f32_12[] =
869 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
870 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
871 static const char f32_13[] =
872 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
873 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
874 static const char f32_14[] =
875 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
876 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
877 static const char f16_3[] =
878 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
879 static const char f16_4[] =
880 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
881 static const char f16_5[] =
882 {0x90, /* nop */
883 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
884 static const char f16_6[] =
885 {0x89,0xf6, /* mov %si,%si */
886 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
887 static const char f16_7[] =
888 {0x8d,0x74,0x00, /* lea 0(%si),%si */
889 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
890 static const char f16_8[] =
891 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
892 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
893 static const char jump_31[] =
894 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
895 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
896 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
897 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
898 static const char *const f32_patt[] = {
899 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
900 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
901 };
902 static const char *const f16_patt[] = {
903 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
904 };
905 /* nopl (%[re]ax) */
906 static const char alt_3[] =
907 {0x0f,0x1f,0x00};
908 /* nopl 0(%[re]ax) */
909 static const char alt_4[] =
910 {0x0f,0x1f,0x40,0x00};
911 /* nopl 0(%[re]ax,%[re]ax,1) */
912 static const char alt_5[] =
913 {0x0f,0x1f,0x44,0x00,0x00};
914 /* nopw 0(%[re]ax,%[re]ax,1) */
915 static const char alt_6[] =
916 {0x66,0x0f,0x1f,0x44,0x00,0x00};
917 /* nopl 0L(%[re]ax) */
918 static const char alt_7[] =
919 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
920 /* nopl 0L(%[re]ax,%[re]ax,1) */
921 static const char alt_8[] =
922 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
923 /* nopw 0L(%[re]ax,%[re]ax,1) */
924 static const char alt_9[] =
925 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
926 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
927 static const char alt_10[] =
928 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
929 /* data16
930 nopw %cs:0L(%[re]ax,%[re]ax,1) */
931 static const char alt_long_11[] =
932 {0x66,
933 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
934 /* data16
935 data16
936 nopw %cs:0L(%[re]ax,%[re]ax,1) */
937 static const char alt_long_12[] =
938 {0x66,
939 0x66,
940 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
941 /* data16
942 data16
943 data16
944 nopw %cs:0L(%[re]ax,%[re]ax,1) */
945 static const char alt_long_13[] =
946 {0x66,
947 0x66,
948 0x66,
949 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
950 /* data16
951 data16
952 data16
953 data16
954 nopw %cs:0L(%[re]ax,%[re]ax,1) */
955 static const char alt_long_14[] =
956 {0x66,
957 0x66,
958 0x66,
959 0x66,
960 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
961 /* data16
962 data16
963 data16
964 data16
965 data16
966 nopw %cs:0L(%[re]ax,%[re]ax,1) */
967 static const char alt_long_15[] =
968 {0x66,
969 0x66,
970 0x66,
971 0x66,
972 0x66,
973 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
974 /* nopl 0(%[re]ax,%[re]ax,1)
975 nopw 0(%[re]ax,%[re]ax,1) */
976 static const char alt_short_11[] =
977 {0x0f,0x1f,0x44,0x00,0x00,
978 0x66,0x0f,0x1f,0x44,0x00,0x00};
979 /* nopw 0(%[re]ax,%[re]ax,1)
980 nopw 0(%[re]ax,%[re]ax,1) */
981 static const char alt_short_12[] =
982 {0x66,0x0f,0x1f,0x44,0x00,0x00,
983 0x66,0x0f,0x1f,0x44,0x00,0x00};
984 /* nopw 0(%[re]ax,%[re]ax,1)
985 nopl 0L(%[re]ax) */
986 static const char alt_short_13[] =
987 {0x66,0x0f,0x1f,0x44,0x00,0x00,
988 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
989 /* nopl 0L(%[re]ax)
990 nopl 0L(%[re]ax) */
991 static const char alt_short_14[] =
992 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
993 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
994 /* nopl 0L(%[re]ax)
995 nopl 0L(%[re]ax,%[re]ax,1) */
996 static const char alt_short_15[] =
997 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
998 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
999 static const char *const alt_short_patt[] = {
1000 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1001 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1002 alt_short_14, alt_short_15
1003 };
1004 static const char *const alt_long_patt[] = {
1005 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1006 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1007 alt_long_14, alt_long_15
1008 };
1009
1010 /* Only emit padding for a positive byte count within the supported maximum. */
1011 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1012 return;
1013
1014 /* We need to decide which NOP sequence to use for 32bit and
1015 64bit. When -mtune= is used:
1016
1017 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1018 PROCESSOR_GENERIC32, f32_patt will be used.
1019 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1020 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1021 PROCESSOR_GENERIC64, alt_long_patt will be used.
1022 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8,
1023 PROCESSOR_AMDFAM10 and PROCESSOR_BDVER1, alt_short_patt
1024 will be used.
1025
1026 When -mtune= isn't used, alt_long_patt will be used if
1027 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1028 be used.
1029
1030 When -march= or .arch is used, we can't use anything beyond
1031 cpu_arch_isa_flags. */
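/* Concrete illustration (not part of the original comment): with
   -mtune=core2 and a 6-byte padding request, the code below selects
   alt_long_patt[5], the single "nopw 0(%[re]ax,%[re]ax,1)" sequence
   66 0f 1f 44 00 00.  */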
1032
1033 if (flag_code == CODE_16BIT)
1034 {
1035 if (count > 8)
1036 {
1037 memcpy (fragP->fr_literal + fragP->fr_fix,
1038 jump_31, count);
1039 /* Adjust jump offset. */
1040 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1041 }
1042 else
1043 memcpy (fragP->fr_literal + fragP->fr_fix,
1044 f16_patt[count - 1], count);
1045 }
1046 else
1047 {
1048 const char *const *patt = NULL;
1049
1050 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1051 {
1052 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1053 switch (cpu_arch_tune)
1054 {
1055 case PROCESSOR_UNKNOWN:
1056 /* We use cpu_arch_isa_flags to check if we SHOULD
1057 optimize with nops. */
1058 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1059 patt = alt_long_patt;
1060 else
1061 patt = f32_patt;
1062 break;
1063 case PROCESSOR_PENTIUMPRO:
1064 case PROCESSOR_PENTIUM4:
1065 case PROCESSOR_NOCONA:
1066 case PROCESSOR_CORE:
1067 case PROCESSOR_CORE2:
1068 case PROCESSOR_COREI7:
1069 case PROCESSOR_L1OM:
1070 case PROCESSOR_GENERIC64:
1071 patt = alt_long_patt;
1072 break;
1073 case PROCESSOR_K6:
1074 case PROCESSOR_ATHLON:
1075 case PROCESSOR_K8:
1076 case PROCESSOR_AMDFAM10:
1077 case PROCESSOR_BDVER1:
1078 patt = alt_short_patt;
1079 break;
1080 case PROCESSOR_I386:
1081 case PROCESSOR_I486:
1082 case PROCESSOR_PENTIUM:
1083 case PROCESSOR_GENERIC32:
1084 patt = f32_patt;
1085 break;
1086 }
1087 }
1088 else
1089 {
1090 switch (fragP->tc_frag_data.tune)
1091 {
1092 case PROCESSOR_UNKNOWN:
1093 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1094 PROCESSOR_UNKNOWN. */
1095 abort ();
1096 break;
1097
1098 case PROCESSOR_I386:
1099 case PROCESSOR_I486:
1100 case PROCESSOR_PENTIUM:
1101 case PROCESSOR_K6:
1102 case PROCESSOR_ATHLON:
1103 case PROCESSOR_K8:
1104 case PROCESSOR_AMDFAM10:
1105 case PROCESSOR_BDVER1:
1106 case PROCESSOR_GENERIC32:
1107 /* We use cpu_arch_isa_flags to check if we CAN optimize
1108 with nops. */
1109 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1110 patt = alt_short_patt;
1111 else
1112 patt = f32_patt;
1113 break;
1114 case PROCESSOR_PENTIUMPRO:
1115 case PROCESSOR_PENTIUM4:
1116 case PROCESSOR_NOCONA:
1117 case PROCESSOR_CORE:
1118 case PROCESSOR_CORE2:
1119 case PROCESSOR_COREI7:
1120 case PROCESSOR_L1OM:
1121 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1122 patt = alt_long_patt;
1123 else
1124 patt = f32_patt;
1125 break;
1126 case PROCESSOR_GENERIC64:
1127 patt = alt_long_patt;
1128 break;
1129 }
1130 }
1131
1132 if (patt == f32_patt)
1133 {
1134 /* If the padding is within the limit computed below, we use the
1135 normal patterns. Otherwise, we use a jump instruction and adjust
1136 its offset. */
1137 int limit;
1138
1139 /* For 64bit, the limit is 3 bytes. */
1140 if (flag_code == CODE_64BIT
1141 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1142 limit = 3;
1143 else
1144 limit = 15;
1145 if (count < limit)
1146 memcpy (fragP->fr_literal + fragP->fr_fix,
1147 patt[count - 1], count);
1148 else
1149 {
1150 memcpy (fragP->fr_literal + fragP->fr_fix,
1151 jump_31, count);
1152 /* Adjust jump offset. */
1153 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1154 }
1155 }
1156 else
1157 {
1158 /* Maximum length of an instruction is 15 bytes. If the
1159 padding is greater than 15 bytes and we don't use a jump,
1160 we have to break it into smaller pieces. */
1161 int padding = count;
1162 while (padding > 15)
1163 {
1164 padding -= 15;
1165 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1166 patt [14], 15);
1167 }
1168
1169 if (padding)
1170 memcpy (fragP->fr_literal + fragP->fr_fix,
1171 patt [padding - 1], padding);
1172 }
1173 }
1174 fragP->fr_var = count;
1175 }
1176
1177 static INLINE int
1178 operand_type_all_zero (const union i386_operand_type *x)
1179 {
1180 switch (ARRAY_SIZE(x->array))
1181 {
1182 case 3:
1183 if (x->array[2])
1184 return 0;
1185 case 2:
1186 if (x->array[1])
1187 return 0;
1188 case 1:
1189 return !x->array[0];
1190 default:
1191 abort ();
1192 }
1193 }
1194
1195 static INLINE void
1196 operand_type_set (union i386_operand_type *x, unsigned int v)
1197 {
1198 switch (ARRAY_SIZE(x->array))
1199 {
1200 case 3:
1201 x->array[2] = v;
1202 case 2:
1203 x->array[1] = v;
1204 case 1:
1205 x->array[0] = v;
1206 break;
1207 default:
1208 abort ();
1209 }
1210 }
1211
1212 static INLINE int
1213 operand_type_equal (const union i386_operand_type *x,
1214 const union i386_operand_type *y)
1215 {
1216 switch (ARRAY_SIZE(x->array))
1217 {
1218 case 3:
1219 if (x->array[2] != y->array[2])
1220 return 0;
1221 case 2:
1222 if (x->array[1] != y->array[1])
1223 return 0;
1224 case 1:
1225 return x->array[0] == y->array[0];
1226 break;
1227 default:
1228 abort ();
1229 }
1230 }
1231
1232 static INLINE int
1233 cpu_flags_all_zero (const union i386_cpu_flags *x)
1234 {
1235 switch (ARRAY_SIZE(x->array))
1236 {
1237 case 3:
1238 if (x->array[2])
1239 return 0;
1240 case 2:
1241 if (x->array[1])
1242 return 0;
1243 case 1:
1244 return !x->array[0];
1245 default:
1246 abort ();
1247 }
1248 }
1249
1250 static INLINE void
1251 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1252 {
1253 switch (ARRAY_SIZE(x->array))
1254 {
1255 case 3:
1256 x->array[2] = v;
1257 case 2:
1258 x->array[1] = v;
1259 case 1:
1260 x->array[0] = v;
1261 break;
1262 default:
1263 abort ();
1264 }
1265 }
1266
1267 static INLINE int
1268 cpu_flags_equal (const union i386_cpu_flags *x,
1269 const union i386_cpu_flags *y)
1270 {
1271 switch (ARRAY_SIZE(x->array))
1272 {
1273 case 3:
1274 if (x->array[2] != y->array[2])
1275 return 0;
1276 case 2:
1277 if (x->array[1] != y->array[1])
1278 return 0;
1279 case 1:
1280 return x->array[0] == y->array[0];
1281 break;
1282 default:
1283 abort ();
1284 }
1285 }
1286
1287 static INLINE int
1288 cpu_flags_check_cpu64 (i386_cpu_flags f)
1289 {
1290 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1291 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1292 }
1293
1294 static INLINE i386_cpu_flags
1295 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1296 {
1297 switch (ARRAY_SIZE (x.array))
1298 {
1299 case 3:
1300 x.array [2] &= y.array [2];
1301 case 2:
1302 x.array [1] &= y.array [1];
1303 case 1:
1304 x.array [0] &= y.array [0];
1305 break;
1306 default:
1307 abort ();
1308 }
1309 return x;
1310 }
1311
1312 static INLINE i386_cpu_flags
1313 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1314 {
1315 switch (ARRAY_SIZE (x.array))
1316 {
1317 case 3:
1318 x.array [2] |= y.array [2];
1319 case 2:
1320 x.array [1] |= y.array [1];
1321 case 1:
1322 x.array [0] |= y.array [0];
1323 break;
1324 default:
1325 abort ();
1326 }
1327 return x;
1328 }
1329
1330 static INLINE i386_cpu_flags
1331 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1332 {
1333 switch (ARRAY_SIZE (x.array))
1334 {
1335 case 3:
1336 x.array [2] &= ~y.array [2];
1337 case 2:
1338 x.array [1] &= ~y.array [1];
1339 case 1:
1340 x.array [0] &= ~y.array [0];
1341 break;
1342 default:
1343 abort ();
1344 }
1345 return x;
1346 }
1347
1348 #define CPU_FLAGS_ARCH_MATCH 0x1
1349 #define CPU_FLAGS_64BIT_MATCH 0x2
1350 #define CPU_FLAGS_AES_MATCH 0x4
1351 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1352 #define CPU_FLAGS_AVX_MATCH 0x10
1353
1354 #define CPU_FLAGS_32BIT_MATCH \
1355 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1356 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1357 #define CPU_FLAGS_PERFECT_MATCH \
1358 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
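/* Example of how the bits combine (illustrative): a template with an
   empty cpu_flags set, i.e. one available on every arch, receives
   CPU_FLAGS_32BIT_MATCH from cpu_flags_match () below, plus
   CPU_FLAGS_64BIT_MATCH when it is legal in the current 16/32/64-bit
   mode, giving CPU_FLAGS_PERFECT_MATCH.  */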
1359
1360 /* Return CPU flags match bits. */
1361
1362 static int
1363 cpu_flags_match (const insn_template *t)
1364 {
1365 i386_cpu_flags x = t->cpu_flags;
1366 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1367
1368 x.bitfield.cpu64 = 0;
1369 x.bitfield.cpuno64 = 0;
1370
1371 if (cpu_flags_all_zero (&x))
1372 {
1373 /* This instruction is available on all archs. */
1374 match |= CPU_FLAGS_32BIT_MATCH;
1375 }
1376 else
1377 {
1378 /* This instruction is available only on some archs. */
1379 i386_cpu_flags cpu = cpu_arch_flags;
1380
1381 cpu.bitfield.cpu64 = 0;
1382 cpu.bitfield.cpuno64 = 0;
1383 cpu = cpu_flags_and (x, cpu);
1384 if (!cpu_flags_all_zero (&cpu))
1385 {
1386 if (x.bitfield.cpuavx)
1387 {
1388 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1389 if (cpu.bitfield.cpuavx)
1390 {
1391 /* Check SSE2AVX. */
1392 if (!t->opcode_modifier.sse2avx || sse2avx)
1393 {
1394 match |= (CPU_FLAGS_ARCH_MATCH
1395 | CPU_FLAGS_AVX_MATCH);
1396 /* Check AES. */
1397 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1398 match |= CPU_FLAGS_AES_MATCH;
1399 /* Check PCLMUL. */
1400 if (!x.bitfield.cpupclmul
1401 || cpu.bitfield.cpupclmul)
1402 match |= CPU_FLAGS_PCLMUL_MATCH;
1403 }
1404 }
1405 else
1406 match |= CPU_FLAGS_ARCH_MATCH;
1407 }
1408 else
1409 match |= CPU_FLAGS_32BIT_MATCH;
1410 }
1411 }
1412 return match;
1413 }
1414
1415 static INLINE i386_operand_type
1416 operand_type_and (i386_operand_type x, i386_operand_type y)
1417 {
1418 switch (ARRAY_SIZE (x.array))
1419 {
1420 case 3:
1421 x.array [2] &= y.array [2];
1422 case 2:
1423 x.array [1] &= y.array [1];
1424 case 1:
1425 x.array [0] &= y.array [0];
1426 break;
1427 default:
1428 abort ();
1429 }
1430 return x;
1431 }
1432
1433 static INLINE i386_operand_type
1434 operand_type_or (i386_operand_type x, i386_operand_type y)
1435 {
1436 switch (ARRAY_SIZE (x.array))
1437 {
1438 case 3:
1439 x.array [2] |= y.array [2];
1440 case 2:
1441 x.array [1] |= y.array [1];
1442 case 1:
1443 x.array [0] |= y.array [0];
1444 break;
1445 default:
1446 abort ();
1447 }
1448 return x;
1449 }
1450
1451 static INLINE i386_operand_type
1452 operand_type_xor (i386_operand_type x, i386_operand_type y)
1453 {
1454 switch (ARRAY_SIZE (x.array))
1455 {
1456 case 3:
1457 x.array [2] ^= y.array [2];
1458 case 2:
1459 x.array [1] ^= y.array [1];
1460 case 1:
1461 x.array [0] ^= y.array [0];
1462 break;
1463 default:
1464 abort ();
1465 }
1466 return x;
1467 }
1468
1469 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1470 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1471 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1472 static const i386_operand_type inoutportreg
1473 = OPERAND_TYPE_INOUTPORTREG;
1474 static const i386_operand_type reg16_inoutportreg
1475 = OPERAND_TYPE_REG16_INOUTPORTREG;
1476 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1477 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1478 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1479 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1480 static const i386_operand_type anydisp
1481 = OPERAND_TYPE_ANYDISP;
1482 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1483 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1484 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1485 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1486 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1487 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1488 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1489 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1490 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1491 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1492 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1493 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1494
1495 enum operand_type
1496 {
1497 reg,
1498 imm,
1499 disp,
1500 anymem
1501 };
1502
1503 static INLINE int
1504 operand_type_check (i386_operand_type t, enum operand_type c)
1505 {
1506 switch (c)
1507 {
1508 case reg:
1509 return (t.bitfield.reg8
1510 || t.bitfield.reg16
1511 || t.bitfield.reg32
1512 || t.bitfield.reg64);
1513
1514 case imm:
1515 return (t.bitfield.imm8
1516 || t.bitfield.imm8s
1517 || t.bitfield.imm16
1518 || t.bitfield.imm32
1519 || t.bitfield.imm32s
1520 || t.bitfield.imm64);
1521
1522 case disp:
1523 return (t.bitfield.disp8
1524 || t.bitfield.disp16
1525 || t.bitfield.disp32
1526 || t.bitfield.disp32s
1527 || t.bitfield.disp64);
1528
1529 case anymem:
1530 return (t.bitfield.disp8
1531 || t.bitfield.disp16
1532 || t.bitfield.disp32
1533 || t.bitfield.disp32s
1534 || t.bitfield.disp64
1535 || t.bitfield.baseindex);
1536
1537 default:
1538 abort ();
1539 }
1540
1541 return 0;
1542 }
1543
1544 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1545 operand J for instruction template T. */
1546
1547 static INLINE int
1548 match_reg_size (const insn_template *t, unsigned int j)
1549 {
1550 return !((i.types[j].bitfield.byte
1551 && !t->operand_types[j].bitfield.byte)
1552 || (i.types[j].bitfield.word
1553 && !t->operand_types[j].bitfield.word)
1554 || (i.types[j].bitfield.dword
1555 && !t->operand_types[j].bitfield.dword)
1556 || (i.types[j].bitfield.qword
1557 && !t->operand_types[j].bitfield.qword));
1558 }
1559
1560 /* Return 1 if there is no conflict in any size on operand J for
1561 instruction template T. */
1562
1563 static INLINE int
1564 match_mem_size (const insn_template *t, unsigned int j)
1565 {
1566 return (match_reg_size (t, j)
1567 && !((i.types[j].bitfield.unspecified
1568 && !t->operand_types[j].bitfield.unspecified)
1569 || (i.types[j].bitfield.fword
1570 && !t->operand_types[j].bitfield.fword)
1571 || (i.types[j].bitfield.tbyte
1572 && !t->operand_types[j].bitfield.tbyte)
1573 || (i.types[j].bitfield.xmmword
1574 && !t->operand_types[j].bitfield.xmmword)
1575 || (i.types[j].bitfield.ymmword
1576 && !t->operand_types[j].bitfield.ymmword)));
1577 }
1578
1579 /* Return 1 if there is no size conflict on any operands for
1580 instruction template T. */
1581
1582 static INLINE int
1583 operand_size_match (const insn_template *t)
1584 {
1585 unsigned int j;
1586 int match = 1;
1587
1588 /* Don't check jump instructions. */
1589 if (t->opcode_modifier.jump
1590 || t->opcode_modifier.jumpbyte
1591 || t->opcode_modifier.jumpdword
1592 || t->opcode_modifier.jumpintersegment)
1593 return match;
1594
1595 /* Check memory and accumulator operand size. */
1596 for (j = 0; j < i.operands; j++)
1597 {
1598 if (t->operand_types[j].bitfield.anysize)
1599 continue;
1600
1601 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1602 {
1603 match = 0;
1604 break;
1605 }
1606
1607 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1608 {
1609 match = 0;
1610 break;
1611 }
1612 }
1613
1614 if (match)
1615 return match;
1616 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1617 {
1618 mismatch:
1619 i.error = operand_size_mismatch;
1620 return 0;
1621 }
1622
1623 /* Check reverse. */
1624 gas_assert (i.operands == 2);
1625
1626 match = 1;
1627 for (j = 0; j < 2; j++)
1628 {
1629 if (t->operand_types[j].bitfield.acc
1630 && !match_reg_size (t, j ? 0 : 1))
1631 goto mismatch;
1632
1633 if (i.types[j].bitfield.mem
1634 && !match_mem_size (t, j ? 0 : 1))
1635 goto mismatch;
1636 }
1637
1638 return match;
1639 }
1640
1641 static INLINE int
1642 operand_type_match (i386_operand_type overlap,
1643 i386_operand_type given)
1644 {
1645 i386_operand_type temp = overlap;
1646
1647 temp.bitfield.jumpabsolute = 0;
1648 temp.bitfield.unspecified = 0;
1649 temp.bitfield.byte = 0;
1650 temp.bitfield.word = 0;
1651 temp.bitfield.dword = 0;
1652 temp.bitfield.fword = 0;
1653 temp.bitfield.qword = 0;
1654 temp.bitfield.tbyte = 0;
1655 temp.bitfield.xmmword = 0;
1656 temp.bitfield.ymmword = 0;
1657 if (operand_type_all_zero (&temp))
1658 goto mismatch;
1659
1660 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1661 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1662 return 1;
1663
1664 mismatch:
1665 i.error = operand_type_mismatch;
1666 return 0;
1667 }
1668
1669 /* If given types g0 and g1 are registers, they must be of the same type
1670 unless the expected operand type register overlap is null.
1671 Note that Acc in a template matches every size of reg. */
1672
1673 static INLINE int
1674 operand_type_register_match (i386_operand_type m0,
1675 i386_operand_type g0,
1676 i386_operand_type t0,
1677 i386_operand_type m1,
1678 i386_operand_type g1,
1679 i386_operand_type t1)
1680 {
1681 if (!operand_type_check (g0, reg))
1682 return 1;
1683
1684 if (!operand_type_check (g1, reg))
1685 return 1;
1686
1687 if (g0.bitfield.reg8 == g1.bitfield.reg8
1688 && g0.bitfield.reg16 == g1.bitfield.reg16
1689 && g0.bitfield.reg32 == g1.bitfield.reg32
1690 && g0.bitfield.reg64 == g1.bitfield.reg64)
1691 return 1;
1692
1693 if (m0.bitfield.acc)
1694 {
1695 t0.bitfield.reg8 = 1;
1696 t0.bitfield.reg16 = 1;
1697 t0.bitfield.reg32 = 1;
1698 t0.bitfield.reg64 = 1;
1699 }
1700
1701 if (m1.bitfield.acc)
1702 {
1703 t1.bitfield.reg8 = 1;
1704 t1.bitfield.reg16 = 1;
1705 t1.bitfield.reg32 = 1;
1706 t1.bitfield.reg64 = 1;
1707 }
1708
1709 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1710 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1711 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1712 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1713 return 1;
1714
1715 i.error = register_type_mismatch;
1716
1717 return 0;
1718 }
1719
1720 static INLINE unsigned int
1721 mode_from_disp_size (i386_operand_type t)
1722 {
1723 if (t.bitfield.disp8)
1724 return 1;
1725 else if (t.bitfield.disp16
1726 || t.bitfield.disp32
1727 || t.bitfield.disp32s)
1728 return 2;
1729 else
1730 return 0;
1731 }
1732
1733 static INLINE int
1734 fits_in_signed_byte (offsetT num)
1735 {
1736 return (num >= -128) && (num <= 127);
1737 }
1738
1739 static INLINE int
1740 fits_in_unsigned_byte (offsetT num)
1741 {
1742 return (num & 0xff) == num;
1743 }
1744
1745 static INLINE int
1746 fits_in_unsigned_word (offsetT num)
1747 {
1748 return (num & 0xffff) == num;
1749 }
1750
1751 static INLINE int
1752 fits_in_signed_word (offsetT num)
1753 {
1754 return (-32768 <= num) && (num <= 32767);
1755 }
1756
1757 static INLINE int
1758 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1759 {
1760 #ifndef BFD64
1761 return 1;
1762 #else
1763 return (!(((offsetT) -1 << 31) & num)
1764 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1765 #endif
1766 } /* fits_in_signed_long() */
1767
1768 static INLINE int
1769 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1770 {
1771 #ifndef BFD64
1772 return 1;
1773 #else
1774 return (num & (((offsetT) 2 << 31) - 1)) == num;
1775 #endif
1776 } /* fits_in_unsigned_long() */
1777
1778 static INLINE int
1779 fits_in_imm4 (offsetT num)
1780 {
1781 return (num & 0xf) == num;
1782 }
1783
1784 static i386_operand_type
1785 smallest_imm_type (offsetT num)
1786 {
1787 i386_operand_type t;
1788
1789 operand_type_set (&t, 0);
1790 t.bitfield.imm64 = 1;
1791
1792 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1793 {
1794 /* This code is disabled on the 486 because all the Imm1 forms
1795 in the opcode table are slower on the i486. They're the
1796 versions with the implicitly specified single-position
1797 displacement, which has another syntax if you really want to
1798 use that form. */
1799 t.bitfield.imm1 = 1;
1800 t.bitfield.imm8 = 1;
1801 t.bitfield.imm8s = 1;
1802 t.bitfield.imm16 = 1;
1803 t.bitfield.imm32 = 1;
1804 t.bitfield.imm32s = 1;
1805 }
1806 else if (fits_in_signed_byte (num))
1807 {
1808 t.bitfield.imm8 = 1;
1809 t.bitfield.imm8s = 1;
1810 t.bitfield.imm16 = 1;
1811 t.bitfield.imm32 = 1;
1812 t.bitfield.imm32s = 1;
1813 }
1814 else if (fits_in_unsigned_byte (num))
1815 {
1816 t.bitfield.imm8 = 1;
1817 t.bitfield.imm16 = 1;
1818 t.bitfield.imm32 = 1;
1819 t.bitfield.imm32s = 1;
1820 }
1821 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1822 {
1823 t.bitfield.imm16 = 1;
1824 t.bitfield.imm32 = 1;
1825 t.bitfield.imm32s = 1;
1826 }
1827 else if (fits_in_signed_long (num))
1828 {
1829 t.bitfield.imm32 = 1;
1830 t.bitfield.imm32s = 1;
1831 }
1832 else if (fits_in_unsigned_long (num))
1833 t.bitfield.imm32 = 1;
1834
1835 return t;
1836 }
1837
1838 static offsetT
1839 offset_in_range (offsetT val, int size)
1840 {
1841 addressT mask;
1842
1843 switch (size)
1844 {
1845 case 1: mask = ((addressT) 1 << 8) - 1; break;
1846 case 2: mask = ((addressT) 1 << 16) - 1; break;
1847 case 4: mask = ((addressT) 2 << 31) - 1; break;
1848 #ifdef BFD64
1849 case 8: mask = ((addressT) 2 << 63) - 1; break;
1850 #endif
1851 default: abort ();
1852 }
1853
1854 #ifdef BFD64
1855 /* If BFD64, sign extend val for 32bit address mode. */
1856 if (flag_code != CODE_64BIT
1857 || i.prefix[ADDR_PREFIX])
1858 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1859 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1860 #endif
1861
1862 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1863 {
1864 char buf1[40], buf2[40];
1865
1866 sprint_value (buf1, val);
1867 sprint_value (buf2, val & mask);
1868 as_warn (_("%s shortened to %s"), buf1, buf2);
1869 }
1870 return val & mask;
1871 }
1872
1873 enum PREFIX_GROUP
1874 {
1875 PREFIX_EXIST = 0,
1876 PREFIX_LOCK,
1877 PREFIX_REP,
1878 PREFIX_OTHER
1879 };
1880
1881 /* Returns
1882 a. PREFIX_EXIST if attempting to add a prefix where one from the
1883 same class already exists.
1884 b. PREFIX_LOCK if lock prefix is added.
1885 c. PREFIX_REP if rep/repne prefix is added.
1886 d. PREFIX_OTHER if other prefix is added.
1887 */
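/* Example of the contract above (illustrative): the first
   add_prefix (LOCK_PREFIX_OPCODE) on an insn stores 0xf0 in the
   LOCK_PREFIX slot and returns PREFIX_LOCK; a second call finds the slot
   already occupied, reports "same type of prefix used twice" and returns
   PREFIX_EXIST.  */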
1888
1889 static enum PREFIX_GROUP
1890 add_prefix (unsigned int prefix)
1891 {
1892 enum PREFIX_GROUP ret = PREFIX_OTHER;
1893 unsigned int q;
1894
1895 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1896 && flag_code == CODE_64BIT)
1897 {
1898 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1899 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1900 && (prefix & (REX_R | REX_X | REX_B))))
1901 ret = PREFIX_EXIST;
1902 q = REX_PREFIX;
1903 }
1904 else
1905 {
1906 switch (prefix)
1907 {
1908 default:
1909 abort ();
1910
1911 case CS_PREFIX_OPCODE:
1912 case DS_PREFIX_OPCODE:
1913 case ES_PREFIX_OPCODE:
1914 case FS_PREFIX_OPCODE:
1915 case GS_PREFIX_OPCODE:
1916 case SS_PREFIX_OPCODE:
1917 q = SEG_PREFIX;
1918 break;
1919
1920 case REPNE_PREFIX_OPCODE:
1921 case REPE_PREFIX_OPCODE:
1922 q = REP_PREFIX;
1923 ret = PREFIX_REP;
1924 break;
1925
1926 case LOCK_PREFIX_OPCODE:
1927 q = LOCK_PREFIX;
1928 ret = PREFIX_LOCK;
1929 break;
1930
1931 case FWAIT_OPCODE:
1932 q = WAIT_PREFIX;
1933 break;
1934
1935 case ADDR_PREFIX_OPCODE:
1936 q = ADDR_PREFIX;
1937 break;
1938
1939 case DATA_PREFIX_OPCODE:
1940 q = DATA_PREFIX;
1941 break;
1942 }
1943 if (i.prefix[q] != 0)
1944 ret = PREFIX_EXIST;
1945 }
1946
1947 if (ret)
1948 {
1949 if (!i.prefix[q])
1950 ++i.prefixes;
1951 i.prefix[q] |= prefix;
1952 }
1953 else
1954 as_bad (_("same type of prefix used twice"));
1955
1956 return ret;
1957 }
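/* Usage sketch (illustrative only): the first add_prefix (LOCK_PREFIX_OPCODE)
   for an insn records the prefix and returns PREFIX_LOCK; a second call for
   the same insn finds i.prefix[LOCK_PREFIX] already set, reports "same type
   of prefix used twice" and returns PREFIX_EXIST.  */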
1958
1959 static void
1960 update_code_flag (int value, int check)
1961 {
1962 PRINTF_LIKE ((*as_error));
1963
1964 flag_code = (enum flag_code) value;
1965 if (flag_code == CODE_64BIT)
1966 {
1967 cpu_arch_flags.bitfield.cpu64 = 1;
1968 cpu_arch_flags.bitfield.cpuno64 = 0;
1969 }
1970 else
1971 {
1972 cpu_arch_flags.bitfield.cpu64 = 0;
1973 cpu_arch_flags.bitfield.cpuno64 = 1;
1974 }
1975 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
1976 {
1977 if (check)
1978 as_error = as_fatal;
1979 else
1980 as_error = as_bad;
1981 (*as_error) (_("64bit mode not supported on `%s'."),
1982 cpu_arch_name ? cpu_arch_name : default_arch);
1983 }
1984 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
1985 {
1986 if (check)
1987 as_error = as_fatal;
1988 else
1989 as_error = as_bad;
1990 (*as_error) (_("32bit mode not supported on `%s'."),
1991 cpu_arch_name ? cpu_arch_name : default_arch);
1992 }
1993 stackop_size = '\0';
1994 }
1995
1996 static void
1997 set_code_flag (int value)
1998 {
1999 update_code_flag (value, 0);
2000 }
2001
2002 static void
2003 set_16bit_gcc_code_flag (int new_code_flag)
2004 {
2005 flag_code = (enum flag_code) new_code_flag;
2006 if (flag_code != CODE_16BIT)
2007 abort ();
2008 cpu_arch_flags.bitfield.cpu64 = 0;
2009 cpu_arch_flags.bitfield.cpuno64 = 1;
2010 stackop_size = LONG_MNEM_SUFFIX;
2011 }
2012
2013 static void
2014 set_intel_syntax (int syntax_flag)
2015 {
2016 /* Find out if register prefixing is specified. */
2017 int ask_naked_reg = 0;
2018
2019 SKIP_WHITESPACE ();
2020 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2021 {
2022 char *string = input_line_pointer;
2023 int e = get_symbol_end ();
2024
2025 if (strcmp (string, "prefix") == 0)
2026 ask_naked_reg = 1;
2027 else if (strcmp (string, "noprefix") == 0)
2028 ask_naked_reg = -1;
2029 else
2030 as_bad (_("bad argument to syntax directive."));
2031 *input_line_pointer = e;
2032 }
2033 demand_empty_rest_of_line ();
2034
2035 intel_syntax = syntax_flag;
2036
2037 if (ask_naked_reg == 0)
2038 allow_naked_reg = (intel_syntax
2039 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2040 else
2041 allow_naked_reg = (ask_naked_reg < 0);
2042
2043 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2044
2045 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2046 identifier_chars['$'] = intel_syntax ? '$' : 0;
2047 register_prefix = allow_naked_reg ? "" : "%";
2048 }
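/* Illustrative directive usage (assuming the usual .intel_syntax pseudo-op
   is wired to this handler):

	.intel_syntax noprefix

   selects Intel syntax and sets allow_naked_reg, so registers may then be
   written without the '%' prefix.  */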
2049
2050 static void
2051 set_intel_mnemonic (int mnemonic_flag)
2052 {
2053 intel_mnemonic = mnemonic_flag;
2054 }
2055
2056 static void
2057 set_allow_index_reg (int flag)
2058 {
2059 allow_index_reg = flag;
2060 }
2061
2062 static void
2063 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2064 {
2065 SKIP_WHITESPACE ();
2066
2067 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2068 {
2069 char *string = input_line_pointer;
2070 int e = get_symbol_end ();
2071
2072 if (strcmp (string, "none") == 0)
2073 sse_check = sse_check_none;
2074 else if (strcmp (string, "warning") == 0)
2075 sse_check = sse_check_warning;
2076 else if (strcmp (string, "error") == 0)
2077 sse_check = sse_check_error;
2078 else
2079 as_bad (_("bad argument to sse_check directive."));
2080 *input_line_pointer = e;
2081 }
2082 else
2083 as_bad (_("missing argument for sse_check directive"));
2084
2085 demand_empty_rest_of_line ();
2086 }
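/* Illustrative directive usage (assuming the .sse_check pseudo-op is wired
   to this handler):

	.sse_check warning

   makes the assembler warn, rather than error out or stay silent, whenever
   an SSE instruction is assembled (see the sse_check test in md_assemble
   below).  */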
2087
2088 static void
2089 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2090 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2091 {
2092 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2093 static const char *arch;
2094
2095 /* Intel L1OM is only supported on ELF. */
2096 if (!IS_ELF)
2097 return;
2098
2099 if (!arch)
2100 {
2101 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2102 use default_arch. */
2103 arch = cpu_arch_name;
2104 if (!arch)
2105 arch = default_arch;
2106 }
2107
2108 /* When targeting Intel L1OM, the requested architecture must include L1OM support. */
2109 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2110 || new_flag.bitfield.cpul1om)
2111 return;
2112
2113 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2114 #endif
2115 }
2116
2117 static void
2118 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2119 {
2120 SKIP_WHITESPACE ();
2121
2122 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2123 {
2124 char *string = input_line_pointer;
2125 int e = get_symbol_end ();
2126 unsigned int j;
2127 i386_cpu_flags flags;
2128
2129 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2130 {
2131 if (strcmp (string, cpu_arch[j].name) == 0)
2132 {
2133 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2134
2135 if (*string != '.')
2136 {
2137 cpu_arch_name = cpu_arch[j].name;
2138 cpu_sub_arch_name = NULL;
2139 cpu_arch_flags = cpu_arch[j].flags;
2140 if (flag_code == CODE_64BIT)
2141 {
2142 cpu_arch_flags.bitfield.cpu64 = 1;
2143 cpu_arch_flags.bitfield.cpuno64 = 0;
2144 }
2145 else
2146 {
2147 cpu_arch_flags.bitfield.cpu64 = 0;
2148 cpu_arch_flags.bitfield.cpuno64 = 1;
2149 }
2150 cpu_arch_isa = cpu_arch[j].type;
2151 cpu_arch_isa_flags = cpu_arch[j].flags;
2152 if (!cpu_arch_tune_set)
2153 {
2154 cpu_arch_tune = cpu_arch_isa;
2155 cpu_arch_tune_flags = cpu_arch_isa_flags;
2156 }
2157 break;
2158 }
2159
2160 if (!cpu_arch[j].negated)
2161 flags = cpu_flags_or (cpu_arch_flags,
2162 cpu_arch[j].flags);
2163 else
2164 flags = cpu_flags_and_not (cpu_arch_flags,
2165 cpu_arch[j].flags);
2166 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2167 {
2168 if (cpu_sub_arch_name)
2169 {
2170 char *name = cpu_sub_arch_name;
2171 cpu_sub_arch_name = concat (name,
2172 cpu_arch[j].name,
2173 (const char *) NULL);
2174 free (name);
2175 }
2176 else
2177 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2178 cpu_arch_flags = flags;
2179 }
2180 *input_line_pointer = e;
2181 demand_empty_rest_of_line ();
2182 return;
2183 }
2184 }
2185 if (j >= ARRAY_SIZE (cpu_arch))
2186 as_bad (_("no such architecture: `%s'"), string);
2187
2188 *input_line_pointer = e;
2189 }
2190 else
2191 as_bad (_("missing cpu architecture"));
2192
2193 no_cond_jump_promotion = 0;
2194 if (*input_line_pointer == ','
2195 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2196 {
2197 char *string = ++input_line_pointer;
2198 int e = get_symbol_end ();
2199
2200 if (strcmp (string, "nojumps") == 0)
2201 no_cond_jump_promotion = 1;
2202 else if (strcmp (string, "jumps") == 0)
2203 ;
2204 else
2205 as_bad (_("no such architecture modifier: `%s'"), string);
2206
2207 *input_line_pointer = e;
2208 }
2209
2210 demand_empty_rest_of_line ();
2211 }
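/* Illustrative directive usage (assuming the .arch pseudo-op is wired to
   this handler): ".arch i486,nojumps" selects the i486 base architecture
   and disables conditional jump promotion, while a sub-architecture name
   such as ".arch .sse4.1" only ORs the extra feature bits into
   cpu_arch_flags.  */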
2212
2213 enum bfd_architecture
2214 i386_arch (void)
2215 {
2216 if (cpu_arch_isa == PROCESSOR_L1OM)
2217 {
2218 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2219 || flag_code != CODE_64BIT)
2220 as_fatal (_("Intel L1OM is 64bit ELF only"));
2221 return bfd_arch_l1om;
2222 }
2223 else
2224 return bfd_arch_i386;
2225 }
2226
2227 unsigned long
2228 i386_mach ()
2229 {
2230 if (!strncmp (default_arch, "x86_64", 6))
2231 {
2232 if (cpu_arch_isa == PROCESSOR_L1OM)
2233 {
2234 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2235 || default_arch[6] != '\0')
2236 as_fatal (_("Intel L1OM is 64bit ELF only"));
2237 return bfd_mach_l1om;
2238 }
2239 else if (default_arch[6] == '\0')
2240 return bfd_mach_x86_64;
2241 else
2242 return bfd_mach_x64_32;
2243 }
2244 else if (!strcmp (default_arch, "i386"))
2245 return bfd_mach_i386_i386;
2246 else
2247 as_fatal (_("Unknown architecture"));
2248 }
2249 \f
2250 void
2251 md_begin ()
2252 {
2253 const char *hash_err;
2254
2255 /* Initialize op_hash hash table. */
2256 op_hash = hash_new ();
2257
2258 {
2259 const insn_template *optab;
2260 templates *core_optab;
2261
2262 /* Setup for loop. */
2263 optab = i386_optab;
2264 core_optab = (templates *) xmalloc (sizeof (templates));
2265 core_optab->start = optab;
2266
2267 while (1)
2268 {
2269 ++optab;
2270 if (optab->name == NULL
2271 || strcmp (optab->name, (optab - 1)->name) != 0)
2272 {
2273 /* different name --> ship out current template list;
2274 add to hash table; & begin anew. */
2275 core_optab->end = optab;
2276 hash_err = hash_insert (op_hash,
2277 (optab - 1)->name,
2278 (void *) core_optab);
2279 if (hash_err)
2280 {
2281 as_fatal (_("Internal Error: Can't hash %s: %s"),
2282 (optab - 1)->name,
2283 hash_err);
2284 }
2285 if (optab->name == NULL)
2286 break;
2287 core_optab = (templates *) xmalloc (sizeof (templates));
2288 core_optab->start = optab;
2289 }
2290 }
2291 }
2292
2293 /* Initialize reg_hash hash table. */
2294 reg_hash = hash_new ();
2295 {
2296 const reg_entry *regtab;
2297 unsigned int regtab_size = i386_regtab_size;
2298
2299 for (regtab = i386_regtab; regtab_size--; regtab++)
2300 {
2301 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2302 if (hash_err)
2303 as_fatal (_("Internal Error: Can't hash %s: %s"),
2304 regtab->reg_name,
2305 hash_err);
2306 }
2307 }
2308
2309 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2310 {
2311 int c;
2312 char *p;
2313
2314 for (c = 0; c < 256; c++)
2315 {
2316 if (ISDIGIT (c))
2317 {
2318 digit_chars[c] = c;
2319 mnemonic_chars[c] = c;
2320 register_chars[c] = c;
2321 operand_chars[c] = c;
2322 }
2323 else if (ISLOWER (c))
2324 {
2325 mnemonic_chars[c] = c;
2326 register_chars[c] = c;
2327 operand_chars[c] = c;
2328 }
2329 else if (ISUPPER (c))
2330 {
2331 mnemonic_chars[c] = TOLOWER (c);
2332 register_chars[c] = mnemonic_chars[c];
2333 operand_chars[c] = c;
2334 }
2335
2336 if (ISALPHA (c) || ISDIGIT (c))
2337 identifier_chars[c] = c;
2338 else if (c >= 128)
2339 {
2340 identifier_chars[c] = c;
2341 operand_chars[c] = c;
2342 }
2343 }
2344
2345 #ifdef LEX_AT
2346 identifier_chars['@'] = '@';
2347 #endif
2348 #ifdef LEX_QM
2349 identifier_chars['?'] = '?';
2350 operand_chars['?'] = '?';
2351 #endif
2352 digit_chars['-'] = '-';
2353 mnemonic_chars['_'] = '_';
2354 mnemonic_chars['-'] = '-';
2355 mnemonic_chars['.'] = '.';
2356 identifier_chars['_'] = '_';
2357 identifier_chars['.'] = '.';
2358
2359 for (p = operand_special_chars; *p != '\0'; p++)
2360 operand_chars[(unsigned char) *p] = *p;
2361 }
2362
2363 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2364 if (IS_ELF)
2365 {
2366 record_alignment (text_section, 2);
2367 record_alignment (data_section, 2);
2368 record_alignment (bss_section, 2);
2369 }
2370 #endif
2371
2372 if (flag_code == CODE_64BIT)
2373 {
2374 x86_dwarf2_return_column = 16;
2375 x86_cie_data_alignment = -8;
2376 }
2377 else
2378 {
2379 x86_dwarf2_return_column = 8;
2380 x86_cie_data_alignment = -4;
2381 }
2382 }
2383
2384 void
2385 i386_print_statistics (FILE *file)
2386 {
2387 hash_print_statistics (file, "i386 opcode", op_hash);
2388 hash_print_statistics (file, "i386 register", reg_hash);
2389 }
2390 \f
2391 #ifdef DEBUG386
2392
2393 /* Debugging routines for md_assemble. */
2394 static void pte (insn_template *);
2395 static void pt (i386_operand_type);
2396 static void pe (expressionS *);
2397 static void ps (symbolS *);
2398
2399 static void
2400 pi (char *line, i386_insn *x)
2401 {
2402 unsigned int j;
2403
2404 fprintf (stdout, "%s: template ", line);
2405 pte (&x->tm);
2406 fprintf (stdout, " address: base %s index %s scale %x\n",
2407 x->base_reg ? x->base_reg->reg_name : "none",
2408 x->index_reg ? x->index_reg->reg_name : "none",
2409 x->log2_scale_factor);
2410 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2411 x->rm.mode, x->rm.reg, x->rm.regmem);
2412 fprintf (stdout, " sib: base %x index %x scale %x\n",
2413 x->sib.base, x->sib.index, x->sib.scale);
2414 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2415 (x->rex & REX_W) != 0,
2416 (x->rex & REX_R) != 0,
2417 (x->rex & REX_X) != 0,
2418 (x->rex & REX_B) != 0);
2419 for (j = 0; j < x->operands; j++)
2420 {
2421 fprintf (stdout, " #%d: ", j + 1);
2422 pt (x->types[j]);
2423 fprintf (stdout, "\n");
2424 if (x->types[j].bitfield.reg8
2425 || x->types[j].bitfield.reg16
2426 || x->types[j].bitfield.reg32
2427 || x->types[j].bitfield.reg64
2428 || x->types[j].bitfield.regmmx
2429 || x->types[j].bitfield.regxmm
2430 || x->types[j].bitfield.regymm
2431 || x->types[j].bitfield.sreg2
2432 || x->types[j].bitfield.sreg3
2433 || x->types[j].bitfield.control
2434 || x->types[j].bitfield.debug
2435 || x->types[j].bitfield.test)
2436 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2437 if (operand_type_check (x->types[j], imm))
2438 pe (x->op[j].imms);
2439 if (operand_type_check (x->types[j], disp))
2440 pe (x->op[j].disps);
2441 }
2442 }
2443
2444 static void
2445 pte (insn_template *t)
2446 {
2447 unsigned int j;
2448 fprintf (stdout, " %d operands ", t->operands);
2449 fprintf (stdout, "opcode %x ", t->base_opcode);
2450 if (t->extension_opcode != None)
2451 fprintf (stdout, "ext %x ", t->extension_opcode);
2452 if (t->opcode_modifier.d)
2453 fprintf (stdout, "D");
2454 if (t->opcode_modifier.w)
2455 fprintf (stdout, "W");
2456 fprintf (stdout, "\n");
2457 for (j = 0; j < t->operands; j++)
2458 {
2459 fprintf (stdout, " #%d type ", j + 1);
2460 pt (t->operand_types[j]);
2461 fprintf (stdout, "\n");
2462 }
2463 }
2464
2465 static void
2466 pe (expressionS *e)
2467 {
2468 fprintf (stdout, " operation %d\n", e->X_op);
2469 fprintf (stdout, " add_number %ld (%lx)\n",
2470 (long) e->X_add_number, (long) e->X_add_number);
2471 if (e->X_add_symbol)
2472 {
2473 fprintf (stdout, " add_symbol ");
2474 ps (e->X_add_symbol);
2475 fprintf (stdout, "\n");
2476 }
2477 if (e->X_op_symbol)
2478 {
2479 fprintf (stdout, " op_symbol ");
2480 ps (e->X_op_symbol);
2481 fprintf (stdout, "\n");
2482 }
2483 }
2484
2485 static void
2486 ps (symbolS *s)
2487 {
2488 fprintf (stdout, "%s type %s%s",
2489 S_GET_NAME (s),
2490 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2491 segment_name (S_GET_SEGMENT (s)));
2492 }
2493
2494 static struct type_name
2495 {
2496 i386_operand_type mask;
2497 const char *name;
2498 }
2499 const type_names[] =
2500 {
2501 { OPERAND_TYPE_REG8, "r8" },
2502 { OPERAND_TYPE_REG16, "r16" },
2503 { OPERAND_TYPE_REG32, "r32" },
2504 { OPERAND_TYPE_REG64, "r64" },
2505 { OPERAND_TYPE_IMM8, "i8" },
2506 { OPERAND_TYPE_IMM8S, "i8s" },
2507 { OPERAND_TYPE_IMM16, "i16" },
2508 { OPERAND_TYPE_IMM32, "i32" },
2509 { OPERAND_TYPE_IMM32S, "i32s" },
2510 { OPERAND_TYPE_IMM64, "i64" },
2511 { OPERAND_TYPE_IMM1, "i1" },
2512 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2513 { OPERAND_TYPE_DISP8, "d8" },
2514 { OPERAND_TYPE_DISP16, "d16" },
2515 { OPERAND_TYPE_DISP32, "d32" },
2516 { OPERAND_TYPE_DISP32S, "d32s" },
2517 { OPERAND_TYPE_DISP64, "d64" },
2518 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2519 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2520 { OPERAND_TYPE_CONTROL, "control reg" },
2521 { OPERAND_TYPE_TEST, "test reg" },
2522 { OPERAND_TYPE_DEBUG, "debug reg" },
2523 { OPERAND_TYPE_FLOATREG, "FReg" },
2524 { OPERAND_TYPE_FLOATACC, "FAcc" },
2525 { OPERAND_TYPE_SREG2, "SReg2" },
2526 { OPERAND_TYPE_SREG3, "SReg3" },
2527 { OPERAND_TYPE_ACC, "Acc" },
2528 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2529 { OPERAND_TYPE_REGMMX, "rMMX" },
2530 { OPERAND_TYPE_REGXMM, "rXMM" },
2531 { OPERAND_TYPE_REGYMM, "rYMM" },
2532 { OPERAND_TYPE_ESSEG, "es" },
2533 };
2534
2535 static void
2536 pt (i386_operand_type t)
2537 {
2538 unsigned int j;
2539 i386_operand_type a;
2540
2541 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2542 {
2543 a = operand_type_and (t, type_names[j].mask);
2544 if (!operand_type_all_zero (&a))
2545 fprintf (stdout, "%s, ", type_names[j].name);
2546 }
2547 fflush (stdout);
2548 }
2549
2550 #endif /* DEBUG386 */
2551 \f
2552 static bfd_reloc_code_real_type
2553 reloc (unsigned int size,
2554 int pcrel,
2555 int sign,
2556 bfd_reloc_code_real_type other)
2557 {
2558 if (other != NO_RELOC)
2559 {
2560 reloc_howto_type *rel;
2561
2562 if (size == 8)
2563 switch (other)
2564 {
2565 case BFD_RELOC_X86_64_GOT32:
2566 return BFD_RELOC_X86_64_GOT64;
2567 break;
2568 case BFD_RELOC_X86_64_PLTOFF64:
2569 return BFD_RELOC_X86_64_PLTOFF64;
2570 break;
2571 case BFD_RELOC_X86_64_GOTPC32:
2572 other = BFD_RELOC_X86_64_GOTPC64;
2573 break;
2574 case BFD_RELOC_X86_64_GOTPCREL:
2575 other = BFD_RELOC_X86_64_GOTPCREL64;
2576 break;
2577 case BFD_RELOC_X86_64_TPOFF32:
2578 other = BFD_RELOC_X86_64_TPOFF64;
2579 break;
2580 case BFD_RELOC_X86_64_DTPOFF32:
2581 other = BFD_RELOC_X86_64_DTPOFF64;
2582 break;
2583 default:
2584 break;
2585 }
2586
2587 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2588 if (size == 4 && flag_code != CODE_64BIT)
2589 sign = -1;
2590
2591 rel = bfd_reloc_type_lookup (stdoutput, other);
2592 if (!rel)
2593 as_bad (_("unknown relocation (%u)"), other);
2594 else if (size != bfd_get_reloc_size (rel))
2595 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2596 bfd_get_reloc_size (rel),
2597 size);
2598 else if (pcrel && !rel->pc_relative)
2599 as_bad (_("non-pc-relative relocation for pc-relative field"));
2600 else if ((rel->complain_on_overflow == complain_overflow_signed
2601 && !sign)
2602 || (rel->complain_on_overflow == complain_overflow_unsigned
2603 && sign > 0))
2604 as_bad (_("relocated field and relocation type differ in signedness"));
2605 else
2606 return other;
2607 return NO_RELOC;
2608 }
2609
2610 if (pcrel)
2611 {
2612 if (!sign)
2613 as_bad (_("there are no unsigned pc-relative relocations"));
2614 switch (size)
2615 {
2616 case 1: return BFD_RELOC_8_PCREL;
2617 case 2: return BFD_RELOC_16_PCREL;
2618 case 4: return BFD_RELOC_32_PCREL;
2619 case 8: return BFD_RELOC_64_PCREL;
2620 }
2621 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2622 }
2623 else
2624 {
2625 if (sign > 0)
2626 switch (size)
2627 {
2628 case 4: return BFD_RELOC_X86_64_32S;
2629 }
2630 else
2631 switch (size)
2632 {
2633 case 1: return BFD_RELOC_8;
2634 case 2: return BFD_RELOC_16;
2635 case 4: return BFD_RELOC_32;
2636 case 8: return BFD_RELOC_64;
2637 }
2638 as_bad (_("cannot do %s %u byte relocation"),
2639 sign > 0 ? "signed" : "unsigned", size);
2640 }
2641
2642 return NO_RELOC;
2643 }
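/* Usage sketch (illustrative only): with no override relocation,
   reloc (4, 1, 1, NO_RELOC) yields BFD_RELOC_32_PCREL for a signed 4-byte
   pc-relative fixup, while reloc (4, 0, 1, NO_RELOC) yields
   BFD_RELOC_X86_64_32S for a signed 4-byte absolute field.  */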
2644
2645 /* Here we decide which fixups can be adjusted to make them relative to
2646 the beginning of the section instead of the symbol. Basically we need
2647 to make sure that the dynamic relocations are done correctly, so in
2648 some cases we force the original symbol to be used. */
2649
2650 int
2651 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2652 {
2653 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2654 if (!IS_ELF)
2655 return 1;
2656
2657 /* Don't adjust pc-relative references to merge sections in 64-bit
2658 mode. */
2659 if (use_rela_relocations
2660 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2661 && fixP->fx_pcrel)
2662 return 0;
2663
2664 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2665 and changed later by validate_fix. */
2666 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2667 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2668 return 0;
2669
2670 /* adjust_reloc_syms doesn't know about the GOT. */
2671 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2672 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2673 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2674 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2675 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2676 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2677 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2678 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2679 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2680 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2681 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2682 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2683 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2684 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2685 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2686 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2687 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2688 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2689 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2690 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2691 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2692 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2693 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2694 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2695 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2696 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2697 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2698 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2699 return 0;
2700 #endif
2701 return 1;
2702 }
2703
2704 static int
2705 intel_float_operand (const char *mnemonic)
2706 {
2707 /* Note that the value returned is meaningful only for opcodes with (memory)
2708 operands, hence the code here is free to improperly handle opcodes that
2709 have no operands (for better performance and smaller code). */
2710
2711 if (mnemonic[0] != 'f')
2712 return 0; /* non-math */
2713
2714 switch (mnemonic[1])
2715 {
2716 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2717 the fs segment override prefix not currently handled because no
2718 call path can make opcodes without operands get here */
2719 case 'i':
2720 return 2 /* integer op */;
2721 case 'l':
2722 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2723 return 3; /* fldcw/fldenv */
2724 break;
2725 case 'n':
2726 if (mnemonic[2] != 'o' /* fnop */)
2727 return 3; /* non-waiting control op */
2728 break;
2729 case 'r':
2730 if (mnemonic[2] == 's')
2731 return 3; /* frstor/frstpm */
2732 break;
2733 case 's':
2734 if (mnemonic[2] == 'a')
2735 return 3; /* fsave */
2736 if (mnemonic[2] == 't')
2737 {
2738 switch (mnemonic[3])
2739 {
2740 case 'c': /* fstcw */
2741 case 'd': /* fstdw */
2742 case 'e': /* fstenv */
2743 case 's': /* fsts[gw] */
2744 return 3;
2745 }
2746 }
2747 break;
2748 case 'x':
2749 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2750 return 0; /* fxsave/fxrstor are not really math ops */
2751 break;
2752 }
2753
2754 return 1;
2755 }
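/* Illustrative results (not exhaustive): intel_float_operand ("fiadd")
   returns 2 (integer op), "fldcw" returns 3 (control op), "fxsave"
   returns 0 (not treated as a math op), and plain "fadd" falls through
   to the default return of 1.  */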
2756
2757 /* Build the VEX prefix. */
2758
2759 static void
2760 build_vex_prefix (const insn_template *t)
2761 {
2762 unsigned int register_specifier;
2763 unsigned int implied_prefix;
2764 unsigned int vector_length;
2765
2766 /* Check register specifier. */
2767 if (i.vex.register_specifier)
2768 {
2769 register_specifier = i.vex.register_specifier->reg_num;
2770 if ((i.vex.register_specifier->reg_flags & RegRex))
2771 register_specifier += 8;
2772 register_specifier = ~register_specifier & 0xf;
2773 }
2774 else
2775 register_specifier = 0xf;
2776
2777 /* Use 2-byte VEX prefix by swapping destination and source
2778 operand. */
2779 if (!i.swap_operand
2780 && i.operands == i.reg_operands
2781 && i.tm.opcode_modifier.vexopcode == VEX0F
2782 && i.tm.opcode_modifier.s
2783 && i.rex == REX_B)
2784 {
2785 unsigned int xchg = i.operands - 1;
2786 union i386_op temp_op;
2787 i386_operand_type temp_type;
2788
2789 temp_type = i.types[xchg];
2790 i.types[xchg] = i.types[0];
2791 i.types[0] = temp_type;
2792 temp_op = i.op[xchg];
2793 i.op[xchg] = i.op[0];
2794 i.op[0] = temp_op;
2795
2796 gas_assert (i.rm.mode == 3);
2797
2798 i.rex = REX_R;
2799 xchg = i.rm.regmem;
2800 i.rm.regmem = i.rm.reg;
2801 i.rm.reg = xchg;
2802
2803 /* Use the next insn. */
2804 i.tm = t[1];
2805 }
2806
2807 if (i.tm.opcode_modifier.vex == VEXScalar)
2808 vector_length = avxscalar;
2809 else
2810 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2811
2812 switch ((i.tm.base_opcode >> 8) & 0xff)
2813 {
2814 case 0:
2815 implied_prefix = 0;
2816 break;
2817 case DATA_PREFIX_OPCODE:
2818 implied_prefix = 1;
2819 break;
2820 case REPE_PREFIX_OPCODE:
2821 implied_prefix = 2;
2822 break;
2823 case REPNE_PREFIX_OPCODE:
2824 implied_prefix = 3;
2825 break;
2826 default:
2827 abort ();
2828 }
2829
2830 /* Use 2-byte VEX prefix if possible. */
2831 if (i.tm.opcode_modifier.vexopcode == VEX0F
2832 && i.tm.opcode_modifier.vexw != VEXW1
2833 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2834 {
2835 /* 2-byte VEX prefix. */
2836 unsigned int r;
2837
2838 i.vex.length = 2;
2839 i.vex.bytes[0] = 0xc5;
2840
2841 /* Check the REX.R bit. */
2842 r = (i.rex & REX_R) ? 0 : 1;
2843 i.vex.bytes[1] = (r << 7
2844 | register_specifier << 3
2845 | vector_length << 2
2846 | implied_prefix);
2847 }
2848 else
2849 {
2850 /* 3-byte VEX prefix. */
2851 unsigned int m, w;
2852
2853 i.vex.length = 3;
2854
2855 switch (i.tm.opcode_modifier.vexopcode)
2856 {
2857 case VEX0F:
2858 m = 0x1;
2859 i.vex.bytes[0] = 0xc4;
2860 break;
2861 case VEX0F38:
2862 m = 0x2;
2863 i.vex.bytes[0] = 0xc4;
2864 break;
2865 case VEX0F3A:
2866 m = 0x3;
2867 i.vex.bytes[0] = 0xc4;
2868 break;
2869 case XOP08:
2870 m = 0x8;
2871 i.vex.bytes[0] = 0x8f;
2872 break;
2873 case XOP09:
2874 m = 0x9;
2875 i.vex.bytes[0] = 0x8f;
2876 break;
2877 case XOP0A:
2878 m = 0xa;
2879 i.vex.bytes[0] = 0x8f;
2880 break;
2881 default:
2882 abort ();
2883 }
2884
2885 /* The high 3 bits of the second VEX byte are 1's complement
2886 of RXB bits from REX. */
2887 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2888
2889 /* Check the REX.W bit. */
2890 w = (i.rex & REX_W) ? 1 : 0;
2891 if (i.tm.opcode_modifier.vexw)
2892 {
2893 if (w)
2894 abort ();
2895
2896 if (i.tm.opcode_modifier.vexw == VEXW1)
2897 w = 1;
2898 }
2899
2900 i.vex.bytes[2] = (w << 7
2901 | register_specifier << 3
2902 | vector_length << 2
2903 | implied_prefix);
2904 }
2905 }
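/* Worked example (illustrative, AT&T syntax assumed): for a VEX.0F insn
   such as "vandps %xmm2, %xmm1, %xmm0" neither VEXW1 nor any of
   REX.W/X/B is needed, so the 2-byte form is chosen: byte 0 is 0xc5 and
   byte 1 packs ~R = 1, ~vvvv = 0b1110 (%xmm1), L = 0 and pp = 0, giving
   0xf0.  */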
2906
2907 static void
2908 process_immext (void)
2909 {
2910 expressionS *exp;
2911
2912 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2913 {
2914 /* SSE3 instructions have fixed operands with an opcode
2915 suffix which is coded in the same place as an 8-bit immediate
2916 field would be. Here we check those operands and remove them
2917 afterwards. */
2918 unsigned int x;
2919
2920 for (x = 0; x < i.operands; x++)
2921 if (i.op[x].regs->reg_num != x)
2922 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2923 register_prefix, i.op[x].regs->reg_name, x + 1,
2924 i.tm.name);
2925
2926 i.operands = 0;
2927 }
2928
2929 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2930 which is coded in the same place as an 8-bit immediate field
2931 would be. Here we fake an 8-bit immediate operand from the
2932 opcode suffix stored in tm.extension_opcode.
2933
2934 AVX instructions also use this encoding for some
2935 3-argument instructions. */
2936
2937 gas_assert (i.imm_operands == 0
2938 && (i.operands <= 2
2939 || (i.tm.opcode_modifier.vex
2940 && i.operands <= 4)));
2941
2942 exp = &im_expressions[i.imm_operands++];
2943 i.op[i.operands].imms = exp;
2944 i.types[i.operands] = imm8;
2945 i.operands++;
2946 exp->X_op = O_constant;
2947 exp->X_add_number = i.tm.extension_opcode;
2948 i.tm.extension_opcode = None;
2949 }
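/* Illustrative example (3DNow!, not part of the original comments): for
   "pfadd %mm1, %mm0" the opcode suffix 0x9e kept in tm.extension_opcode
   is turned into a faked Imm8 operand here, so the output routines later
   emit it in the byte where an immediate would normally go.  */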
2950
2951 /* This is the guts of the machine-dependent assembler. LINE points to a
2952 machine dependent instruction. This function is supposed to emit
2953 the frags/bytes it assembles to. */
2954
2955 void
2956 md_assemble (char *line)
2957 {
2958 unsigned int j;
2959 char mnemonic[MAX_MNEM_SIZE];
2960 const insn_template *t;
2961
2962 /* Initialize globals. */
2963 memset (&i, '\0', sizeof (i));
2964 for (j = 0; j < MAX_OPERANDS; j++)
2965 i.reloc[j] = NO_RELOC;
2966 memset (disp_expressions, '\0', sizeof (disp_expressions));
2967 memset (im_expressions, '\0', sizeof (im_expressions));
2968 save_stack_p = save_stack;
2969
2970 /* First parse an instruction mnemonic & call i386_operand for the operands.
2971 We assume that the scrubber has arranged it so that line[0] is the valid
2972 start of a (possibly prefixed) mnemonic. */
2973
2974 line = parse_insn (line, mnemonic);
2975 if (line == NULL)
2976 return;
2977
2978 line = parse_operands (line, mnemonic);
2979 this_operand = -1;
2980 if (line == NULL)
2981 return;
2982
2983 /* Now we've parsed the mnemonic into a set of templates, and have the
2984 operands at hand. */
2985
2986 /* All intel opcodes have reversed operands except for "bound" and
2987 "enter". We also don't reverse intersegment "jmp" and "call"
2988 instructions with 2 immediate operands so that the immediate segment
2989 precedes the offset, as it does when in AT&T mode. */
2990 if (intel_syntax
2991 && i.operands > 1
2992 && (strcmp (mnemonic, "bound") != 0)
2993 && (strcmp (mnemonic, "invlpga") != 0)
2994 && !(operand_type_check (i.types[0], imm)
2995 && operand_type_check (i.types[1], imm)))
2996 swap_operands ();
2997
2998 /* The order of the immediates should be reversed for the
2999 two-immediate extrq and insertq instructions. */
3000 if (i.imm_operands == 2
3001 && (strcmp (mnemonic, "extrq") == 0
3002 || strcmp (mnemonic, "insertq") == 0))
3003 swap_2_operands (0, 1);
3004
3005 if (i.imm_operands)
3006 optimize_imm ();
3007
3008 /* Don't optimize displacement for movabs since it only takes 64bit
3009 displacement. */
3010 if (i.disp_operands
3011 && !i.disp32_encoding
3012 && (flag_code != CODE_64BIT
3013 || strcmp (mnemonic, "movabs") != 0))
3014 optimize_disp ();
3015
3016 /* Next, we find a template that matches the given insn,
3017 making sure the overlap of the given operands types is consistent
3018 with the template operand types. */
3019
3020 if (!(t = match_template ()))
3021 return;
3022
3023 if (sse_check != sse_check_none
3024 && !i.tm.opcode_modifier.noavx
3025 && (i.tm.cpu_flags.bitfield.cpusse
3026 || i.tm.cpu_flags.bitfield.cpusse2
3027 || i.tm.cpu_flags.bitfield.cpusse3
3028 || i.tm.cpu_flags.bitfield.cpussse3
3029 || i.tm.cpu_flags.bitfield.cpusse4_1
3030 || i.tm.cpu_flags.bitfield.cpusse4_2))
3031 {
3032 (sse_check == sse_check_warning
3033 ? as_warn
3034 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3035 }
3036
3037 /* Zap movzx and movsx suffix. The suffix has been set from
3038 "word ptr" or "byte ptr" on the source operand in Intel syntax
3039 or extracted from mnemonic in AT&T syntax. But we'll use
3040 the destination register to choose the suffix for encoding. */
3041 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3042 {
3043 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3044 there is no suffix, the default will be byte extension. */
3045 if (i.reg_operands != 2
3046 && !i.suffix
3047 && intel_syntax)
3048 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3049
3050 i.suffix = 0;
3051 }
3052
3053 if (i.tm.opcode_modifier.fwait)
3054 if (!add_prefix (FWAIT_OPCODE))
3055 return;
3056
3057 /* Check for lock without a lockable instruction. Destination operand
3058 must be memory unless it is xchg (0x86). */
3059 if (i.prefix[LOCK_PREFIX]
3060 && (!i.tm.opcode_modifier.islockable
3061 || i.mem_operands == 0
3062 || (i.tm.base_opcode != 0x86
3063 && !operand_type_check (i.types[i.operands - 1], anymem))))
3064 {
3065 as_bad (_("expecting lockable instruction after `lock'"));
3066 return;
3067 }
3068
3069 /* Check string instruction segment overrides. */
3070 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3071 {
3072 if (!check_string ())
3073 return;
3074 i.disp_operands = 0;
3075 }
3076
3077 if (!process_suffix ())
3078 return;
3079
3080 /* Update operand types. */
3081 for (j = 0; j < i.operands; j++)
3082 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3083
3084 /* Make still unresolved immediate matches conform to size of immediate
3085 given in i.suffix. */
3086 if (!finalize_imm ())
3087 return;
3088
3089 if (i.types[0].bitfield.imm1)
3090 i.imm_operands = 0; /* kludge for shift insns. */
3091
3092 /* We only need to check those implicit registers for instructions
3093 with 3 operands or less. */
3094 if (i.operands <= 3)
3095 for (j = 0; j < i.operands; j++)
3096 if (i.types[j].bitfield.inoutportreg
3097 || i.types[j].bitfield.shiftcount
3098 || i.types[j].bitfield.acc
3099 || i.types[j].bitfield.floatacc)
3100 i.reg_operands--;
3101
3102 /* ImmExt should be processed after SSE2AVX. */
3103 if (!i.tm.opcode_modifier.sse2avx
3104 && i.tm.opcode_modifier.immext)
3105 process_immext ();
3106
3107 /* For insns with operands there are more diddles to do to the opcode. */
3108 if (i.operands)
3109 {
3110 if (!process_operands ())
3111 return;
3112 }
3113 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3114 {
3115 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3116 as_warn (_("translating to `%sp'"), i.tm.name);
3117 }
3118
3119 if (i.tm.opcode_modifier.vex)
3120 build_vex_prefix (t);
3121
3122 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3123 instructions may define INT_OPCODE as well, so avoid this corner
3124 case for those instructions that use MODRM. */
3125 if (i.tm.base_opcode == INT_OPCODE
3126 && !i.tm.opcode_modifier.modrm
3127 && i.op[0].imms->X_add_number == 3)
3128 {
3129 i.tm.base_opcode = INT3_OPCODE;
3130 i.imm_operands = 0;
3131 }
3132
3133 if ((i.tm.opcode_modifier.jump
3134 || i.tm.opcode_modifier.jumpbyte
3135 || i.tm.opcode_modifier.jumpdword)
3136 && i.op[0].disps->X_op == O_constant)
3137 {
3138 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3139 the absolute address given by the constant. Since ix86 jumps and
3140 calls are pc relative, we need to generate a reloc. */
3141 i.op[0].disps->X_add_symbol = &abs_symbol;
3142 i.op[0].disps->X_op = O_symbol;
3143 }
3144
3145 if (i.tm.opcode_modifier.rex64)
3146 i.rex |= REX_W;
3147
3148 /* For 8 bit registers we need an empty rex prefix. Also if the
3149 instruction already has a prefix, we need to convert old
3150 registers to new ones. */
3151
3152 if ((i.types[0].bitfield.reg8
3153 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3154 || (i.types[1].bitfield.reg8
3155 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3156 || ((i.types[0].bitfield.reg8
3157 || i.types[1].bitfield.reg8)
3158 && i.rex != 0))
3159 {
3160 int x;
3161
3162 i.rex |= REX_OPCODE;
3163 for (x = 0; x < 2; x++)
3164 {
3165 /* Look for 8 bit operand that uses old registers. */
3166 if (i.types[x].bitfield.reg8
3167 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3168 {
3169 /* In case it is "hi" register, give up. */
3170 if (i.op[x].regs->reg_num > 3)
3171 as_bad (_("can't encode register '%s%s' in an "
3172 "instruction requiring REX prefix."),
3173 register_prefix, i.op[x].regs->reg_name);
3174
3175 /* Otherwise it is equivalent to the extended register.
3176 Since the encoding doesn't change this is merely
3177 cosmetic cleanup for debug output. */
3178
3179 i.op[x].regs = i.op[x].regs + 8;
3180 }
3181 }
3182 }
3183
3184 if (i.rex != 0)
3185 add_prefix (REX_OPCODE | i.rex);
3186
3187 /* We are ready to output the insn. */
3188 output_insn ();
3189 }
3190
3191 static char *
3192 parse_insn (char *line, char *mnemonic)
3193 {
3194 char *l = line;
3195 char *token_start = l;
3196 char *mnem_p;
3197 int supported;
3198 const insn_template *t;
3199 char *dot_p = NULL;
3200
3201 /* Non-zero if we found a prefix only acceptable with string insns. */
3202 const char *expecting_string_instruction = NULL;
3203
3204 while (1)
3205 {
3206 mnem_p = mnemonic;
3207 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3208 {
3209 if (*mnem_p == '.')
3210 dot_p = mnem_p;
3211 mnem_p++;
3212 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3213 {
3214 as_bad (_("no such instruction: `%s'"), token_start);
3215 return NULL;
3216 }
3217 l++;
3218 }
3219 if (!is_space_char (*l)
3220 && *l != END_OF_INSN
3221 && (intel_syntax
3222 || (*l != PREFIX_SEPARATOR
3223 && *l != ',')))
3224 {
3225 as_bad (_("invalid character %s in mnemonic"),
3226 output_invalid (*l));
3227 return NULL;
3228 }
3229 if (token_start == l)
3230 {
3231 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3232 as_bad (_("expecting prefix; got nothing"));
3233 else
3234 as_bad (_("expecting mnemonic; got nothing"));
3235 return NULL;
3236 }
3237
3238 /* Look up instruction (or prefix) via hash table. */
3239 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3240
3241 if (*l != END_OF_INSN
3242 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3243 && current_templates
3244 && current_templates->start->opcode_modifier.isprefix)
3245 {
3246 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3247 {
3248 as_bad ((flag_code != CODE_64BIT
3249 ? _("`%s' is only supported in 64-bit mode")
3250 : _("`%s' is not supported in 64-bit mode")),
3251 current_templates->start->name);
3252 return NULL;
3253 }
3254 /* If we are in 16-bit mode, do not allow addr16 or data16.
3255 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3256 if ((current_templates->start->opcode_modifier.size16
3257 || current_templates->start->opcode_modifier.size32)
3258 && flag_code != CODE_64BIT
3259 && (current_templates->start->opcode_modifier.size32
3260 ^ (flag_code == CODE_16BIT)))
3261 {
3262 as_bad (_("redundant %s prefix"),
3263 current_templates->start->name);
3264 return NULL;
3265 }
3266 /* Add prefix, checking for repeated prefixes. */
3267 switch (add_prefix (current_templates->start->base_opcode))
3268 {
3269 case PREFIX_EXIST:
3270 return NULL;
3271 case PREFIX_REP:
3272 expecting_string_instruction = current_templates->start->name;
3273 break;
3274 default:
3275 break;
3276 }
3277 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3278 token_start = ++l;
3279 }
3280 else
3281 break;
3282 }
3283
3284 if (!current_templates)
3285 {
3286 /* Check if we should swap operands or force a 32-bit displacement in
3287 the encoding. */
3288 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3289 i.swap_operand = 1;
3290 else if (mnem_p - 4 == dot_p
3291 && dot_p[1] == 'd'
3292 && dot_p[2] == '3'
3293 && dot_p[3] == '2')
3294 i.disp32_encoding = 1;
3295 else
3296 goto check_suffix;
3297 mnem_p = dot_p;
3298 *dot_p = '\0';
3299 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3300 }
3301
3302 if (!current_templates)
3303 {
3304 check_suffix:
3305 /* See if we can get a match by trimming off a suffix. */
3306 switch (mnem_p[-1])
3307 {
3308 case WORD_MNEM_SUFFIX:
3309 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3310 i.suffix = SHORT_MNEM_SUFFIX;
3311 else
3312 case BYTE_MNEM_SUFFIX:
3313 case QWORD_MNEM_SUFFIX:
3314 i.suffix = mnem_p[-1];
3315 mnem_p[-1] = '\0';
3316 current_templates = (const templates *) hash_find (op_hash,
3317 mnemonic);
3318 break;
3319 case SHORT_MNEM_SUFFIX:
3320 case LONG_MNEM_SUFFIX:
3321 if (!intel_syntax)
3322 {
3323 i.suffix = mnem_p[-1];
3324 mnem_p[-1] = '\0';
3325 current_templates = (const templates *) hash_find (op_hash,
3326 mnemonic);
3327 }
3328 break;
3329
3330 /* Intel Syntax. */
3331 case 'd':
3332 if (intel_syntax)
3333 {
3334 if (intel_float_operand (mnemonic) == 1)
3335 i.suffix = SHORT_MNEM_SUFFIX;
3336 else
3337 i.suffix = LONG_MNEM_SUFFIX;
3338 mnem_p[-1] = '\0';
3339 current_templates = (const templates *) hash_find (op_hash,
3340 mnemonic);
3341 }
3342 break;
3343 }
3344 if (!current_templates)
3345 {
3346 as_bad (_("no such instruction: `%s'"), token_start);
3347 return NULL;
3348 }
3349 }
3350
3351 if (current_templates->start->opcode_modifier.jump
3352 || current_templates->start->opcode_modifier.jumpbyte)
3353 {
3354 /* Check for a branch hint. We allow ",pt" and ",pn" for
3355 predict taken and predict not taken respectively.
3356 I'm not sure that branch hints actually do anything on loop
3357 and jcxz insns (JumpByte) for current Pentium4 chips. They
3358 may work in the future and it doesn't hurt to accept them
3359 now. */
3360 if (l[0] == ',' && l[1] == 'p')
3361 {
3362 if (l[2] == 't')
3363 {
3364 if (!add_prefix (DS_PREFIX_OPCODE))
3365 return NULL;
3366 l += 3;
3367 }
3368 else if (l[2] == 'n')
3369 {
3370 if (!add_prefix (CS_PREFIX_OPCODE))
3371 return NULL;
3372 l += 3;
3373 }
3374 }
3375 }
3376 /* Any other comma loses. */
3377 if (*l == ',')
3378 {
3379 as_bad (_("invalid character %s in mnemonic"),
3380 output_invalid (*l));
3381 return NULL;
3382 }
3383
3384 /* Check if instruction is supported on specified architecture. */
3385 supported = 0;
3386 for (t = current_templates->start; t < current_templates->end; ++t)
3387 {
3388 supported |= cpu_flags_match (t);
3389 if (supported == CPU_FLAGS_PERFECT_MATCH)
3390 goto skip;
3391 }
3392
3393 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3394 {
3395 as_bad (flag_code == CODE_64BIT
3396 ? _("`%s' is not supported in 64-bit mode")
3397 : _("`%s' is only supported in 64-bit mode"),
3398 current_templates->start->name);
3399 return NULL;
3400 }
3401 if (supported != CPU_FLAGS_PERFECT_MATCH)
3402 {
3403 as_bad (_("`%s' is not supported on `%s%s'"),
3404 current_templates->start->name,
3405 cpu_arch_name ? cpu_arch_name : default_arch,
3406 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3407 return NULL;
3408 }
3409
3410 skip:
3411 if (!cpu_arch_flags.bitfield.cpui386
3412 && (flag_code != CODE_16BIT))
3413 {
3414 as_warn (_("use .code16 to ensure correct addressing mode"));
3415 }
3416
3417 /* Check for rep/repne without a string instruction. */
3418 if (expecting_string_instruction)
3419 {
3420 static templates override;
3421
3422 for (t = current_templates->start; t < current_templates->end; ++t)
3423 if (t->opcode_modifier.isstring)
3424 break;
3425 if (t >= current_templates->end)
3426 {
3427 as_bad (_("expecting string instruction after `%s'"),
3428 expecting_string_instruction);
3429 return NULL;
3430 }
3431 for (override.start = t; t < current_templates->end; ++t)
3432 if (!t->opcode_modifier.isstring)
3433 break;
3434 override.end = t;
3435 current_templates = &override;
3436 }
3437
3438 return l;
3439 }
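/* Illustrative inputs accepted here (AT&T syntax assumed): "rep movsb"
   parses "rep" as a prefix template and then requires a string insn;
   "jne,pt target" adds the DS segment prefix as a branch hint; and a
   ".s" suffix as in "movaps.s" sets i.swap_operand so the other encoding
   direction is chosen by match_template.  */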
3440
3441 static char *
3442 parse_operands (char *l, const char *mnemonic)
3443 {
3444 char *token_start;
3445
3446 /* 1 if operand is pending after ','. */
3447 unsigned int expecting_operand = 0;
3448
3449 /* Non-zero if operand parens not balanced. */
3450 unsigned int paren_not_balanced;
3451
3452 while (*l != END_OF_INSN)
3453 {
3454 /* Skip optional white space before operand. */
3455 if (is_space_char (*l))
3456 ++l;
3457 if (!is_operand_char (*l) && *l != END_OF_INSN)
3458 {
3459 as_bad (_("invalid character %s before operand %d"),
3460 output_invalid (*l),
3461 i.operands + 1);
3462 return NULL;
3463 }
3464 token_start = l; /* after white space */
3465 paren_not_balanced = 0;
3466 while (paren_not_balanced || *l != ',')
3467 {
3468 if (*l == END_OF_INSN)
3469 {
3470 if (paren_not_balanced)
3471 {
3472 if (!intel_syntax)
3473 as_bad (_("unbalanced parenthesis in operand %d."),
3474 i.operands + 1);
3475 else
3476 as_bad (_("unbalanced brackets in operand %d."),
3477 i.operands + 1);
3478 return NULL;
3479 }
3480 else
3481 break; /* we are done */
3482 }
3483 else if (!is_operand_char (*l) && !is_space_char (*l))
3484 {
3485 as_bad (_("invalid character %s in operand %d"),
3486 output_invalid (*l),
3487 i.operands + 1);
3488 return NULL;
3489 }
3490 if (!intel_syntax)
3491 {
3492 if (*l == '(')
3493 ++paren_not_balanced;
3494 if (*l == ')')
3495 --paren_not_balanced;
3496 }
3497 else
3498 {
3499 if (*l == '[')
3500 ++paren_not_balanced;
3501 if (*l == ']')
3502 --paren_not_balanced;
3503 }
3504 l++;
3505 }
3506 if (l != token_start)
3507 { /* Yes, we've read in another operand. */
3508 unsigned int operand_ok;
3509 this_operand = i.operands++;
3510 i.types[this_operand].bitfield.unspecified = 1;
3511 if (i.operands > MAX_OPERANDS)
3512 {
3513 as_bad (_("spurious operands; (%d operands/instruction max)"),
3514 MAX_OPERANDS);
3515 return NULL;
3516 }
3517 /* Now parse operand adding info to 'i' as we go along. */
3518 END_STRING_AND_SAVE (l);
3519
3520 if (intel_syntax)
3521 operand_ok =
3522 i386_intel_operand (token_start,
3523 intel_float_operand (mnemonic));
3524 else
3525 operand_ok = i386_att_operand (token_start);
3526
3527 RESTORE_END_STRING (l);
3528 if (!operand_ok)
3529 return NULL;
3530 }
3531 else
3532 {
3533 if (expecting_operand)
3534 {
3535 expecting_operand_after_comma:
3536 as_bad (_("expecting operand after ','; got nothing"));
3537 return NULL;
3538 }
3539 if (*l == ',')
3540 {
3541 as_bad (_("expecting operand before ','; got nothing"));
3542 return NULL;
3543 }
3544 }
3545
3546 /* Now *l must be either ',' or END_OF_INSN. */
3547 if (*l == ',')
3548 {
3549 if (*++l == END_OF_INSN)
3550 {
3551 /* Just skip it, if it's \n complain. */
3552 goto expecting_operand_after_comma;
3553 }
3554 expecting_operand = 1;
3555 }
3556 }
3557 return l;
3558 }
3559
3560 static void
3561 swap_2_operands (int xchg1, int xchg2)
3562 {
3563 union i386_op temp_op;
3564 i386_operand_type temp_type;
3565 enum bfd_reloc_code_real temp_reloc;
3566
3567 temp_type = i.types[xchg2];
3568 i.types[xchg2] = i.types[xchg1];
3569 i.types[xchg1] = temp_type;
3570 temp_op = i.op[xchg2];
3571 i.op[xchg2] = i.op[xchg1];
3572 i.op[xchg1] = temp_op;
3573 temp_reloc = i.reloc[xchg2];
3574 i.reloc[xchg2] = i.reloc[xchg1];
3575 i.reloc[xchg1] = temp_reloc;
3576 }
3577
3578 static void
3579 swap_operands (void)
3580 {
3581 switch (i.operands)
3582 {
3583 case 5:
3584 case 4:
3585 swap_2_operands (1, i.operands - 2);
3586 case 3:
3587 case 2:
3588 swap_2_operands (0, i.operands - 1);
3589 break;
3590 default:
3591 abort ();
3592 }
3593
3594 if (i.mem_operands == 2)
3595 {
3596 const seg_entry *temp_seg;
3597 temp_seg = i.seg[0];
3598 i.seg[0] = i.seg[1];
3599 i.seg[1] = temp_seg;
3600 }
3601 }
3602
3603 /* Try to ensure constant immediates are represented in the smallest
3604 opcode possible. */
3605 static void
3606 optimize_imm (void)
3607 {
3608 char guess_suffix = 0;
3609 int op;
3610
3611 if (i.suffix)
3612 guess_suffix = i.suffix;
3613 else if (i.reg_operands)
3614 {
3615 /* Figure out a suffix from the last register operand specified.
3616 We can't do this properly yet, i.e. excluding InOutPortReg,
3617 but the following works for instructions with immediates.
3618 In any case, we can't set i.suffix yet. */
3619 for (op = i.operands; --op >= 0;)
3620 if (i.types[op].bitfield.reg8)
3621 {
3622 guess_suffix = BYTE_MNEM_SUFFIX;
3623 break;
3624 }
3625 else if (i.types[op].bitfield.reg16)
3626 {
3627 guess_suffix = WORD_MNEM_SUFFIX;
3628 break;
3629 }
3630 else if (i.types[op].bitfield.reg32)
3631 {
3632 guess_suffix = LONG_MNEM_SUFFIX;
3633 break;
3634 }
3635 else if (i.types[op].bitfield.reg64)
3636 {
3637 guess_suffix = QWORD_MNEM_SUFFIX;
3638 break;
3639 }
3640 }
3641 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3642 guess_suffix = WORD_MNEM_SUFFIX;
3643
3644 for (op = i.operands; --op >= 0;)
3645 if (operand_type_check (i.types[op], imm))
3646 {
3647 switch (i.op[op].imms->X_op)
3648 {
3649 case O_constant:
3650 /* If a suffix is given, this operand may be shortened. */
3651 switch (guess_suffix)
3652 {
3653 case LONG_MNEM_SUFFIX:
3654 i.types[op].bitfield.imm32 = 1;
3655 i.types[op].bitfield.imm64 = 1;
3656 break;
3657 case WORD_MNEM_SUFFIX:
3658 i.types[op].bitfield.imm16 = 1;
3659 i.types[op].bitfield.imm32 = 1;
3660 i.types[op].bitfield.imm32s = 1;
3661 i.types[op].bitfield.imm64 = 1;
3662 break;
3663 case BYTE_MNEM_SUFFIX:
3664 i.types[op].bitfield.imm8 = 1;
3665 i.types[op].bitfield.imm8s = 1;
3666 i.types[op].bitfield.imm16 = 1;
3667 i.types[op].bitfield.imm32 = 1;
3668 i.types[op].bitfield.imm32s = 1;
3669 i.types[op].bitfield.imm64 = 1;
3670 break;
3671 }
3672
3673 /* If this operand is at most 16 bits, convert it
3674 to a signed 16 bit number before trying to see
3675 whether it will fit in an even smaller size.
3676 This allows a 16-bit operand such as $0xffe0 to
3677 be recognised as within Imm8S range. */
3678 if ((i.types[op].bitfield.imm16)
3679 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3680 {
3681 i.op[op].imms->X_add_number =
3682 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3683 }
3684 if ((i.types[op].bitfield.imm32)
3685 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3686 == 0))
3687 {
3688 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3689 ^ ((offsetT) 1 << 31))
3690 - ((offsetT) 1 << 31));
3691 }
3692 i.types[op]
3693 = operand_type_or (i.types[op],
3694 smallest_imm_type (i.op[op].imms->X_add_number));
3695
3696 /* We must avoid matching of Imm32 templates when 64bit
3697 only immediate is available. */
3698 if (guess_suffix == QWORD_MNEM_SUFFIX)
3699 i.types[op].bitfield.imm32 = 0;
3700 break;
3701
3702 case O_absent:
3703 case O_register:
3704 abort ();
3705
3706 /* Symbols and expressions. */
3707 default:
3708 /* Convert symbolic operand to proper sizes for matching, but don't
3709 prevent matching a set of insns that only supports sizes other
3710 than those matching the insn suffix. */
3711 {
3712 i386_operand_type mask, allowed;
3713 const insn_template *t;
3714
3715 operand_type_set (&mask, 0);
3716 operand_type_set (&allowed, 0);
3717
3718 for (t = current_templates->start;
3719 t < current_templates->end;
3720 ++t)
3721 allowed = operand_type_or (allowed,
3722 t->operand_types[op]);
3723 switch (guess_suffix)
3724 {
3725 case QWORD_MNEM_SUFFIX:
3726 mask.bitfield.imm64 = 1;
3727 mask.bitfield.imm32s = 1;
3728 break;
3729 case LONG_MNEM_SUFFIX:
3730 mask.bitfield.imm32 = 1;
3731 break;
3732 case WORD_MNEM_SUFFIX:
3733 mask.bitfield.imm16 = 1;
3734 break;
3735 case BYTE_MNEM_SUFFIX:
3736 mask.bitfield.imm8 = 1;
3737 break;
3738 default:
3739 break;
3740 }
3741 allowed = operand_type_and (mask, allowed);
3742 if (!operand_type_all_zero (&allowed))
3743 i.types[op] = operand_type_and (i.types[op], mask);
3744 }
3745 break;
3746 }
3747 }
3748 }
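/* Worked example (illustrative): for "addw $0xffe0, %ax" the word-sized
   constant is first folded to the signed value -0x20, which then lets
   smallest_imm_type mark it Imm8S as well, so the sign-extended imm8
   form of the opcode can be matched.  */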
3749
3750 /* Try to use the smallest displacement type too. */
3751 static void
3752 optimize_disp (void)
3753 {
3754 int op;
3755
3756 for (op = i.operands; --op >= 0;)
3757 if (operand_type_check (i.types[op], disp))
3758 {
3759 if (i.op[op].disps->X_op == O_constant)
3760 {
3761 offsetT op_disp = i.op[op].disps->X_add_number;
3762
3763 if (i.types[op].bitfield.disp16
3764 && (op_disp & ~(offsetT) 0xffff) == 0)
3765 {
3766 /* If this operand is at most 16 bits, convert
3767 to a signed 16 bit number and don't use 64bit
3768 displacement. */
3769 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3770 i.types[op].bitfield.disp64 = 0;
3771 }
3772 if (i.types[op].bitfield.disp32
3773 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3774 {
3775 /* If this operand is at most 32 bits, convert
3776 to a signed 32 bit number and don't use 64bit
3777 displacement. */
3778 op_disp &= (((offsetT) 2 << 31) - 1);
3779 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3780 i.types[op].bitfield.disp64 = 0;
3781 }
3782 if (!op_disp && i.types[op].bitfield.baseindex)
3783 {
3784 i.types[op].bitfield.disp8 = 0;
3785 i.types[op].bitfield.disp16 = 0;
3786 i.types[op].bitfield.disp32 = 0;
3787 i.types[op].bitfield.disp32s = 0;
3788 i.types[op].bitfield.disp64 = 0;
3789 i.op[op].disps = 0;
3790 i.disp_operands--;
3791 }
3792 else if (flag_code == CODE_64BIT)
3793 {
3794 if (fits_in_signed_long (op_disp))
3795 {
3796 i.types[op].bitfield.disp64 = 0;
3797 i.types[op].bitfield.disp32s = 1;
3798 }
3799 if (i.prefix[ADDR_PREFIX]
3800 && fits_in_unsigned_long (op_disp))
3801 i.types[op].bitfield.disp32 = 1;
3802 }
3803 if ((i.types[op].bitfield.disp32
3804 || i.types[op].bitfield.disp32s
3805 || i.types[op].bitfield.disp16)
3806 && fits_in_signed_byte (op_disp))
3807 i.types[op].bitfield.disp8 = 1;
3808 }
3809 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3810 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3811 {
3812 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3813 i.op[op].disps, 0, i.reloc[op]);
3814 i.types[op].bitfield.disp8 = 0;
3815 i.types[op].bitfield.disp16 = 0;
3816 i.types[op].bitfield.disp32 = 0;
3817 i.types[op].bitfield.disp32s = 0;
3818 i.types[op].bitfield.disp64 = 0;
3819 }
3820 else
3821 /* We only support 64bit displacement on constants. */
3822 i.types[op].bitfield.disp64 = 0;
3823 }
3824 }
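/* Worked example (illustrative): for "movl %eax, 0x10(%ebx)" the constant
   displacement fits in a signed byte, so Disp8 is set in addition to
   Disp32 and the shorter one-byte displacement encoding can be used; an
   explicit zero displacement with a base register, as in "0(%ebx)", drops
   the Disp bits entirely so no displacement byte is emitted at all.  */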
3825
3826 /* Check if operands are valid for the instruction. Update VEX
3827 operand types. */
3828
3829 static int
3830 VEX_check_operands (const insn_template *t)
3831 {
3832 if (!t->opcode_modifier.vex)
3833 return 0;
3834
3835 /* Only check VEX_Imm4, which must be the first operand. */
3836 if (t->operand_types[0].bitfield.vec_imm4)
3837 {
3838 if (i.op[0].imms->X_op != O_constant
3839 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3840 {
3841 i.error = bad_imm4;
3842 return 1;
3843 }
3844
3845 /* Turn off Imm8 so that update_imm won't complain. */
3846 i.types[0] = vec_imm4;
3847 }
3848
3849 return 0;
3850 }
3851
3852 static const insn_template *
3853 match_template (void)
3854 {
3855 /* Points to template once we've found it. */
3856 const insn_template *t;
3857 i386_operand_type overlap0, overlap1, overlap2, overlap3;
3858 i386_operand_type overlap4;
3859 unsigned int found_reverse_match;
3860 i386_opcode_modifier suffix_check;
3861 i386_operand_type operand_types [MAX_OPERANDS];
3862 int addr_prefix_disp;
3863 unsigned int j;
3864 unsigned int found_cpu_match;
3865 unsigned int check_register;
3866
3867 #if MAX_OPERANDS != 5
3868 # error "MAX_OPERANDS must be 5."
3869 #endif
3870
3871 found_reverse_match = 0;
3872 addr_prefix_disp = -1;
3873
3874 memset (&suffix_check, 0, sizeof (suffix_check));
3875 if (i.suffix == BYTE_MNEM_SUFFIX)
3876 suffix_check.no_bsuf = 1;
3877 else if (i.suffix == WORD_MNEM_SUFFIX)
3878 suffix_check.no_wsuf = 1;
3879 else if (i.suffix == SHORT_MNEM_SUFFIX)
3880 suffix_check.no_ssuf = 1;
3881 else if (i.suffix == LONG_MNEM_SUFFIX)
3882 suffix_check.no_lsuf = 1;
3883 else if (i.suffix == QWORD_MNEM_SUFFIX)
3884 suffix_check.no_qsuf = 1;
3885 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
3886 suffix_check.no_ldsuf = 1;
3887
3888 /* Must have right number of operands. */
3889 i.error = number_of_operands_mismatch;
3890
3891 for (t = current_templates->start; t < current_templates->end; t++)
3892 {
3893 addr_prefix_disp = -1;
3894
3895 if (i.operands != t->operands)
3896 continue;
3897
3898 /* Check processor support. */
3899 i.error = unsupported;
3900 found_cpu_match = (cpu_flags_match (t)
3901 == CPU_FLAGS_PERFECT_MATCH);
3902 if (!found_cpu_match)
3903 continue;
3904
3905 /* Check old gcc support. */
3906 i.error = old_gcc_only;
3907 if (!old_gcc && t->opcode_modifier.oldgcc)
3908 continue;
3909
3910 /* Check AT&T mnemonic. */
3911 i.error = unsupported_with_intel_mnemonic;
3912 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
3913 continue;
3914
3915 /* Check AT&T/Intel syntax. */
3916 i.error = unsupported_syntax;
3917 if ((intel_syntax && t->opcode_modifier.attsyntax)
3918 || (!intel_syntax && t->opcode_modifier.intelsyntax))
3919 continue;
3920
3921 /* Check the suffix, except for some instructions in intel mode. */
3922 i.error = invalid_instruction_suffix;
3923 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
3924 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
3925 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
3926 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
3927 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
3928 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
3929 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
3930 continue;
3931
3932 if (!operand_size_match (t))
3933 continue;
3934
3935 for (j = 0; j < MAX_OPERANDS; j++)
3936 operand_types[j] = t->operand_types[j];
3937
3938 /* In general, don't allow 64-bit operands in 32-bit mode. */
3939 if (i.suffix == QWORD_MNEM_SUFFIX
3940 && flag_code != CODE_64BIT
3941 && (intel_syntax
3942 ? (!t->opcode_modifier.ignoresize
3943 && !intel_float_operand (t->name))
3944 : intel_float_operand (t->name) != 2)
3945 && ((!operand_types[0].bitfield.regmmx
3946 && !operand_types[0].bitfield.regxmm
3947 && !operand_types[0].bitfield.regymm)
3948 || (!operand_types[t->operands > 1].bitfield.regmmx
3949 && !!operand_types[t->operands > 1].bitfield.regxmm
3950 && !!operand_types[t->operands > 1].bitfield.regymm))
3951 && (t->base_opcode != 0x0fc7
3952 || t->extension_opcode != 1 /* cmpxchg8b */))
3953 continue;
3954
3955 /* In general, don't allow 32-bit operands on pre-386. */
3956 else if (i.suffix == LONG_MNEM_SUFFIX
3957 && !cpu_arch_flags.bitfield.cpui386
3958 && (intel_syntax
3959 ? (!t->opcode_modifier.ignoresize
3960 && !intel_float_operand (t->name))
3961 : intel_float_operand (t->name) != 2)
3962 && ((!operand_types[0].bitfield.regmmx
3963 && !operand_types[0].bitfield.regxmm)
3964 || (!operand_types[t->operands > 1].bitfield.regmmx
3965 && !!operand_types[t->operands > 1].bitfield.regxmm)))
3966 continue;
3967
3968 /* Do not verify operands when there are none. */
3969 else
3970 {
3971 if (!t->operands)
3972 /* We've found a match; break out of loop. */
3973 break;
3974 }
3975
3976 /* An address size prefix turns a Disp64 operand into Disp32, a
3977 Disp32 operand into Disp16, and a Disp16 operand into Disp32. */
3978 if (i.prefix[ADDR_PREFIX] != 0)
3979 {
3980 /* There should be only one Disp operand. */
3981 switch (flag_code)
3982 {
3983 case CODE_16BIT:
3984 for (j = 0; j < MAX_OPERANDS; j++)
3985 {
3986 if (operand_types[j].bitfield.disp16)
3987 {
3988 addr_prefix_disp = j;
3989 operand_types[j].bitfield.disp32 = 1;
3990 operand_types[j].bitfield.disp16 = 0;
3991 break;
3992 }
3993 }
3994 break;
3995 case CODE_32BIT:
3996 for (j = 0; j < MAX_OPERANDS; j++)
3997 {
3998 if (operand_types[j].bitfield.disp32)
3999 {
4000 addr_prefix_disp = j;
4001 operand_types[j].bitfield.disp32 = 0;
4002 operand_types[j].bitfield.disp16 = 1;
4003 break;
4004 }
4005 }
4006 break;
4007 case CODE_64BIT:
4008 for (j = 0; j < MAX_OPERANDS; j++)
4009 {
4010 if (operand_types[j].bitfield.disp64)
4011 {
4012 addr_prefix_disp = j;
4013 operand_types[j].bitfield.disp64 = 0;
4014 operand_types[j].bitfield.disp32 = 1;
4015 break;
4016 }
4017 }
4018 break;
4019 }
4020 }
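/* Illustrative example (not from the original comments): something
   like `addr16 movl 0x1234, %eax' in 32-bit code uses 16-bit
   addressing, so the Disp32 operand of the matching template is
   treated as Disp16 here; likewise the 0x67 prefix turns Disp64
   into Disp32 in 64-bit code.  */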
4021
4022 /* We check register size if needed. */
4023 check_register = t->opcode_modifier.checkregsize;
4024 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4025 switch (t->operands)
4026 {
4027 case 1:
4028 if (!operand_type_match (overlap0, i.types[0]))
4029 continue;
4030 break;
4031 case 2:
4032 /* xchg %eax, %eax is a special case. It is an alias for nop
4033 only in 32bit mode and we can use opcode 0x90. In 64bit
4034 mode, we can't use 0x90 for xchg %eax, %eax since it should
4035 zero-extend %eax to %rax. */
4036 if (flag_code == CODE_64BIT
4037 && t->base_opcode == 0x90
4038 && operand_type_equal (&i.types [0], &acc32)
4039 && operand_type_equal (&i.types [1], &acc32))
4040 continue;
4041 if (i.swap_operand)
4042 {
4043 /* If we swap the operands in the encoding, we either match
4044 the next template or reverse the direction of the operands. */
4045 if (t->opcode_modifier.s)
4046 continue;
4047 else if (t->opcode_modifier.d)
4048 goto check_reverse;
4049 }
4050
4051 case 3:
4052 /* If we swap the operands in the encoding, we match the next template. */
4053 if (i.swap_operand && t->opcode_modifier.s)
4054 continue;
4055 case 4:
4056 case 5:
4057 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4058 if (!operand_type_match (overlap0, i.types[0])
4059 || !operand_type_match (overlap1, i.types[1])
4060 || (check_register
4061 && !operand_type_register_match (overlap0, i.types[0],
4062 operand_types[0],
4063 overlap1, i.types[1],
4064 operand_types[1])))
4065 {
4066 /* Check if other direction is valid ... */
4067 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4068 continue;
4069
4070 check_reverse:
4071 /* Try reversing direction of operands. */
4072 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4073 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4074 if (!operand_type_match (overlap0, i.types[0])
4075 || !operand_type_match (overlap1, i.types[1])
4076 || (check_register
4077 && !operand_type_register_match (overlap0,
4078 i.types[0],
4079 operand_types[1],
4080 overlap1,
4081 i.types[1],
4082 operand_types[0])))
4083 {
4084 /* Does not match either direction. */
4085 continue;
4086 }
4087 /* found_reverse_match holds which of D or FloatD we've
4088 found, possibly combined with FloatR. */
4089 if (t->opcode_modifier.d)
4090 found_reverse_match = Opcode_D;
4091 else if (t->opcode_modifier.floatd)
4092 found_reverse_match = Opcode_FloatD;
4093 else
4094 found_reverse_match = 0;
4095 if (t->opcode_modifier.floatr)
4096 found_reverse_match |= Opcode_FloatR;
4097 }
4098 else
4099 {
4100 /* Found a forward 2 operand match here. */
4101 switch (t->operands)
4102 {
4103 case 5:
4104 overlap4 = operand_type_and (i.types[4],
4105 operand_types[4]);
4106 case 4:
4107 overlap3 = operand_type_and (i.types[3],
4108 operand_types[3]);
4109 case 3:
4110 overlap2 = operand_type_and (i.types[2],
4111 operand_types[2]);
4112 break;
4113 }
4114
4115 switch (t->operands)
4116 {
4117 case 5:
4118 if (!operand_type_match (overlap4, i.types[4])
4119 || !operand_type_register_match (overlap3,
4120 i.types[3],
4121 operand_types[3],
4122 overlap4,
4123 i.types[4],
4124 operand_types[4]))
4125 continue;
4126 case 4:
4127 if (!operand_type_match (overlap3, i.types[3])
4128 || (check_register
4129 && !operand_type_register_match (overlap2,
4130 i.types[2],
4131 operand_types[2],
4132 overlap3,
4133 i.types[3],
4134 operand_types[3])))
4135 continue;
4136 case 3:
4137 /* Here we make use of the fact that there are no
4138 reverse match 3 operand instructions, and all 3
4139 operand instructions only need to be checked for
4140 register consistency between operands 2 and 3. */
4141 if (!operand_type_match (overlap2, i.types[2])
4142 || (check_register
4143 && !operand_type_register_match (overlap1,
4144 i.types[1],
4145 operand_types[1],
4146 overlap2,
4147 i.types[2],
4148 operand_types[2])))
4149 continue;
4150 break;
4151 }
4152 }
4153 /* Found either a forward or reverse 2, 3, 4 or 5 operand match here;
4154 fall through to break out of the loop. */
4155 }
4156 if (!found_cpu_match)
4157 {
4158 found_reverse_match = 0;
4159 continue;
4160 }
4161
4162 /* Check if VEX operands are valid. */
4163 if (VEX_check_operands (t))
4164 continue;
4165
4166 /* We've found a match; break out of loop. */
4167 break;
4168 }
4169
4170 if (t == current_templates->end)
4171 {
4172 /* We found no match. */
4173 const char *err_msg;
4174 switch (i.error)
4175 {
4176 default:
4177 abort ();
4178 case operand_size_mismatch:
4179 err_msg = _("operand size mismatch");
4180 break;
4181 case operand_type_mismatch:
4182 err_msg = _("operand type mismatch");
4183 break;
4184 case register_type_mismatch:
4185 err_msg = _("register type mismatch");
4186 break;
4187 case number_of_operands_mismatch:
4188 err_msg = _("number of operands mismatch");
4189 break;
4190 case invalid_instruction_suffix:
4191 err_msg = _("invalid instruction suffix");
4192 break;
4193 case bad_imm4:
4194 err_msg = _("Imm4 isn't the first operand");
4195 break;
4196 case old_gcc_only:
4197 err_msg = _("only supported with old gcc");
4198 break;
4199 case unsupported_with_intel_mnemonic:
4200 err_msg = _("unsupported with Intel mnemonic");
4201 break;
4202 case unsupported_syntax:
4203 err_msg = _("unsupported syntax");
4204 break;
4205 case unsupported:
4206 err_msg = _("unsupported");
4207 break;
4208 }
4209 as_bad (_("%s for `%s'"), err_msg,
4210 current_templates->start->name);
4211 return NULL;
4212 }
4213
4214 if (!quiet_warnings)
4215 {
4216 if (!intel_syntax
4217 && (i.types[0].bitfield.jumpabsolute
4218 != operand_types[0].bitfield.jumpabsolute))
4219 {
4220 as_warn (_("indirect %s without `*'"), t->name);
4221 }
4222
4223 if (t->opcode_modifier.isprefix
4224 && t->opcode_modifier.ignoresize)
4225 {
4226 /* Warn them that a data or address size prefix doesn't
4227 affect assembly of the next line of code. */
4228 as_warn (_("stand-alone `%s' prefix"), t->name);
4229 }
4230 }
4231
4232 /* Copy the template we found. */
4233 i.tm = *t;
4234
4235 if (addr_prefix_disp != -1)
4236 i.tm.operand_types[addr_prefix_disp]
4237 = operand_types[addr_prefix_disp];
4238
4239 if (found_reverse_match)
4240 {
4241 /* If we found a reverse match we must alter the opcode
4242 direction bit. found_reverse_match holds bits to change
4243 (different for int & float insns). */
4244
4245 i.tm.base_opcode ^= found_reverse_match;
4246
4247 i.tm.operand_types[0] = operand_types[1];
4248 i.tm.operand_types[1] = operand_types[0];
4249 }
4250
4251 return t;
4252 }
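/* Worked example of the direction-bit handling above (added for
   illustration):

       addl %eax, (%ebx)	# opcode 0x01 /r, reg -> r/m
       addl (%ebx), %eax	# opcode 0x03 /r, r/m -> reg

   When only the reversed form of a template matches,
   found_reverse_match is set to Opcode_D (or Opcode_FloatD/FloatR
   for float insns) and the direction bit of i.tm.base_opcode is
   flipped, with the two operand types swapped to match.  */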
4253
4254 static int
4255 check_string (void)
4256 {
4257 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4258 if (i.tm.operand_types[mem_op].bitfield.esseg)
4259 {
4260 if (i.seg[0] != NULL && i.seg[0] != &es)
4261 {
4262 as_bad (_("`%s' operand %d must use `%ses' segment"),
4263 i.tm.name,
4264 mem_op + 1,
4265 register_prefix);
4266 return 0;
4267 }
4268 /* There's only ever one segment override allowed per instruction.
4269 This instruction possibly has a legal segment override on the
4270 second operand, so copy the segment to where non-string
4271 instructions store it, allowing common code. */
4272 i.seg[0] = i.seg[1];
4273 }
4274 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4275 {
4276 if (i.seg[1] != NULL && i.seg[1] != &es)
4277 {
4278 as_bad (_("`%s' operand %d must use `%ses' segment"),
4279 i.tm.name,
4280 mem_op + 2,
4281 register_prefix);
4282 return 0;
4283 }
4284 }
4285 return 1;
4286 }
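/* Illustrative example (assumed behaviour, not from the original
   comments): the destination operand of the string insns is marked
   EsSeg, so

       stosl %eax, %es:(%edi)	# accepted
       stosl %eax, %fs:(%edi)	# rejected by check_string

   i.e. only the %es override (or none) is allowed on that operand;
   movs/cmps/scas are checked the same way.  */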
4287
4288 static int
4289 process_suffix (void)
4290 {
4291 /* If matched instruction specifies an explicit instruction mnemonic
4292 suffix, use it. */
4293 if (i.tm.opcode_modifier.size16)
4294 i.suffix = WORD_MNEM_SUFFIX;
4295 else if (i.tm.opcode_modifier.size32)
4296 i.suffix = LONG_MNEM_SUFFIX;
4297 else if (i.tm.opcode_modifier.size64)
4298 i.suffix = QWORD_MNEM_SUFFIX;
4299 else if (i.reg_operands)
4300 {
4301 /* If there's no instruction mnemonic suffix we try to invent one
4302 based on register operands. */
4303 if (!i.suffix)
4304 {
4305 /* We take i.suffix from the last register operand specified.
4306 The destination register type is more significant than the
4307 source register type. crc32 in SSE4.2 prefers the source
4308 register type. */
4309 if (i.tm.base_opcode == 0xf20f38f1)
4310 {
4311 if (i.types[0].bitfield.reg16)
4312 i.suffix = WORD_MNEM_SUFFIX;
4313 else if (i.types[0].bitfield.reg32)
4314 i.suffix = LONG_MNEM_SUFFIX;
4315 else if (i.types[0].bitfield.reg64)
4316 i.suffix = QWORD_MNEM_SUFFIX;
4317 }
4318 else if (i.tm.base_opcode == 0xf20f38f0)
4319 {
4320 if (i.types[0].bitfield.reg8)
4321 i.suffix = BYTE_MNEM_SUFFIX;
4322 }
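/* Illustrative example (added): crc32 takes its suffix from the
   source register rather than the destination, e.g.

       crc32 %cx, %ebx		# should infer the `w' suffix from %cx
       crc32b %al, %ebx		# explicit byte form, opcode 0xf20f38f0

   which is why the 0xf20f38f1/0xf20f38f0 opcodes are special-cased
   above.  */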
4323
4324 if (!i.suffix)
4325 {
4326 int op;
4327
4328 if (i.tm.base_opcode == 0xf20f38f1
4329 || i.tm.base_opcode == 0xf20f38f0)
4330 {
4331 /* We have to know the operand size for crc32. */
4332 as_bad (_("ambiguous memory operand size for `%s'"),
4333 i.tm.name);
4334 return 0;
4335 }
4336
4337 for (op = i.operands; --op >= 0;)
4338 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4339 {
4340 if (i.types[op].bitfield.reg8)
4341 {
4342 i.suffix = BYTE_MNEM_SUFFIX;
4343 break;
4344 }
4345 else if (i.types[op].bitfield.reg16)
4346 {
4347 i.suffix = WORD_MNEM_SUFFIX;
4348 break;
4349 }
4350 else if (i.types[op].bitfield.reg32)
4351 {
4352 i.suffix = LONG_MNEM_SUFFIX;
4353 break;
4354 }
4355 else if (i.types[op].bitfield.reg64)
4356 {
4357 i.suffix = QWORD_MNEM_SUFFIX;
4358 break;
4359 }
4360 }
4361 }
4362 }
4363 else if (i.suffix == BYTE_MNEM_SUFFIX)
4364 {
4365 if (intel_syntax
4366 && i.tm.opcode_modifier.ignoresize
4367 && i.tm.opcode_modifier.no_bsuf)
4368 i.suffix = 0;
4369 else if (!check_byte_reg ())
4370 return 0;
4371 }
4372 else if (i.suffix == LONG_MNEM_SUFFIX)
4373 {
4374 if (intel_syntax
4375 && i.tm.opcode_modifier.ignoresize
4376 && i.tm.opcode_modifier.no_lsuf)
4377 i.suffix = 0;
4378 else if (!check_long_reg ())
4379 return 0;
4380 }
4381 else if (i.suffix == QWORD_MNEM_SUFFIX)
4382 {
4383 if (intel_syntax
4384 && i.tm.opcode_modifier.ignoresize
4385 && i.tm.opcode_modifier.no_qsuf)
4386 i.suffix = 0;
4387 else if (!check_qword_reg ())
4388 return 0;
4389 }
4390 else if (i.suffix == WORD_MNEM_SUFFIX)
4391 {
4392 if (intel_syntax
4393 && i.tm.opcode_modifier.ignoresize
4394 && i.tm.opcode_modifier.no_wsuf)
4395 i.suffix = 0;
4396 else if (!check_word_reg ())
4397 return 0;
4398 }
4399 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4400 || i.suffix == YMMWORD_MNEM_SUFFIX)
4401 {
4402 /* Skip if the instruction has x/y suffix. match_template
4403 should check if it is a valid suffix. */
4404 }
4405 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4406 /* Do nothing if the instruction is going to ignore the prefix. */
4407 ;
4408 else
4409 abort ();
4410 }
4411 else if (i.tm.opcode_modifier.defaultsize
4412 && !i.suffix
4413 /* exclude fldenv/frstor/fsave/fstenv */
4414 && i.tm.opcode_modifier.no_ssuf)
4415 {
4416 i.suffix = stackop_size;
4417 }
4418 else if (intel_syntax
4419 && !i.suffix
4420 && (i.tm.operand_types[0].bitfield.jumpabsolute
4421 || i.tm.opcode_modifier.jumpbyte
4422 || i.tm.opcode_modifier.jumpintersegment
4423 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4424 && i.tm.extension_opcode <= 3)))
4425 {
4426 switch (flag_code)
4427 {
4428 case CODE_64BIT:
4429 if (!i.tm.opcode_modifier.no_qsuf)
4430 {
4431 i.suffix = QWORD_MNEM_SUFFIX;
4432 break;
4433 }
4434 case CODE_32BIT:
4435 if (!i.tm.opcode_modifier.no_lsuf)
4436 i.suffix = LONG_MNEM_SUFFIX;
4437 break;
4438 case CODE_16BIT:
4439 if (!i.tm.opcode_modifier.no_wsuf)
4440 i.suffix = WORD_MNEM_SUFFIX;
4441 break;
4442 }
4443 }
4444
4445 if (!i.suffix)
4446 {
4447 if (!intel_syntax)
4448 {
4449 if (i.tm.opcode_modifier.w)
4450 {
4451 as_bad (_("no instruction mnemonic suffix given and "
4452 "no register operands; can't size instruction"));
4453 return 0;
4454 }
4455 }
4456 else
4457 {
4458 unsigned int suffixes;
4459
4460 suffixes = !i.tm.opcode_modifier.no_bsuf;
4461 if (!i.tm.opcode_modifier.no_wsuf)
4462 suffixes |= 1 << 1;
4463 if (!i.tm.opcode_modifier.no_lsuf)
4464 suffixes |= 1 << 2;
4465 if (!i.tm.opcode_modifier.no_ldsuf)
4466 suffixes |= 1 << 3;
4467 if (!i.tm.opcode_modifier.no_ssuf)
4468 suffixes |= 1 << 4;
4469 if (!i.tm.opcode_modifier.no_qsuf)
4470 suffixes |= 1 << 5;
4471
4472 /* There is more than one possible suffix match. */
4473 if (i.tm.opcode_modifier.w
4474 || ((suffixes & (suffixes - 1))
4475 && !i.tm.opcode_modifier.defaultsize
4476 && !i.tm.opcode_modifier.ignoresize))
4477 {
4478 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4479 return 0;
4480 }
4481 }
4482 }
4483
4484 /* Change the opcode based on the operand size given by i.suffix.
4485 We don't need to change things for byte insns. */
4486
4487 if (i.suffix
4488 && i.suffix != BYTE_MNEM_SUFFIX
4489 && i.suffix != XMMWORD_MNEM_SUFFIX
4490 && i.suffix != YMMWORD_MNEM_SUFFIX)
4491 {
4492 /* It's not a byte, select word/dword operation. */
4493 if (i.tm.opcode_modifier.w)
4494 {
4495 if (i.tm.opcode_modifier.shortform)
4496 i.tm.base_opcode |= 8;
4497 else
4498 i.tm.base_opcode |= 1;
4499 }
4500
4501 /* Now select between word & dword operations via the operand
4502 size prefix, except for instructions that will ignore this
4503 prefix anyway. */
4504 if (i.tm.opcode_modifier.addrprefixop0)
4505 {
4506 /* The address size override prefix changes the size of the
4507 first operand. */
4508 if ((flag_code == CODE_32BIT
4509 && i.op->regs[0].reg_type.bitfield.reg16)
4510 || (flag_code != CODE_32BIT
4511 && i.op->regs[0].reg_type.bitfield.reg32))
4512 if (!add_prefix (ADDR_PREFIX_OPCODE))
4513 return 0;
4514 }
4515 else if (i.suffix != QWORD_MNEM_SUFFIX
4516 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4517 && !i.tm.opcode_modifier.ignoresize
4518 && !i.tm.opcode_modifier.floatmf
4519 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4520 || (flag_code == CODE_64BIT
4521 && i.tm.opcode_modifier.jumpbyte)))
4522 {
4523 unsigned int prefix = DATA_PREFIX_OPCODE;
4524
4525 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4526 prefix = ADDR_PREFIX_OPCODE;
4527
4528 if (!add_prefix (prefix))
4529 return 0;
4530 }
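/* Example of the prefix selection above (illustrative): in 32-bit
   code a `w'-suffixed insn needs the 0x66 operand size prefix,

       movw %ax, %bx		# 66 89 c3

   while a sized loop/jecxz-type insn takes the 0x67 address size
   prefix instead, since its "size" is really that of %cx/%ecx.  */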
4531
4532 /* Set mode64 for an operand. */
4533 if (i.suffix == QWORD_MNEM_SUFFIX
4534 && flag_code == CODE_64BIT
4535 && !i.tm.opcode_modifier.norex64)
4536 {
4537 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4538 need rex64. cmpxchg8b is also a special case. */
4539 if (! (i.operands == 2
4540 && i.tm.base_opcode == 0x90
4541 && i.tm.extension_opcode == None
4542 && operand_type_equal (&i.types [0], &acc64)
4543 && operand_type_equal (&i.types [1], &acc64))
4544 && ! (i.operands == 1
4545 && i.tm.base_opcode == 0xfc7
4546 && i.tm.extension_opcode == 1
4547 && !operand_type_check (i.types [0], reg)
4548 && operand_type_check (i.types [0], anymem)))
4549 i.rex |= REX_W;
4550 }
4551
4552 /* Size floating point instruction. */
4553 if (i.suffix == LONG_MNEM_SUFFIX)
4554 if (i.tm.opcode_modifier.floatmf)
4555 i.tm.base_opcode ^= 4;
4556 }
4557
4558 return 1;
4559 }
4560
4561 static int
4562 check_byte_reg (void)
4563 {
4564 int op;
4565
4566 for (op = i.operands; --op >= 0;)
4567 {
4568 /* If this is an eight bit register, it's OK. If it's the 16 or
4569 32 bit version of an eight bit register, we will just use the
4570 low portion, and that's OK too. */
4571 if (i.types[op].bitfield.reg8)
4572 continue;
4573
4574 /* crc32 doesn't generate this warning. */
4575 if (i.tm.base_opcode == 0xf20f38f0)
4576 continue;
4577
4578 if ((i.types[op].bitfield.reg16
4579 || i.types[op].bitfield.reg32
4580 || i.types[op].bitfield.reg64)
4581 && i.op[op].regs->reg_num < 4)
4582 {
4583 /* Prohibit these changes in the 64bit mode, since the
4584 lowering is more complicated. */
4585 if (flag_code == CODE_64BIT
4586 && !i.tm.operand_types[op].bitfield.inoutportreg)
4587 {
4588 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4589 register_prefix, i.op[op].regs->reg_name,
4590 i.suffix);
4591 return 0;
4592 }
4593 #if REGISTER_WARNINGS
4594 if (!quiet_warnings
4595 && !i.tm.operand_types[op].bitfield.inoutportreg)
4596 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4597 register_prefix,
4598 (i.op[op].regs + (i.types[op].bitfield.reg16
4599 ? REGNAM_AL - REGNAM_AX
4600 : REGNAM_AL - REGNAM_EAX))->reg_name,
4601 register_prefix,
4602 i.op[op].regs->reg_name,
4603 i.suffix);
4604 #endif
4605 continue;
4606 }
4607 /* Any other register is bad. */
4608 if (i.types[op].bitfield.reg16
4609 || i.types[op].bitfield.reg32
4610 || i.types[op].bitfield.reg64
4611 || i.types[op].bitfield.regmmx
4612 || i.types[op].bitfield.regxmm
4613 || i.types[op].bitfield.regymm
4614 || i.types[op].bitfield.sreg2
4615 || i.types[op].bitfield.sreg3
4616 || i.types[op].bitfield.control
4617 || i.types[op].bitfield.debug
4618 || i.types[op].bitfield.test
4619 || i.types[op].bitfield.floatreg
4620 || i.types[op].bitfield.floatacc)
4621 {
4622 as_bad (_("`%s%s' not allowed with `%s%c'"),
4623 register_prefix,
4624 i.op[op].regs->reg_name,
4625 i.tm.name,
4626 i.suffix);
4627 return 0;
4628 }
4629 }
4630 return 1;
4631 }
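/* Illustrative behaviour of check_byte_reg (not from the original
   comments): outside 64-bit mode

       movb %eax, (%ebx)

   is accepted with a warning and assembled as if %al had been
   written, because %eax's register number is below 4; in 64-bit mode
   the same line is an error, and registers with no byte form at all
   (e.g. %mm0 or %cs) are always rejected with a `b' suffix.  */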
4632
4633 static int
4634 check_long_reg (void)
4635 {
4636 int op;
4637
4638 for (op = i.operands; --op >= 0;)
4639 /* Reject eight bit registers, except where the template requires
4640 them. (eg. movzb) */
4641 if (i.types[op].bitfield.reg8
4642 && (i.tm.operand_types[op].bitfield.reg16
4643 || i.tm.operand_types[op].bitfield.reg32
4644 || i.tm.operand_types[op].bitfield.acc))
4645 {
4646 as_bad (_("`%s%s' not allowed with `%s%c'"),
4647 register_prefix,
4648 i.op[op].regs->reg_name,
4649 i.tm.name,
4650 i.suffix);
4651 return 0;
4652 }
4653 /* Warn if the e prefix on a general reg is missing. */
4654 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4655 && i.types[op].bitfield.reg16
4656 && (i.tm.operand_types[op].bitfield.reg32
4657 || i.tm.operand_types[op].bitfield.acc))
4658 {
4659 /* Prohibit these changes in the 64bit mode, since the
4660 lowering is more complicated. */
4661 if (flag_code == CODE_64BIT)
4662 {
4663 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4664 register_prefix, i.op[op].regs->reg_name,
4665 i.suffix);
4666 return 0;
4667 }
4668 #if REGISTER_WARNINGS
4669 else
4670 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4671 register_prefix,
4672 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4673 register_prefix,
4674 i.op[op].regs->reg_name,
4675 i.suffix);
4676 #endif
4677 }
4678 /* Warn if the r prefix on a general reg is present. */
4679 else if (i.types[op].bitfield.reg64
4680 && (i.tm.operand_types[op].bitfield.reg32
4681 || i.tm.operand_types[op].bitfield.acc))
4682 {
4683 if (intel_syntax
4684 && i.tm.opcode_modifier.toqword
4685 && !i.types[0].bitfield.regxmm)
4686 {
4687 /* Convert to QWORD. We want REX byte. */
4688 i.suffix = QWORD_MNEM_SUFFIX;
4689 }
4690 else
4691 {
4692 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4693 register_prefix, i.op[op].regs->reg_name,
4694 i.suffix);
4695 return 0;
4696 }
4697 }
4698 return 1;
4699 }
4700
4701 static int
4702 check_qword_reg (void)
4703 {
4704 int op;
4705
4706 for (op = i.operands; --op >= 0; )
4707 /* Reject eight bit registers, except where the template requires
4708 them. (eg. movzb) */
4709 if (i.types[op].bitfield.reg8
4710 && (i.tm.operand_types[op].bitfield.reg16
4711 || i.tm.operand_types[op].bitfield.reg32
4712 || i.tm.operand_types[op].bitfield.acc))
4713 {
4714 as_bad (_("`%s%s' not allowed with `%s%c'"),
4715 register_prefix,
4716 i.op[op].regs->reg_name,
4717 i.tm.name,
4718 i.suffix);
4719 return 0;
4720 }
4721 /* Warn if the r prefix on a general reg is missing. */
4722 else if ((i.types[op].bitfield.reg16
4723 || i.types[op].bitfield.reg32)
4724 && (i.tm.operand_types[op].bitfield.reg32
4725 || i.tm.operand_types[op].bitfield.acc))
4726 {
4727 /* In Intel syntax some templates allow converting the operation
4728 to DWORD; otherwise this is an error. */
4729 if (intel_syntax
4730 && i.tm.opcode_modifier.todword
4731 && !i.types[0].bitfield.regxmm)
4732 {
4733 /* Convert to DWORD. We don't want REX byte. */
4734 i.suffix = LONG_MNEM_SUFFIX;
4735 }
4736 else
4737 {
4738 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4739 register_prefix, i.op[op].regs->reg_name,
4740 i.suffix);
4741 return 0;
4742 }
4743 }
4744 return 1;
4745 }
4746
4747 static int
4748 check_word_reg (void)
4749 {
4750 int op;
4751 for (op = i.operands; --op >= 0;)
4752 /* Reject eight bit registers, except where the template requires
4753 them. (eg. movzb) */
4754 if (i.types[op].bitfield.reg8
4755 && (i.tm.operand_types[op].bitfield.reg16
4756 || i.tm.operand_types[op].bitfield.reg32
4757 || i.tm.operand_types[op].bitfield.acc))
4758 {
4759 as_bad (_("`%s%s' not allowed with `%s%c'"),
4760 register_prefix,
4761 i.op[op].regs->reg_name,
4762 i.tm.name,
4763 i.suffix);
4764 return 0;
4765 }
4766 /* Warn if the e prefix on a general reg is present. */
4767 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4768 && i.types[op].bitfield.reg32
4769 && (i.tm.operand_types[op].bitfield.reg16
4770 || i.tm.operand_types[op].bitfield.acc))
4771 {
4772 /* Prohibit these changes in the 64bit mode, since the
4773 lowering is more complicated. */
4774 if (flag_code == CODE_64BIT)
4775 {
4776 as_bad (_("Incorrect register `%s%s' used with `%c' suffix"),
4777 register_prefix, i.op[op].regs->reg_name,
4778 i.suffix);
4779 return 0;
4780 }
4781 else
4782 #if REGISTER_WARNINGS
4783 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4784 register_prefix,
4785 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4786 register_prefix,
4787 i.op[op].regs->reg_name,
4788 i.suffix);
4789 #endif
4790 }
4791 return 1;
4792 }
4793
4794 static int
4795 update_imm (unsigned int j)
4796 {
4797 i386_operand_type overlap = i.types[j];
4798 if ((overlap.bitfield.imm8
4799 || overlap.bitfield.imm8s
4800 || overlap.bitfield.imm16
4801 || overlap.bitfield.imm32
4802 || overlap.bitfield.imm32s
4803 || overlap.bitfield.imm64)
4804 && !operand_type_equal (&overlap, &imm8)
4805 && !operand_type_equal (&overlap, &imm8s)
4806 && !operand_type_equal (&overlap, &imm16)
4807 && !operand_type_equal (&overlap, &imm32)
4808 && !operand_type_equal (&overlap, &imm32s)
4809 && !operand_type_equal (&overlap, &imm64))
4810 {
4811 if (i.suffix)
4812 {
4813 i386_operand_type temp;
4814
4815 operand_type_set (&temp, 0);
4816 if (i.suffix == BYTE_MNEM_SUFFIX)
4817 {
4818 temp.bitfield.imm8 = overlap.bitfield.imm8;
4819 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4820 }
4821 else if (i.suffix == WORD_MNEM_SUFFIX)
4822 temp.bitfield.imm16 = overlap.bitfield.imm16;
4823 else if (i.suffix == QWORD_MNEM_SUFFIX)
4824 {
4825 temp.bitfield.imm64 = overlap.bitfield.imm64;
4826 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4827 }
4828 else
4829 temp.bitfield.imm32 = overlap.bitfield.imm32;
4830 overlap = temp;
4831 }
4832 else if (operand_type_equal (&overlap, &imm16_32_32s)
4833 || operand_type_equal (&overlap, &imm16_32)
4834 || operand_type_equal (&overlap, &imm16_32s))
4835 {
4836 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4837 overlap = imm16;
4838 else
4839 overlap = imm32s;
4840 }
4841 if (!operand_type_equal (&overlap, &imm8)
4842 && !operand_type_equal (&overlap, &imm8s)
4843 && !operand_type_equal (&overlap, &imm16)
4844 && !operand_type_equal (&overlap, &imm32)
4845 && !operand_type_equal (&overlap, &imm32s)
4846 && !operand_type_equal (&overlap, &imm64))
4847 {
4848 as_bad (_("no instruction mnemonic suffix given; "
4849 "can't determine immediate size"));
4850 return 0;
4851 }
4852 }
4853 i.types[j] = overlap;
4854
4855 return 1;
4856 }
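/* Example of the narrowing above (illustrative):

       addw $0x1234, %ax	# immediate narrowed to Imm16 (2 bytes)
       addl $0x1234, %eax	# immediate kept as Imm32 (4 bytes)

   i.e. when a template allows several immediate widths, the suffix
   (or, failing that, the data size prefix / 16-bit mode) decides how
   many bytes output_imm will emit.  */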
4857
4858 static int
4859 finalize_imm (void)
4860 {
4861 unsigned int j, n;
4862
4863 /* Update the first 2 immediate operands. */
4864 n = i.operands > 2 ? 2 : i.operands;
4865 if (n)
4866 {
4867 for (j = 0; j < n; j++)
4868 if (update_imm (j) == 0)
4869 return 0;
4870
4871 /* The 3rd operand can't be immediate operand. */
4872 gas_assert (operand_type_check (i.types[2], imm) == 0);
4873 }
4874
4875 return 1;
4876 }
4877
4878 static int
4879 bad_implicit_operand (int xmm)
4880 {
4881 const char *ireg = xmm ? "xmm0" : "ymm0";
4882
4883 if (intel_syntax)
4884 as_bad (_("the last operand of `%s' must be `%s%s'"),
4885 i.tm.name, register_prefix, ireg);
4886 else
4887 as_bad (_("the first operand of `%s' must be `%s%s'"),
4888 i.tm.name, register_prefix, ireg);
4889 return 0;
4890 }
4891
4892 static int
4893 process_operands (void)
4894 {
4895 /* Default segment register this instruction will use for memory
4896 accesses. 0 means unknown. This is only for optimizing out
4897 unnecessary segment overrides. */
4898 const seg_entry *default_seg = 0;
4899
4900 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
4901 {
4902 unsigned int dupl = i.operands;
4903 unsigned int dest = dupl - 1;
4904 unsigned int j;
4905
4906 /* The destination must be an xmm register. */
4907 gas_assert (i.reg_operands
4908 && MAX_OPERANDS > dupl
4909 && operand_type_equal (&i.types[dest], &regxmm));
4910
4911 if (i.tm.opcode_modifier.firstxmm0)
4912 {
4913 /* The first operand is implicit and must be xmm0. */
4914 gas_assert (operand_type_equal (&i.types[0], &regxmm));
4915 if (i.op[0].regs->reg_num != 0)
4916 return bad_implicit_operand (1);
4917
4918 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4919 {
4920 /* Keep xmm0 for instructions with VEX prefix and 3
4921 sources. */
4922 goto duplicate;
4923 }
4924 else
4925 {
4926 /* We remove the first xmm0 and keep the number of
4927 operands unchanged, which in fact duplicates the
4928 destination. */
4929 for (j = 1; j < i.operands; j++)
4930 {
4931 i.op[j - 1] = i.op[j];
4932 i.types[j - 1] = i.types[j];
4933 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
4934 }
4935 }
4936 }
4937 else if (i.tm.opcode_modifier.implicit1stxmm0)
4938 {
4939 gas_assert ((MAX_OPERANDS - 1) > dupl
4940 && (i.tm.opcode_modifier.vexsources
4941 == VEX3SOURCES));
4942
4943 /* Add the implicit xmm0 for instructions with VEX prefix
4944 and 3 sources. */
4945 for (j = i.operands; j > 0; j--)
4946 {
4947 i.op[j] = i.op[j - 1];
4948 i.types[j] = i.types[j - 1];
4949 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
4950 }
4951 i.op[0].regs
4952 = (const reg_entry *) hash_find (reg_hash, "xmm0");
4953 i.types[0] = regxmm;
4954 i.tm.operand_types[0] = regxmm;
4955
4956 i.operands += 2;
4957 i.reg_operands += 2;
4958 i.tm.operands += 2;
4959
4960 dupl++;
4961 dest++;
4962 i.op[dupl] = i.op[dest];
4963 i.types[dupl] = i.types[dest];
4964 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4965 }
4966 else
4967 {
4968 duplicate:
4969 i.operands++;
4970 i.reg_operands++;
4971 i.tm.operands++;
4972
4973 i.op[dupl] = i.op[dest];
4974 i.types[dupl] = i.types[dest];
4975 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
4976 }
4977
4978 if (i.tm.opcode_modifier.immext)
4979 process_immext ();
4980 }
4981 else if (i.tm.opcode_modifier.firstxmm0)
4982 {
4983 unsigned int j;
4984
4985 /* The first operand is implicit and must be xmm0/ymm0. */
4986 gas_assert (i.reg_operands
4987 && (operand_type_equal (&i.types[0], &regxmm)
4988 || operand_type_equal (&i.types[0], &regymm)));
4989 if (i.op[0].regs->reg_num != 0)
4990 return bad_implicit_operand (i.types[0].bitfield.regxmm);
4991
4992 for (j = 1; j < i.operands; j++)
4993 {
4994 i.op[j - 1] = i.op[j];
4995 i.types[j - 1] = i.types[j];
4996
4997 /* We need to adjust fields in i.tm since they are used by
4998 build_modrm_byte. */
4999 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5000 }
5001
5002 i.operands--;
5003 i.reg_operands--;
5004 i.tm.operands--;
5005 }
5006 else if (i.tm.opcode_modifier.regkludge)
5007 {
5008 /* The imul $imm, %reg instruction is converted into
5009 imul $imm, %reg, %reg, and the clr %reg instruction
5010 is converted into xor %reg, %reg. */
5011
5012 unsigned int first_reg_op;
5013
5014 if (operand_type_check (i.types[0], reg))
5015 first_reg_op = 0;
5016 else
5017 first_reg_op = 1;
5018 /* Pretend we saw the extra register operand. */
5019 gas_assert (i.reg_operands == 1
5020 && i.op[first_reg_op + 1].regs == 0);
5021 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5022 i.types[first_reg_op + 1] = i.types[first_reg_op];
5023 i.operands++;
5024 i.reg_operands++;
5025 }
5026
5027 if (i.tm.opcode_modifier.shortform)
5028 {
5029 if (i.types[0].bitfield.sreg2
5030 || i.types[0].bitfield.sreg3)
5031 {
5032 if (i.tm.base_opcode == POP_SEG_SHORT
5033 && i.op[0].regs->reg_num == 1)
5034 {
5035 as_bad (_("you can't `pop %scs'"), register_prefix);
5036 return 0;
5037 }
5038 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5039 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5040 i.rex |= REX_B;
5041 }
5042 else
5043 {
5044 /* The register or float register operand is in operand
5045 0 or 1. */
5046 unsigned int op;
5047
5048 if (i.types[0].bitfield.floatreg
5049 || operand_type_check (i.types[0], reg))
5050 op = 0;
5051 else
5052 op = 1;
5053 /* Register goes in low 3 bits of opcode. */
5054 i.tm.base_opcode |= i.op[op].regs->reg_num;
5055 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5056 i.rex |= REX_B;
5057 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5058 {
5059 /* Warn about some common errors, but press on regardless.
5060 The first case can be generated by gcc (<= 2.8.1). */
5061 if (i.operands == 2)
5062 {
5063 /* Reversed arguments on faddp, fsubp, etc. */
5064 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5065 register_prefix, i.op[!intel_syntax].regs->reg_name,
5066 register_prefix, i.op[intel_syntax].regs->reg_name);
5067 }
5068 else
5069 {
5070 /* Extraneous `l' suffix on fp insn. */
5071 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5072 register_prefix, i.op[0].regs->reg_name);
5073 }
5074 }
5075 }
5076 }
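/* Example of the ShortForm encoding above (illustrative): the
   register number is folded into the low three bits of the opcode,

       push %ebx		# 0x50 | 3  ->  0x53
       pop %es			# 0x07; `pop %cs' would be 0x0f and is rejected

   and REX_B extends the field for %r8..%r15 in 64-bit mode.  */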
5077 else if (i.tm.opcode_modifier.modrm)
5078 {
5079 /* The opcode is completed (modulo i.tm.extension_opcode which
5080 must be put into the modrm byte). Now, we make the modrm and
5081 index base bytes based on all the info we've collected. */
5082
5083 default_seg = build_modrm_byte ();
5084 }
5085 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5086 {
5087 default_seg = &ds;
5088 }
5089 else if (i.tm.opcode_modifier.isstring)
5090 {
5091 /* For the string instructions that allow a segment override
5092 on one of their operands, the default segment is ds. */
5093 default_seg = &ds;
5094 }
5095
5096 if (i.tm.base_opcode == 0x8d /* lea */
5097 && i.seg[0]
5098 && !quiet_warnings)
5099 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5100
5101 /* If a segment was explicitly specified, and the specified segment
5102 is not the default, use an opcode prefix to select it. If we
5103 never figured out what the default segment is, then default_seg
5104 will be zero at this point, and the specified segment prefix will
5105 always be used. */
5106 if ((i.seg[0]) && (i.seg[0] != default_seg))
5107 {
5108 if (!add_prefix (i.seg[0]->seg_prefix))
5109 return 0;
5110 }
5111 return 1;
5112 }
5113
5114 static const seg_entry *
5115 build_modrm_byte (void)
5116 {
5117 const seg_entry *default_seg = 0;
5118 unsigned int source, dest;
5119 int vex_3_sources;
5120
5121 /* The first operand of instructions with VEX prefix and 3 sources
5122 must be VEX_Imm4. */
5123 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5124 if (vex_3_sources)
5125 {
5126 unsigned int nds, reg_slot;
5127 expressionS *exp;
5128
5129 if (i.tm.opcode_modifier.veximmext
5130 && i.tm.opcode_modifier.immext)
5131 {
5132 dest = i.operands - 2;
5133 gas_assert (dest == 3);
5134 }
5135 else
5136 dest = i.operands - 1;
5137 nds = dest - 1;
5138
5139 /* There are 2 kinds of instructions:
5140 1. 5 operands: 4 register operands or 3 register operands
5141 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5142 VexW0 or VexW1. The destination must be either XMM or YMM
5143 register.
5144 2. 4 operands: 4 register operands or 3 register operands
5145 plus 1 memory operand, VexXDS, and VexImmExt */
5146 gas_assert ((i.reg_operands == 4
5147 || (i.reg_operands == 3 && i.mem_operands == 1))
5148 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5149 && (i.tm.opcode_modifier.veximmext
5150 || (i.imm_operands == 1
5151 && i.types[0].bitfield.vec_imm4
5152 && (i.tm.opcode_modifier.vexw == VEXW0
5153 || i.tm.opcode_modifier.vexw == VEXW1)
5154 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5155 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5156
5157 if (i.imm_operands == 0)
5158 {
5159 /* When there is no immediate operand, generate an 8bit
5160 immediate operand to encode the first operand. */
5161 exp = &im_expressions[i.imm_operands++];
5162 i.op[i.operands].imms = exp;
5163 i.types[i.operands] = imm8;
5164 i.operands++;
5165 /* If VexW1 is set, the first operand is the source and
5166 the second operand is encoded in the immediate operand. */
5167 if (i.tm.opcode_modifier.vexw == VEXW1)
5168 {
5169 source = 0;
5170 reg_slot = 1;
5171 }
5172 else
5173 {
5174 source = 1;
5175 reg_slot = 0;
5176 }
5177
5178 /* FMA swaps REG and NDS. */
5179 if (i.tm.cpu_flags.bitfield.cpufma)
5180 {
5181 unsigned int tmp;
5182 tmp = reg_slot;
5183 reg_slot = nds;
5184 nds = tmp;
5185 }
5186
5187 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5188 &regxmm)
5189 || operand_type_equal (&i.tm.operand_types[reg_slot],
5190 &regymm));
5191 exp->X_op = O_constant;
5192 exp->X_add_number
5193 = ((i.op[reg_slot].regs->reg_num
5194 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5195 << 4);
5196 }
5197 else
5198 {
5199 unsigned int imm_slot;
5200
5201 if (i.tm.opcode_modifier.vexw == VEXW0)
5202 {
5203 /* If VexW0 is set, the third operand is the source and
5204 the second operand is encoded in the immediate
5205 operand. */
5206 source = 2;
5207 reg_slot = 1;
5208 }
5209 else
5210 {
5211 /* VexW1 is set, the second operand is the source and
5212 the third operand is encoded in the immediate
5213 operand. */
5214 source = 1;
5215 reg_slot = 2;
5216 }
5217
5218 if (i.tm.opcode_modifier.immext)
5219 {
5220 /* When ImmExt is set, the immediate byte is the last
5221 operand. */
5222 imm_slot = i.operands - 1;
5223 source--;
5224 reg_slot--;
5225 }
5226 else
5227 {
5228 imm_slot = 0;
5229
5230 /* Turn on Imm8 so that output_imm will generate it. */
5231 i.types[imm_slot].bitfield.imm8 = 1;
5232 }
5233
5234 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5235 &regxmm)
5236 || operand_type_equal (&i.tm.operand_types[reg_slot],
5237 &regymm));
5238 i.op[imm_slot].imms->X_add_number
5239 |= ((i.op[reg_slot].regs->reg_num
5240 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5241 << 4);
5242 }
5243
5244 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5245 || operand_type_equal (&i.tm.operand_types[nds],
5246 &regymm));
5247 i.vex.register_specifier = i.op[nds].regs;
5248 }
5249 else
5250 source = dest = 0;
5251
5252 /* i.reg_operands MUST be the number of real register operands;
5253 implicit registers do not count. If there are 3 register
5254 operands, it must be an instruction with VexNDS. For an
5255 instruction with VexNDD, the destination register is encoded
5256 in the VEX prefix. If there are 4 register operands, it must be
5257 an instruction with a VEX prefix and 3 sources. */
5258 if (i.mem_operands == 0
5259 && ((i.reg_operands == 2
5260 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5261 || (i.reg_operands == 3
5262 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5263 || (i.reg_operands == 4 && vex_3_sources)))
5264 {
5265 switch (i.operands)
5266 {
5267 case 2:
5268 source = 0;
5269 break;
5270 case 3:
5271 /* When there are 3 operands, one of them may be immediate,
5272 which may be the first or the last operand. Otherwise,
5273 the first operand must be shift count register (cl) or it
5274 is an instruction with VexNDS. */
5275 gas_assert (i.imm_operands == 1
5276 || (i.imm_operands == 0
5277 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5278 || i.types[0].bitfield.shiftcount)));
5279 if (operand_type_check (i.types[0], imm)
5280 || i.types[0].bitfield.shiftcount)
5281 source = 1;
5282 else
5283 source = 0;
5284 break;
5285 case 4:
5286 /* When there are 4 operands, the first two must be 8bit
5287 immediate operands. The source operand will be the 3rd
5288 one.
5289
5290 For instructions with VexNDS, if the first operand
5291 is an imm8, the source operand is the 2nd one. If the last
5292 operand is imm8, the source operand is the first one. */
5293 gas_assert ((i.imm_operands == 2
5294 && i.types[0].bitfield.imm8
5295 && i.types[1].bitfield.imm8)
5296 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5297 && i.imm_operands == 1
5298 && (i.types[0].bitfield.imm8
5299 || i.types[i.operands - 1].bitfield.imm8)));
5300 if (i.imm_operands == 2)
5301 source = 2;
5302 else
5303 {
5304 if (i.types[0].bitfield.imm8)
5305 source = 1;
5306 else
5307 source = 0;
5308 }
5309 break;
5310 case 5:
5311 break;
5312 default:
5313 abort ();
5314 }
5315
5316 if (!vex_3_sources)
5317 {
5318 dest = source + 1;
5319
5320 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5321 {
5322 /* For instructions with VexNDS, the register-only
5323 source operand must be 32/64bit integer, XMM or
5324 YMM register. It is encoded in VEX prefix. We
5325 need to clear RegMem bit before calling
5326 operand_type_equal. */
5327
5328 i386_operand_type op;
5329 unsigned int vvvv;
5330
5331 /* Check register-only source operand when two source
5332 operands are swapped. */
5333 if (!i.tm.operand_types[source].bitfield.baseindex
5334 && i.tm.operand_types[dest].bitfield.baseindex)
5335 {
5336 vvvv = source;
5337 source = dest;
5338 }
5339 else
5340 vvvv = dest;
5341
5342 op = i.tm.operand_types[vvvv];
5343 op.bitfield.regmem = 0;
5344 if ((dest + 1) >= i.operands
5345 || (op.bitfield.reg32 != 1
5346 && op.bitfield.reg64 != 1
5347 && !operand_type_equal (&op, &regxmm)
5348 && !operand_type_equal (&op, &regymm)))
5349 abort ();
5350 i.vex.register_specifier = i.op[vvvv].regs;
5351 dest++;
5352 }
5353 }
5354
5355 i.rm.mode = 3;
5356 /* One of the register operands will be encoded in the i.tm.reg
5357 field, the other in the combined i.tm.mode and i.tm.regmem
5358 fields. If no form of this instruction supports a memory
5359 destination operand, then we assume the source operand may
5360 sometimes be a memory operand and so we need to store the
5361 destination in the i.rm.reg field. */
5362 if (!i.tm.operand_types[dest].bitfield.regmem
5363 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5364 {
5365 i.rm.reg = i.op[dest].regs->reg_num;
5366 i.rm.regmem = i.op[source].regs->reg_num;
5367 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5368 i.rex |= REX_R;
5369 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5370 i.rex |= REX_B;
5371 }
5372 else
5373 {
5374 i.rm.reg = i.op[source].regs->reg_num;
5375 i.rm.regmem = i.op[dest].regs->reg_num;
5376 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5377 i.rex |= REX_B;
5378 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5379 i.rex |= REX_R;
5380 }
5381 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5382 {
5383 if (!i.types[0].bitfield.control
5384 && !i.types[1].bitfield.control)
5385 abort ();
5386 i.rex &= ~(REX_R | REX_B);
5387 add_prefix (LOCK_PREFIX_OPCODE);
5388 }
5389 }
5390 else
5391 { /* If it's not 2 reg operands... */
5392 unsigned int mem;
5393
5394 if (i.mem_operands)
5395 {
5396 unsigned int fake_zero_displacement = 0;
5397 unsigned int op;
5398
5399 for (op = 0; op < i.operands; op++)
5400 if (operand_type_check (i.types[op], anymem))
5401 break;
5402 gas_assert (op < i.operands);
5403
5404 default_seg = &ds;
5405
5406 if (i.base_reg == 0)
5407 {
5408 i.rm.mode = 0;
5409 if (!i.disp_operands)
5410 fake_zero_displacement = 1;
5411 if (i.index_reg == 0)
5412 {
5413 /* Operand is just <disp> */
5414 if (flag_code == CODE_64BIT)
5415 {
5416 /* In 64bit mode, 32bit absolute addressing is
5417 replaced by RIP relative addressing, so plain
5418 absolute addressing has to be encoded via one of
5419 the redundant SIB forms. */
5420 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5421 i.sib.base = NO_BASE_REGISTER;
5422 i.sib.index = NO_INDEX_REGISTER;
5423 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5424 ? disp32s : disp32);
5425 }
5426 else if ((flag_code == CODE_16BIT)
5427 ^ (i.prefix[ADDR_PREFIX] != 0))
5428 {
5429 i.rm.regmem = NO_BASE_REGISTER_16;
5430 i.types[op] = disp16;
5431 }
5432 else
5433 {
5434 i.rm.regmem = NO_BASE_REGISTER;
5435 i.types[op] = disp32;
5436 }
5437 }
5438 else /* !i.base_reg && i.index_reg */
5439 {
5440 if (i.index_reg->reg_num == RegEiz
5441 || i.index_reg->reg_num == RegRiz)
5442 i.sib.index = NO_INDEX_REGISTER;
5443 else
5444 i.sib.index = i.index_reg->reg_num;
5445 i.sib.base = NO_BASE_REGISTER;
5446 i.sib.scale = i.log2_scale_factor;
5447 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5448 i.types[op].bitfield.disp8 = 0;
5449 i.types[op].bitfield.disp16 = 0;
5450 i.types[op].bitfield.disp64 = 0;
5451 if (flag_code != CODE_64BIT)
5452 {
5453 /* Must be 32 bit */
5454 i.types[op].bitfield.disp32 = 1;
5455 i.types[op].bitfield.disp32s = 0;
5456 }
5457 else
5458 {
5459 i.types[op].bitfield.disp32 = 0;
5460 i.types[op].bitfield.disp32s = 1;
5461 }
5462 if ((i.index_reg->reg_flags & RegRex) != 0)
5463 i.rex |= REX_X;
5464 }
5465 }
5466 /* RIP addressing for 64bit mode. */
5467 else if (i.base_reg->reg_num == RegRip ||
5468 i.base_reg->reg_num == RegEip)
5469 {
5470 i.rm.regmem = NO_BASE_REGISTER;
5471 i.types[op].bitfield.disp8 = 0;
5472 i.types[op].bitfield.disp16 = 0;
5473 i.types[op].bitfield.disp32 = 0;
5474 i.types[op].bitfield.disp32s = 1;
5475 i.types[op].bitfield.disp64 = 0;
5476 i.flags[op] |= Operand_PCrel;
5477 if (! i.disp_operands)
5478 fake_zero_displacement = 1;
5479 }
5480 else if (i.base_reg->reg_type.bitfield.reg16)
5481 {
5482 switch (i.base_reg->reg_num)
5483 {
5484 case 3: /* (%bx) */
5485 if (i.index_reg == 0)
5486 i.rm.regmem = 7;
5487 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5488 i.rm.regmem = i.index_reg->reg_num - 6;
5489 break;
5490 case 5: /* (%bp) */
5491 default_seg = &ss;
5492 if (i.index_reg == 0)
5493 {
5494 i.rm.regmem = 6;
5495 if (operand_type_check (i.types[op], disp) == 0)
5496 {
5497 /* fake (%bp) into 0(%bp) */
5498 i.types[op].bitfield.disp8 = 1;
5499 fake_zero_displacement = 1;
5500 }
5501 }
5502 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5503 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5504 break;
5505 default: /* (%si) -> 4 or (%di) -> 5 */
5506 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5507 }
5508 i.rm.mode = mode_from_disp_size (i.types[op]);
5509 }
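/* Reminder of the 16-bit mod r/m encodings handled above (added for
   illustration):

       0 (%bx,%si)  1 (%bx,%di)  2 (%bp,%si)  3 (%bp,%di)
       4 (%si)      5 (%di)      6 disp16 or (%bp)  7 (%bx)

   which is where the `reg_num - 6' arithmetic and the fake zero
   displacement for a bare (%bp) come from.  */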
5510 else /* i.base_reg and 32/64 bit mode */
5511 {
5512 if (flag_code == CODE_64BIT
5513 && operand_type_check (i.types[op], disp))
5514 {
5515 i386_operand_type temp;
5516 operand_type_set (&temp, 0);
5517 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5518 i.types[op] = temp;
5519 if (i.prefix[ADDR_PREFIX] == 0)
5520 i.types[op].bitfield.disp32s = 1;
5521 else
5522 i.types[op].bitfield.disp32 = 1;
5523 }
5524
5525 i.rm.regmem = i.base_reg->reg_num;
5526 if ((i.base_reg->reg_flags & RegRex) != 0)
5527 i.rex |= REX_B;
5528 i.sib.base = i.base_reg->reg_num;
5529 /* x86-64 ignores REX prefix bit here to avoid decoder
5530 complications. */
5531 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5532 {
5533 default_seg = &ss;
5534 if (i.disp_operands == 0)
5535 {
5536 fake_zero_displacement = 1;
5537 i.types[op].bitfield.disp8 = 1;
5538 }
5539 }
5540 else if (i.base_reg->reg_num == ESP_REG_NUM)
5541 {
5542 default_seg = &ss;
5543 }
5544 i.sib.scale = i.log2_scale_factor;
5545 if (i.index_reg == 0)
5546 {
5547 /* <disp>(%esp) becomes a two-byte modrm with no index
5548 register. We've already stored the code for esp
5549 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5550 Any base register besides %esp will not need the
5551 extra SIB byte. */
5552 i.sib.index = NO_INDEX_REGISTER;
5553 }
5554 else
5555 {
5556 if (i.index_reg->reg_num == RegEiz
5557 || i.index_reg->reg_num == RegRiz)
5558 i.sib.index = NO_INDEX_REGISTER;
5559 else
5560 i.sib.index = i.index_reg->reg_num;
5561 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5562 if ((i.index_reg->reg_flags & RegRex) != 0)
5563 i.rex |= REX_X;
5564 }
5565
5566 if (i.disp_operands
5567 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5568 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5569 i.rm.mode = 0;
5570 else
5571 i.rm.mode = mode_from_disp_size (i.types[op]);
5572 }
5573
5574 if (fake_zero_displacement)
5575 {
5576 /* Fakes a zero displacement assuming that i.types[op]
5577 holds the correct displacement size. */
5578 expressionS *exp;
5579
5580 gas_assert (i.op[op].disps == 0);
5581 exp = &disp_expressions[i.disp_operands++];
5582 i.op[op].disps = exp;
5583 exp->X_op = O_constant;
5584 exp->X_add_number = 0;
5585 exp->X_add_symbol = (symbolS *) 0;
5586 exp->X_op_symbol = (symbolS *) 0;
5587 }
5588
5589 mem = op;
5590 }
5591 else
5592 mem = ~0;
5593
5594 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5595 {
5596 if (operand_type_check (i.types[0], imm))
5597 i.vex.register_specifier = NULL;
5598 else
5599 {
5600 /* VEX.vvvv encodes one of the sources when the first
5601 operand is not an immediate. */
5602 if (i.tm.opcode_modifier.vexw == VEXW0)
5603 i.vex.register_specifier = i.op[0].regs;
5604 else
5605 i.vex.register_specifier = i.op[1].regs;
5606 }
5607
5608 /* Destination is an XMM register encoded in the ModRM.reg
5609 and VEX.R bit. */
5610 i.rm.reg = i.op[2].regs->reg_num;
5611 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5612 i.rex |= REX_R;
5613
5614 /* ModRM.rm and VEX.B encodes the other source. */
5615 if (!i.mem_operands)
5616 {
5617 i.rm.mode = 3;
5618
5619 if (i.tm.opcode_modifier.vexw == VEXW0)
5620 i.rm.regmem = i.op[1].regs->reg_num;
5621 else
5622 i.rm.regmem = i.op[0].regs->reg_num;
5623
5624 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5625 i.rex |= REX_B;
5626 }
5627 }
5628 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5629 {
5630 i.vex.register_specifier = i.op[2].regs;
5631 if (!i.mem_operands)
5632 {
5633 i.rm.mode = 3;
5634 i.rm.regmem = i.op[1].regs->reg_num;
5635 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5636 i.rex |= REX_B;
5637 }
5638 }
5639 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5640 (if any) based on i.tm.extension_opcode. Again, we must be
5641 careful to make sure that segment/control/debug/test/MMX
5642 registers are coded into the i.rm.reg field. */
5643 else if (i.reg_operands)
5644 {
5645 unsigned int op;
5646 unsigned int vex_reg = ~0;
5647
5648 for (op = 0; op < i.operands; op++)
5649 if (i.types[op].bitfield.reg8
5650 || i.types[op].bitfield.reg16
5651 || i.types[op].bitfield.reg32
5652 || i.types[op].bitfield.reg64
5653 || i.types[op].bitfield.regmmx
5654 || i.types[op].bitfield.regxmm
5655 || i.types[op].bitfield.regymm
5656 || i.types[op].bitfield.sreg2
5657 || i.types[op].bitfield.sreg3
5658 || i.types[op].bitfield.control
5659 || i.types[op].bitfield.debug
5660 || i.types[op].bitfield.test)
5661 break;
5662
5663 if (vex_3_sources)
5664 op = dest;
5665 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5666 {
5667 /* For instructions with VexNDS, the register-only
5668 source operand is encoded in VEX prefix. */
5669 gas_assert (mem != (unsigned int) ~0);
5670
5671 if (op > mem)
5672 {
5673 vex_reg = op++;
5674 gas_assert (op < i.operands);
5675 }
5676 else
5677 {
5678 /* Check register-only source operand when two source
5679 operands are swapped. */
5680 if (!i.tm.operand_types[op].bitfield.baseindex
5681 && i.tm.operand_types[op + 1].bitfield.baseindex)
5682 {
5683 vex_reg = op;
5684 op += 2;
5685 gas_assert (mem == (vex_reg + 1)
5686 && op < i.operands);
5687 }
5688 else
5689 {
5690 vex_reg = op + 1;
5691 gas_assert (vex_reg < i.operands);
5692 }
5693 }
5694 }
5695 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5696 {
5697 /* For instructions with VexNDD, the register destination
5698 is encoded in VEX prefix. */
5699 if (i.mem_operands == 0)
5700 {
5701 /* There is no memory operand. */
5702 gas_assert ((op + 2) == i.operands);
5703 vex_reg = op + 1;
5704 }
5705 else
5706 {
5707 /* There are only 2 operands. */
5708 gas_assert (op < 2 && i.operands == 2);
5709 vex_reg = 1;
5710 }
5711 }
5712 else
5713 gas_assert (op < i.operands);
5714
5715 if (vex_reg != (unsigned int) ~0)
5716 {
5717 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5718
5719 if (type->bitfield.reg32 != 1
5720 && type->bitfield.reg64 != 1
5721 && !operand_type_equal (type, &regxmm)
5722 && !operand_type_equal (type, &regymm))
5723 abort ();
5724
5725 i.vex.register_specifier = i.op[vex_reg].regs;
5726 }
5727
5728 /* Don't set OP operand twice. */
5729 if (vex_reg != op)
5730 {
5731 /* If there is an extension opcode to put here, the
5732 register number must be put into the regmem field. */
5733 if (i.tm.extension_opcode != None)
5734 {
5735 i.rm.regmem = i.op[op].regs->reg_num;
5736 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5737 i.rex |= REX_B;
5738 }
5739 else
5740 {
5741 i.rm.reg = i.op[op].regs->reg_num;
5742 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5743 i.rex |= REX_R;
5744 }
5745 }
5746
5747 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5748 must set it to 3 to indicate this is a register operand
5749 in the regmem field. */
5750 if (!i.mem_operands)
5751 i.rm.mode = 3;
5752 }
5753
5754 /* Fill in i.rm.reg field with extension opcode (if any). */
5755 if (i.tm.extension_opcode != None)
5756 i.rm.reg = i.tm.extension_opcode;
5757 }
5758 return default_seg;
5759 }
5760
5761 static void
5762 output_branch (void)
5763 {
5764 char *p;
5765 int size;
5766 int code16;
5767 int prefix;
5768 relax_substateT subtype;
5769 symbolS *sym;
5770 offsetT off;
5771
5772 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5773 size = i.disp32_encoding ? BIG : SMALL;
5774
5775 prefix = 0;
5776 if (i.prefix[DATA_PREFIX] != 0)
5777 {
5778 prefix = 1;
5779 i.prefixes -= 1;
5780 code16 ^= CODE16;
5781 }
5782 /* Pentium4 branch hints. */
5783 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5784 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5785 {
5786 prefix++;
5787 i.prefixes--;
5788 }
5789 if (i.prefix[REX_PREFIX] != 0)
5790 {
5791 prefix++;
5792 i.prefixes--;
5793 }
5794
5795 if (i.prefixes != 0 && !intel_syntax)
5796 as_warn (_("skipping prefixes on this instruction"));
5797
5798 /* It's always a symbol; end the frag & set up for relax.
5799 Make sure there is enough room in this frag for the largest
5800 instruction we may generate in md_convert_frag. This is 2
5801 bytes for the opcode and room for the prefix and largest
5802 displacement. */
5803 frag_grow (prefix + 2 + 4);
5804 /* Prefix and 1 opcode byte go in fr_fix. */
5805 p = frag_more (prefix + 1);
5806 if (i.prefix[DATA_PREFIX] != 0)
5807 *p++ = DATA_PREFIX_OPCODE;
5808 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
5809 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
5810 *p++ = i.prefix[SEG_PREFIX];
5811 if (i.prefix[REX_PREFIX] != 0)
5812 *p++ = i.prefix[REX_PREFIX];
5813 *p = i.tm.base_opcode;
5814
5815 if ((unsigned char) *p == JUMP_PC_RELATIVE)
5816 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
5817 else if (cpu_arch_flags.bitfield.cpui386)
5818 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
5819 else
5820 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
5821 subtype |= code16;
5822
5823 sym = i.op[0].disps->X_add_symbol;
5824 off = i.op[0].disps->X_add_number;
5825
5826 if (i.op[0].disps->X_op != O_constant
5827 && i.op[0].disps->X_op != O_symbol)
5828 {
5829 /* Handle complex expressions. */
5830 sym = make_expr_symbol (i.op[0].disps);
5831 off = 0;
5832 }
5833
5834 /* 1 possible extra opcode + 4 byte displacement go in var part.
5835 Pass reloc in fr_var. */
5836 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
5837 }
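/* Sketch of what happens next (illustrative, assuming the usual gas
   relaxation machinery): a conditional branch such as

       jne .Lfar		# starts as 75 <rel8>

   is left in a relaxable frag, and md_convert_frag widens it to the
   0f 85 <rel32> near form if the target is out of byte range; for
   pre-386 cpus (COND_JUMP86) a branch around an unconditional jmp is
   emitted instead, since no long-displacement jcc exists there.  */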
5838
5839 static void
5840 output_jump (void)
5841 {
5842 char *p;
5843 int size;
5844 fixS *fixP;
5845
5846 if (i.tm.opcode_modifier.jumpbyte)
5847 {
5848 /* This is a loop or jecxz type instruction. */
5849 size = 1;
5850 if (i.prefix[ADDR_PREFIX] != 0)
5851 {
5852 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
5853 i.prefixes -= 1;
5854 }
5855 /* Pentium4 branch hints. */
5856 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5857 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
5858 {
5859 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
5860 i.prefixes--;
5861 }
5862 }
5863 else
5864 {
5865 int code16;
5866
5867 code16 = 0;
5868 if (flag_code == CODE_16BIT)
5869 code16 = CODE16;
5870
5871 if (i.prefix[DATA_PREFIX] != 0)
5872 {
5873 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
5874 i.prefixes -= 1;
5875 code16 ^= CODE16;
5876 }
5877
5878 size = 4;
5879 if (code16)
5880 size = 2;
5881 }
5882
5883 if (i.prefix[REX_PREFIX] != 0)
5884 {
5885 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
5886 i.prefixes -= 1;
5887 }
5888
5889 if (i.prefixes != 0 && !intel_syntax)
5890 as_warn (_("skipping prefixes on this instruction"));
5891
5892 p = frag_more (1 + size);
5893 *p++ = i.tm.base_opcode;
5894
5895 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5896 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
5897
5898 /* All jumps handled here are signed, but don't use a signed limit
5899 check for 32 and 16 bit jumps as we want to allow wrap around at
5900 4G and 64k respectively. */
5901 if (size == 1)
5902 fixP->fx_signed = 1;
5903 }
5904
5905 static void
5906 output_interseg_jump (void)
5907 {
5908 char *p;
5909 int size;
5910 int prefix;
5911 int code16;
5912
5913 code16 = 0;
5914 if (flag_code == CODE_16BIT)
5915 code16 = CODE16;
5916
5917 prefix = 0;
5918 if (i.prefix[DATA_PREFIX] != 0)
5919 {
5920 prefix = 1;
5921 i.prefixes -= 1;
5922 code16 ^= CODE16;
5923 }
5924 if (i.prefix[REX_PREFIX] != 0)
5925 {
5926 prefix++;
5927 i.prefixes -= 1;
5928 }
5929
5930 size = 4;
5931 if (code16)
5932 size = 2;
5933
5934 if (i.prefixes != 0 && !intel_syntax)
5935 as_warn (_("skipping prefixes on this instruction"));
5936
5937 /* 1 opcode; 2 segment; offset */
5938 p = frag_more (prefix + 1 + 2 + size);
5939
5940 if (i.prefix[DATA_PREFIX] != 0)
5941 *p++ = DATA_PREFIX_OPCODE;
5942
5943 if (i.prefix[REX_PREFIX] != 0)
5944 *p++ = i.prefix[REX_PREFIX];
5945
5946 *p++ = i.tm.base_opcode;
5947 if (i.op[1].imms->X_op == O_constant)
5948 {
5949 offsetT n = i.op[1].imms->X_add_number;
5950
5951 if (size == 2
5952 && !fits_in_unsigned_word (n)
5953 && !fits_in_signed_word (n))
5954 {
5955 as_bad (_("16-bit jump out of range"));
5956 return;
5957 }
5958 md_number_to_chars (p, n, size);
5959 }
5960 else
5961 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
5962 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
5963 if (i.op[0].imms->X_op != O_constant)
5964 as_bad (_("can't handle non absolute segment in `%s'"),
5965 i.tm.name);
5966 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
5967 }
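/* Byte layout produced above (illustrative): for

       ljmp $0x10, $start	# in 32-bit code

   the bytes are 0xea, the 4-byte offset of `start', then the 2-byte
   selector 0x0010; with a data size prefix (or in 16-bit code) the
   offset shrinks to 2 bytes.  */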
5968
5969 static void
5970 output_insn (void)
5971 {
5972 fragS *insn_start_frag;
5973 offsetT insn_start_off;
5974
5975 /* Tie dwarf2 debug info to the address at the start of the insn.
5976 We can't do this after the insn has been output as the current
5977 frag may have been closed off. eg. by frag_var. */
5978 dwarf2_emit_insn (0);
5979
5980 insn_start_frag = frag_now;
5981 insn_start_off = frag_now_fix ();
5982
5983 /* Output jumps. */
5984 if (i.tm.opcode_modifier.jump)
5985 output_branch ();
5986 else if (i.tm.opcode_modifier.jumpbyte
5987 || i.tm.opcode_modifier.jumpdword)
5988 output_jump ();
5989 else if (i.tm.opcode_modifier.jumpintersegment)
5990 output_interseg_jump ();
5991 else
5992 {
5993 /* Output normal instructions here. */
5994 char *p;
5995 unsigned char *q;
5996 unsigned int j;
5997 unsigned int prefix;
5998
5999 /* Since the VEX prefix contains the implicit prefix, we don't
6000 need the explicit prefix. */
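/* (A sketch, from the encoding rules rather than anything specific to
   this file: the VEX prefix replaces any REX prefix, the 0x66/0xf3/0xf2
   mandatory prefix and the 0x0f escape byte(s), which is why only
   segment and address-size prefixes may still be emitted on their own
   further below.)  */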
6001 if (!i.tm.opcode_modifier.vex)
6002 {
6003 switch (i.tm.opcode_length)
6004 {
6005 case 3:
6006 if (i.tm.base_opcode & 0xff000000)
6007 {
6008 prefix = (i.tm.base_opcode >> 24) & 0xff;
6009 goto check_prefix;
6010 }
6011 break;
6012 case 2:
6013 if ((i.tm.base_opcode & 0xff0000) != 0)
6014 {
6015 prefix = (i.tm.base_opcode >> 16) & 0xff;
6016 if (i.tm.cpu_flags.bitfield.cpupadlock)
6017 {
6018 check_prefix:
6019 if (prefix != REPE_PREFIX_OPCODE
6020 || (i.prefix[REP_PREFIX]
6021 != REPE_PREFIX_OPCODE))
6022 add_prefix (prefix);
6023 }
6024 else
6025 add_prefix (prefix);
6026 }
6027 break;
6028 case 1:
6029 break;
6030 default:
6031 abort ();
6032 }
6033
6034 /* The prefix bytes. */
6035 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6036 if (*q)
6037 FRAG_APPEND_1_CHAR (*q);
6038 }
6039
6040 if (i.tm.opcode_modifier.vex)
6041 {
6042 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6043 if (*q)
6044 switch (j)
6045 {
6046 case REX_PREFIX:
6047 /* REX byte is encoded in VEX prefix. */
6048 break;
6049 case SEG_PREFIX:
6050 case ADDR_PREFIX:
6051 FRAG_APPEND_1_CHAR (*q);
6052 break;
6053 default:
6054 /* There should be no other prefixes for instructions
6055 with VEX prefix. */
6056 abort ();
6057 }
6058
6059 /* Now the VEX prefix. */
6060 p = frag_more (i.vex.length);
6061 for (j = 0; j < i.vex.length; j++)
6062 p[j] = i.vex.bytes[j];
6063 }
6064
6065 /* Now the opcode; be careful about word order here! */
6066 if (i.tm.opcode_length == 1)
6067 {
6068 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6069 }
6070 else
6071 {
6072 switch (i.tm.opcode_length)
6073 {
6074 case 3:
6075 p = frag_more (3);
6076 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6077 break;
6078 case 2:
6079 p = frag_more (2);
6080 break;
6081 default:
6082 abort ();
6083 break;
6084 }
6085
6086 /* Put out high byte first: can't use md_number_to_chars! */
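/* For instance (roughly), a two-byte opcode such as 0x0fa2 (cpuid)
   must come out as 0x0f followed by 0xa2, the opposite of the
   little-endian order md_number_to_chars would produce.  */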
6087 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6088 *p = i.tm.base_opcode & 0xff;
6089 }
6090
6091 /* Now the modrm byte and sib byte (if present). */
6092 if (i.tm.opcode_modifier.modrm)
6093 {
6094 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6095 | i.rm.reg << 3
6096 | i.rm.mode << 6));
6097 /* If i.rm.regmem == ESP (4)
6098 && i.rm.mode != (Register mode)
6099 && not 16 bit
6100 ==> need a SIB byte. */
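/* E.g. (a rough sketch): 32-bit `movl (%esp), %eax' needs ModRM 0x04
   plus the SIB byte 0x24, since rm == 4 is the escape to SIB
   addressing.  */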
6101 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6102 && i.rm.mode != 3
6103 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6104 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6105 | i.sib.index << 3
6106 | i.sib.scale << 6));
6107 }
6108
6109 if (i.disp_operands)
6110 output_disp (insn_start_frag, insn_start_off);
6111
6112 if (i.imm_operands)
6113 output_imm (insn_start_frag, insn_start_off);
6114 }
6115
6116 #ifdef DEBUG386
6117 if (flag_debug)
6118 {
6119 pi ("" /*line*/, &i);
6120 }
6121 #endif /* DEBUG386 */
6122 }
6123
6124 /* Return the size of the displacement operand N. */
6125
6126 static int
6127 disp_size (unsigned int n)
6128 {
6129 int size = 4;
6130 if (i.types[n].bitfield.disp64)
6131 size = 8;
6132 else if (i.types[n].bitfield.disp8)
6133 size = 1;
6134 else if (i.types[n].bitfield.disp16)
6135 size = 2;
6136 return size;
6137 }
6138
6139 /* Return the size of the immediate operand N. */
6140
6141 static int
6142 imm_size (unsigned int n)
6143 {
6144 int size = 4;
6145 if (i.types[n].bitfield.imm64)
6146 size = 8;
6147 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6148 size = 1;
6149 else if (i.types[n].bitfield.imm16)
6150 size = 2;
6151 return size;
6152 }
6153
6154 static void
6155 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6156 {
6157 char *p;
6158 unsigned int n;
6159
6160 for (n = 0; n < i.operands; n++)
6161 {
6162 if (operand_type_check (i.types[n], disp))
6163 {
6164 if (i.op[n].disps->X_op == O_constant)
6165 {
6166 int size = disp_size (n);
6167 offsetT val;
6168
6169 val = offset_in_range (i.op[n].disps->X_add_number,
6170 size);
6171 p = frag_more (size);
6172 md_number_to_chars (p, val, size);
6173 }
6174 else
6175 {
6176 enum bfd_reloc_code_real reloc_type;
6177 int size = disp_size (n);
6178 int sign = i.types[n].bitfield.disp32s;
6179 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6180
6181 /* We can't have 8 bit displacement here. */
6182 gas_assert (!i.types[n].bitfield.disp8);
6183
6184 /* The PC relative address is computed relative
6185 to the instruction boundary, so in case an immediate
6186 field follows, we need to adjust the value. */
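/* Roughly: the fixup is resolved relative to the end of the
   displacement field, while the CPU computes the address relative to
   the end of the whole instruction, so the size of any trailing
   immediate is subtracted from the addend to make up the difference.  */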
6187 if (pcrel && i.imm_operands)
6188 {
6189 unsigned int n1;
6190 int sz = 0;
6191
6192 for (n1 = 0; n1 < i.operands; n1++)
6193 if (operand_type_check (i.types[n1], imm))
6194 {
6195 /* Only one immediate is allowed for PC
6196 relative address. */
6197 gas_assert (sz == 0);
6198 sz = imm_size (n1);
6199 i.op[n].disps->X_add_number -= sz;
6200 }
6201 /* We should find the immediate. */
6202 gas_assert (sz != 0);
6203 }
6204
6205 p = frag_more (size);
6206 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6207 if (GOT_symbol
6208 && GOT_symbol == i.op[n].disps->X_add_symbol
6209 && (((reloc_type == BFD_RELOC_32
6210 || reloc_type == BFD_RELOC_X86_64_32S
6211 || (reloc_type == BFD_RELOC_64
6212 && object_64bit))
6213 && (i.op[n].disps->X_op == O_symbol
6214 || (i.op[n].disps->X_op == O_add
6215 && ((symbol_get_value_expression
6216 (i.op[n].disps->X_op_symbol)->X_op)
6217 == O_subtract))))
6218 || reloc_type == BFD_RELOC_32_PCREL))
6219 {
6220 offsetT add;
6221
6222 if (insn_start_frag == frag_now)
6223 add = (p - frag_now->fr_literal) - insn_start_off;
6224 else
6225 {
6226 fragS *fr;
6227
6228 add = insn_start_frag->fr_fix - insn_start_off;
6229 for (fr = insn_start_frag->fr_next;
6230 fr && fr != frag_now; fr = fr->fr_next)
6231 add += fr->fr_fix;
6232 add += p - frag_now->fr_literal;
6233 }
6234
6235 if (!object_64bit)
6236 {
6237 reloc_type = BFD_RELOC_386_GOTPC;
6238 i.op[n].imms->X_add_number += add;
6239 }
6240 else if (reloc_type == BFD_RELOC_64)
6241 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6242 else
6243 /* Don't do the adjustment for x86-64, as there
6244 the pcrel addressing is relative to the _next_
6245 insn, and that is taken care of in other code. */
6246 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6247 }
6248 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6249 i.op[n].disps, pcrel, reloc_type);
6250 }
6251 }
6252 }
6253 }
6254
6255 static void
6256 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6257 {
6258 char *p;
6259 unsigned int n;
6260
6261 for (n = 0; n < i.operands; n++)
6262 {
6263 if (operand_type_check (i.types[n], imm))
6264 {
6265 if (i.op[n].imms->X_op == O_constant)
6266 {
6267 int size = imm_size (n);
6268 offsetT val;
6269
6270 val = offset_in_range (i.op[n].imms->X_add_number,
6271 size);
6272 p = frag_more (size);
6273 md_number_to_chars (p, val, size);
6274 }
6275 else
6276 {
6277 /* Not absolute_section.
6278 Need a 32-bit fixup (don't support 8bit
6279 non-absolute imms). Try to support other
6280 sizes ... */
6281 enum bfd_reloc_code_real reloc_type;
6282 int size = imm_size (n);
6283 int sign;
6284
6285 if (i.types[n].bitfield.imm32s
6286 && (i.suffix == QWORD_MNEM_SUFFIX
6287 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6288 sign = 1;
6289 else
6290 sign = 0;
6291
6292 p = frag_more (size);
6293 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6294
6295 /* This is tough to explain. We end up with this one if we
6296 * have operands that look like
6297 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6298 * obtain the absolute address of the GOT, and it is strongly
6299 * preferable from a performance point of view to avoid using
6300 * a runtime relocation for this. The actual sequence of
6301 * instructions often looks something like:
6302 *
6303 * call .L66
6304 * .L66:
6305 * popl %ebx
6306 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6307 *
6308 * The call and pop essentially return the absolute address
6309 * of the label .L66 and store it in %ebx. The linker itself
6310 * will ultimately change the first operand of the addl so
6311 * that %ebx points to the GOT, but to keep things simple, the
6312 * .o file must have this operand set so that it generates not
6313 * the absolute address of .L66, but the absolute address of
6314 * itself. This allows the linker itself to simply treat a GOTPC
6315 * relocation as asking for a pcrel offset to the GOT to be
6316 * added in, and the addend of the relocation is stored in the
6317 * operand field for the instruction itself.
6318 *
6319 * Our job here is to fix the operand so that it would add
6320 * the correct offset so that %ebx would point to itself. The
6321 * thing that is tricky is that .-.L66 will point to the
6322 * beginning of the instruction, so we need to further modify
6323 * the operand so that it will point to itself. There are
6324 * other cases where you have something like:
6325 *
6326 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6327 *
6328 * and here no correction would be required. Internally in
6329 * the assembler we treat operands of this form as not being
6330 * pcrel since the '.' is explicitly mentioned, and I wonder
6331 * whether it would simplify matters to do it this way. Who
6332 * knows. In earlier versions of the PIC patches, the
6333 * pcrel_adjust field was used to store the correction, but
6334 * since the expression is not pcrel, I felt it would be
6335 * confusing to do it this way. */
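/* A rough worked example of the adjustment below: the addl above
   encodes as 0x81 0xc3 followed by the 32-bit immediate, so the
   immediate field starts 2 bytes into the insn; `add' comes out as 2
   and is folded into the addend, so the GOTPC value ends up relative
   to the immediate field itself rather than to the start of the insn.  */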
6336
6337 if ((reloc_type == BFD_RELOC_32
6338 || reloc_type == BFD_RELOC_X86_64_32S
6339 || reloc_type == BFD_RELOC_64)
6340 && GOT_symbol
6341 && GOT_symbol == i.op[n].imms->X_add_symbol
6342 && (i.op[n].imms->X_op == O_symbol
6343 || (i.op[n].imms->X_op == O_add
6344 && ((symbol_get_value_expression
6345 (i.op[n].imms->X_op_symbol)->X_op)
6346 == O_subtract))))
6347 {
6348 offsetT add;
6349
6350 if (insn_start_frag == frag_now)
6351 add = (p - frag_now->fr_literal) - insn_start_off;
6352 else
6353 {
6354 fragS *fr;
6355
6356 add = insn_start_frag->fr_fix - insn_start_off;
6357 for (fr = insn_start_frag->fr_next;
6358 fr && fr != frag_now; fr = fr->fr_next)
6359 add += fr->fr_fix;
6360 add += p - frag_now->fr_literal;
6361 }
6362
6363 if (!object_64bit)
6364 reloc_type = BFD_RELOC_386_GOTPC;
6365 else if (size == 4)
6366 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6367 else if (size == 8)
6368 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6369 i.op[n].imms->X_add_number += add;
6370 }
6371 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6372 i.op[n].imms, 0, reloc_type);
6373 }
6374 }
6375 }
6376 }
6377 \f
6378 /* x86_cons_fix_new is called via the expression parsing code when a
6379 reloc is needed. We use this hook to get the correct .got reloc. */
6380 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6381 static int cons_sign = -1;
6382
6383 void
6384 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6385 expressionS *exp)
6386 {
6387 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6388
6389 got_reloc = NO_RELOC;
6390
6391 #ifdef TE_PE
6392 if (exp->X_op == O_secrel)
6393 {
6394 exp->X_op = O_symbol;
6395 r = BFD_RELOC_32_SECREL;
6396 }
6397 #endif
6398
6399 fix_new_exp (frag, off, len, exp, 0, r);
6400 }
6401
6402 #if (!defined (OBJ_ELF) && !defined (OBJ_MAYBE_ELF)) || defined (LEX_AT)
6403 # define lex_got(reloc, adjust, types) NULL
6404 #else
6405 /* Parse operands of the form
6406 <symbol>@GOTOFF+<nnn>
6407 and similar .plt or .got references.
6408
6409 If we find one, set up the correct relocation in RELOC and copy the
6410 input string, minus the `@GOTOFF' into a malloc'd buffer for
6411 parsing by the calling routine. Return this buffer, and if ADJUST
6412 is non-null set it to the length of the string we removed from the
6413 input line. Otherwise return NULL. */
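/* For example (a sketch): given `foo@GOTOFF+4', *rel becomes
   BFD_RELOC_386_GOTOFF (or BFD_RELOC_X86_64_GOTOFF64 for 64-bit
   output), *adjust (if requested) is set to 6, and the returned buffer
   reads "foo +4" -- the reloc token is replaced by a space so that
   junk like `foo@GOTOFF1' still fails to parse.  */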
6414 static char *
6415 lex_got (enum bfd_reloc_code_real *rel,
6416 int *adjust,
6417 i386_operand_type *types)
6418 {
6419 /* Some of the relocations depend on the size of the field to
6420 be relocated. But in our callers i386_immediate and i386_displacement
6421 we don't yet know the operand size (this will be set by insn
6422 matching). Hence we record the word32 relocation here,
6423 and adjust the reloc according to the real size in reloc(). */
6424 static const struct {
6425 const char *str;
6426 int len;
6427 const enum bfd_reloc_code_real rel[2];
6428 const i386_operand_type types64;
6429 } gotrel[] = {
6430 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6431 BFD_RELOC_X86_64_PLTOFF64 },
6432 OPERAND_TYPE_IMM64 },
6433 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6434 BFD_RELOC_X86_64_PLT32 },
6435 OPERAND_TYPE_IMM32_32S_DISP32 },
6436 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6437 BFD_RELOC_X86_64_GOTPLT64 },
6438 OPERAND_TYPE_IMM64_DISP64 },
6439 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6440 BFD_RELOC_X86_64_GOTOFF64 },
6441 OPERAND_TYPE_IMM64_DISP64 },
6442 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6443 BFD_RELOC_X86_64_GOTPCREL },
6444 OPERAND_TYPE_IMM32_32S_DISP32 },
6445 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6446 BFD_RELOC_X86_64_TLSGD },
6447 OPERAND_TYPE_IMM32_32S_DISP32 },
6448 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6449 _dummy_first_bfd_reloc_code_real },
6450 OPERAND_TYPE_NONE },
6451 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6452 BFD_RELOC_X86_64_TLSLD },
6453 OPERAND_TYPE_IMM32_32S_DISP32 },
6454 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6455 BFD_RELOC_X86_64_GOTTPOFF },
6456 OPERAND_TYPE_IMM32_32S_DISP32 },
6457 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6458 BFD_RELOC_X86_64_TPOFF32 },
6459 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6460 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6461 _dummy_first_bfd_reloc_code_real },
6462 OPERAND_TYPE_NONE },
6463 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6464 BFD_RELOC_X86_64_DTPOFF32 },
6465 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6466 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6467 _dummy_first_bfd_reloc_code_real },
6468 OPERAND_TYPE_NONE },
6469 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6470 _dummy_first_bfd_reloc_code_real },
6471 OPERAND_TYPE_NONE },
6472 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6473 BFD_RELOC_X86_64_GOT32 },
6474 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6475 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6476 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6477 OPERAND_TYPE_IMM32_32S_DISP32 },
6478 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6479 BFD_RELOC_X86_64_TLSDESC_CALL },
6480 OPERAND_TYPE_IMM32_32S_DISP32 },
6481 };
6482 char *cp;
6483 unsigned int j;
6484
6485 if (!IS_ELF)
6486 return NULL;
6487
6488 for (cp = input_line_pointer; *cp != '@'; cp++)
6489 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6490 return NULL;
6491
6492 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6493 {
6494 int len = gotrel[j].len;
6495 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6496 {
6497 if (gotrel[j].rel[object_64bit] != 0)
6498 {
6499 int first, second;
6500 char *tmpbuf, *past_reloc;
6501
6502 *rel = gotrel[j].rel[object_64bit];
6503 if (adjust)
6504 *adjust = len;
6505
6506 if (types)
6507 {
6508 if (flag_code != CODE_64BIT)
6509 {
6510 types->bitfield.imm32 = 1;
6511 types->bitfield.disp32 = 1;
6512 }
6513 else
6514 *types = gotrel[j].types64;
6515 }
6516
6517 if (GOT_symbol == NULL)
6518 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6519
6520 /* The length of the first part of our input line. */
6521 first = cp - input_line_pointer;
6522
6523 /* The second part goes from after the reloc token until
6524 (and including) an end_of_line char or comma. */
6525 past_reloc = cp + 1 + len;
6526 cp = past_reloc;
6527 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6528 ++cp;
6529 second = cp + 1 - past_reloc;
6530
6531 /* Allocate and copy string. The trailing NUL shouldn't
6532 be necessary, but be safe. */
6533 tmpbuf = (char *) xmalloc (first + second + 2);
6534 memcpy (tmpbuf, input_line_pointer, first);
6535 if (second != 0 && *past_reloc != ' ')
6536 /* Replace the relocation token with ' ', so that
6537 errors like foo@GOTOFF1 will be detected. */
6538 tmpbuf[first++] = ' ';
6539 memcpy (tmpbuf + first, past_reloc, second);
6540 tmpbuf[first + second] = '\0';
6541 return tmpbuf;
6542 }
6543
6544 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6545 gotrel[j].str, 1 << (5 + object_64bit));
6546 return NULL;
6547 }
6548 }
6549
6550 /* Might be a symbol version string. Don't as_bad here. */
6551 return NULL;
6552 }
6553
6554 void
6555 x86_cons (expressionS *exp, int size)
6556 {
6557 intel_syntax = -intel_syntax;
6558
6559 exp->X_md = 0;
6560 if (size == 4 || (object_64bit && size == 8))
6561 {
6562 /* Handle @GOTOFF and the like in an expression. */
6563 char *save;
6564 char *gotfree_input_line;
6565 int adjust;
6566
6567 save = input_line_pointer;
6568 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6569 if (gotfree_input_line)
6570 input_line_pointer = gotfree_input_line;
6571
6572 expression (exp);
6573
6574 if (gotfree_input_line)
6575 {
6576 /* expression () has merrily parsed up to the end of line,
6577 or a comma - in the wrong buffer. Transfer how far
6578 input_line_pointer has moved to the right buffer. */
6579 input_line_pointer = (save
6580 + (input_line_pointer - gotfree_input_line)
6581 + adjust);
6582 free (gotfree_input_line);
6583 if (exp->X_op == O_constant
6584 || exp->X_op == O_absent
6585 || exp->X_op == O_illegal
6586 || exp->X_op == O_register
6587 || exp->X_op == O_big)
6588 {
6589 char c = *input_line_pointer;
6590 *input_line_pointer = 0;
6591 as_bad (_("missing or invalid expression `%s'"), save);
6592 *input_line_pointer = c;
6593 }
6594 }
6595 }
6596 else
6597 expression (exp);
6598
6599 intel_syntax = -intel_syntax;
6600
6601 if (intel_syntax)
6602 i386_intel_simplify (exp);
6603 }
6604 #endif
6605
6606 static void
6607 signed_cons (int size)
6608 {
6609 if (flag_code == CODE_64BIT)
6610 cons_sign = 1;
6611 cons (size);
6612 cons_sign = -1;
6613 }
6614
6615 #ifdef TE_PE
6616 static void
6617 pe_directive_secrel (dummy)
6618 int dummy ATTRIBUTE_UNUSED;
6619 {
6620 expressionS exp;
6621
6622 do
6623 {
6624 expression (&exp);
6625 if (exp.X_op == O_symbol)
6626 exp.X_op = O_secrel;
6627
6628 emit_expr (&exp, 4);
6629 }
6630 while (*input_line_pointer++ == ',');
6631
6632 input_line_pointer--;
6633 demand_empty_rest_of_line ();
6634 }
6635 #endif
6636
6637 static int
6638 i386_immediate (char *imm_start)
6639 {
6640 char *save_input_line_pointer;
6641 char *gotfree_input_line;
6642 segT exp_seg = 0;
6643 expressionS *exp;
6644 i386_operand_type types;
6645
6646 operand_type_set (&types, ~0);
6647
6648 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6649 {
6650 as_bad (_("at most %d immediate operands are allowed"),
6651 MAX_IMMEDIATE_OPERANDS);
6652 return 0;
6653 }
6654
6655 exp = &im_expressions[i.imm_operands++];
6656 i.op[this_operand].imms = exp;
6657
6658 if (is_space_char (*imm_start))
6659 ++imm_start;
6660
6661 save_input_line_pointer = input_line_pointer;
6662 input_line_pointer = imm_start;
6663
6664 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6665 if (gotfree_input_line)
6666 input_line_pointer = gotfree_input_line;
6667
6668 exp_seg = expression (exp);
6669
6670 SKIP_WHITESPACE ();
6671 if (*input_line_pointer)
6672 as_bad (_("junk `%s' after expression"), input_line_pointer);
6673
6674 input_line_pointer = save_input_line_pointer;
6675 if (gotfree_input_line)
6676 {
6677 free (gotfree_input_line);
6678
6679 if (exp->X_op == O_constant || exp->X_op == O_register)
6680 exp->X_op = O_illegal;
6681 }
6682
6683 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6684 }
6685
6686 static int
6687 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6688 i386_operand_type types, const char *imm_start)
6689 {
6690 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6691 {
6692 if (imm_start)
6693 as_bad (_("missing or invalid immediate expression `%s'"),
6694 imm_start);
6695 return 0;
6696 }
6697 else if (exp->X_op == O_constant)
6698 {
6699 /* Size it properly later. */
6700 i.types[this_operand].bitfield.imm64 = 1;
6701 /* If not 64bit, sign extend val. */
6702 if (flag_code != CODE_64BIT
6703 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6704 exp->X_add_number
6705 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6706 }
6707 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6708 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6709 && exp_seg != absolute_section
6710 && exp_seg != text_section
6711 && exp_seg != data_section
6712 && exp_seg != bss_section
6713 && exp_seg != undefined_section
6714 && !bfd_is_com_section (exp_seg))
6715 {
6716 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6717 return 0;
6718 }
6719 #endif
6720 else if (!intel_syntax && exp->X_op == O_register)
6721 {
6722 if (imm_start)
6723 as_bad (_("illegal immediate register operand %s"), imm_start);
6724 return 0;
6725 }
6726 else
6727 {
6728 /* This is an address. The size of the address will be
6729 determined later, depending on destination register,
6730 suffix, or the default for the section. */
6731 i.types[this_operand].bitfield.imm8 = 1;
6732 i.types[this_operand].bitfield.imm16 = 1;
6733 i.types[this_operand].bitfield.imm32 = 1;
6734 i.types[this_operand].bitfield.imm32s = 1;
6735 i.types[this_operand].bitfield.imm64 = 1;
6736 i.types[this_operand] = operand_type_and (i.types[this_operand],
6737 types);
6738 }
6739
6740 return 1;
6741 }
6742
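/* Parse the scale factor expression at SCALE (a sketch of the
   contract, inferred from the callers): accept 1, 2, 4 or 8, record it
   as i.log2_scale_factor (0..3), and return a pointer just past the
   expression, or NULL on error.  E.g. the "4" in `(%ebx,%esi,4)' sets
   i.log2_scale_factor to 2.  */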
6743 static char *
6744 i386_scale (char *scale)
6745 {
6746 offsetT val;
6747 char *save = input_line_pointer;
6748
6749 input_line_pointer = scale;
6750 val = get_absolute_expression ();
6751
6752 switch (val)
6753 {
6754 case 1:
6755 i.log2_scale_factor = 0;
6756 break;
6757 case 2:
6758 i.log2_scale_factor = 1;
6759 break;
6760 case 4:
6761 i.log2_scale_factor = 2;
6762 break;
6763 case 8:
6764 i.log2_scale_factor = 3;
6765 break;
6766 default:
6767 {
6768 char sep = *input_line_pointer;
6769
6770 *input_line_pointer = '\0';
6771 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6772 scale);
6773 *input_line_pointer = sep;
6774 input_line_pointer = save;
6775 return NULL;
6776 }
6777 }
6778 if (i.log2_scale_factor != 0 && i.index_reg == 0)
6779 {
6780 as_warn (_("scale factor of %d without an index register"),
6781 1 << i.log2_scale_factor);
6782 i.log2_scale_factor = 0;
6783 }
6784 scale = input_line_pointer;
6785 input_line_pointer = save;
6786 return scale;
6787 }
6788
6789 static int
6790 i386_displacement (char *disp_start, char *disp_end)
6791 {
6792 expressionS *exp;
6793 segT exp_seg = 0;
6794 char *save_input_line_pointer;
6795 char *gotfree_input_line;
6796 int override;
6797 i386_operand_type bigdisp, types = anydisp;
6798 int ret;
6799
6800 if (i.disp_operands == MAX_MEMORY_OPERANDS)
6801 {
6802 as_bad (_("at most %d displacement operands are allowed"),
6803 MAX_MEMORY_OPERANDS);
6804 return 0;
6805 }
6806
6807 operand_type_set (&bigdisp, 0);
6808 if ((i.types[this_operand].bitfield.jumpabsolute)
6809 || (!current_templates->start->opcode_modifier.jump
6810 && !current_templates->start->opcode_modifier.jumpdword))
6811 {
6812 bigdisp.bitfield.disp32 = 1;
6813 override = (i.prefix[ADDR_PREFIX] != 0);
6814 if (flag_code == CODE_64BIT)
6815 {
6816 if (!override)
6817 {
6818 bigdisp.bitfield.disp32s = 1;
6819 bigdisp.bitfield.disp64 = 1;
6820 }
6821 }
6822 else if ((flag_code == CODE_16BIT) ^ override)
6823 {
6824 bigdisp.bitfield.disp32 = 0;
6825 bigdisp.bitfield.disp16 = 1;
6826 }
6827 }
6828 else
6829 {
6830 /* For PC-relative branches, the width of the displacement
6831 is dependent upon data size, not address size. */
6832 override = (i.prefix[DATA_PREFIX] != 0);
6833 if (flag_code == CODE_64BIT)
6834 {
6835 if (override || i.suffix == WORD_MNEM_SUFFIX)
6836 bigdisp.bitfield.disp16 = 1;
6837 else
6838 {
6839 bigdisp.bitfield.disp32 = 1;
6840 bigdisp.bitfield.disp32s = 1;
6841 }
6842 }
6843 else
6844 {
6845 if (!override)
6846 override = (i.suffix == (flag_code != CODE_16BIT
6847 ? WORD_MNEM_SUFFIX
6848 : LONG_MNEM_SUFFIX));
6849 bigdisp.bitfield.disp32 = 1;
6850 if ((flag_code == CODE_16BIT) ^ override)
6851 {
6852 bigdisp.bitfield.disp32 = 0;
6853 bigdisp.bitfield.disp16 = 1;
6854 }
6855 }
6856 }
6857 i.types[this_operand] = operand_type_or (i.types[this_operand],
6858 bigdisp);
6859
6860 exp = &disp_expressions[i.disp_operands];
6861 i.op[this_operand].disps = exp;
6862 i.disp_operands++;
6863 save_input_line_pointer = input_line_pointer;
6864 input_line_pointer = disp_start;
6865 END_STRING_AND_SAVE (disp_end);
6866
6867 #ifndef GCC_ASM_O_HACK
6868 #define GCC_ASM_O_HACK 0
6869 #endif
6870 #if GCC_ASM_O_HACK
6871 END_STRING_AND_SAVE (disp_end + 1);
6872 if (i.types[this_operand].bitfield.baseIndex
6873 && displacement_string_end[-1] == '+')
6874 {
6875 /* This hack is to avoid a warning when using the "o"
6876 constraint within gcc asm statements.
6877 For instance:
6878
6879 #define _set_tssldt_desc(n,addr,limit,type) \
6880 __asm__ __volatile__ ( \
6881 "movw %w2,%0\n\t" \
6882 "movw %w1,2+%0\n\t" \
6883 "rorl $16,%1\n\t" \
6884 "movb %b1,4+%0\n\t" \
6885 "movb %4,5+%0\n\t" \
6886 "movb $0,6+%0\n\t" \
6887 "movb %h1,7+%0\n\t" \
6888 "rorl $16,%1" \
6889 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
6890
6891 This works great except that the output assembler ends
6892 up looking a bit weird if it turns out that there is
6893 no offset. You end up producing code that looks like:
6894
6895 #APP
6896 movw $235,(%eax)
6897 movw %dx,2+(%eax)
6898 rorl $16,%edx
6899 movb %dl,4+(%eax)
6900 movb $137,5+(%eax)
6901 movb $0,6+(%eax)
6902 movb %dh,7+(%eax)
6903 rorl $16,%edx
6904 #NO_APP
6905
6906 So here we provide the missing zero. */
6907
6908 *displacement_string_end = '0';
6909 }
6910 #endif
6911 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6912 if (gotfree_input_line)
6913 input_line_pointer = gotfree_input_line;
6914
6915 exp_seg = expression (exp);
6916
6917 SKIP_WHITESPACE ();
6918 if (*input_line_pointer)
6919 as_bad (_("junk `%s' after expression"), input_line_pointer);
6920 #if GCC_ASM_O_HACK
6921 RESTORE_END_STRING (disp_end + 1);
6922 #endif
6923 input_line_pointer = save_input_line_pointer;
6924 if (gotfree_input_line)
6925 {
6926 free (gotfree_input_line);
6927
6928 if (exp->X_op == O_constant || exp->X_op == O_register)
6929 exp->X_op = O_illegal;
6930 }
6931
6932 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
6933
6934 RESTORE_END_STRING (disp_end);
6935
6936 return ret;
6937 }
6938
6939 static int
6940 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6941 i386_operand_type types, const char *disp_start)
6942 {
6943 i386_operand_type bigdisp;
6944 int ret = 1;
6945
6946 /* We do this to make sure that the section symbol is in
6947 the symbol table. We will ultimately change the relocation
6948 to be relative to the beginning of the section. */
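/* Sketch of the net effect below: a displacement like `foo@GOTOFF' is
   turned into the expression foo - _GLOBAL_OFFSET_TABLE_ with a plain
   BFD_RELOC_32 (BFD_RELOC_64 for @GOTOFF64, BFD_RELOC_32_PCREL for
   @GOTPCREL).  */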
6949 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
6950 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
6951 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6952 {
6953 if (exp->X_op != O_symbol)
6954 goto inv_disp;
6955
6956 if (S_IS_LOCAL (exp->X_add_symbol)
6957 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
6958 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
6959 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
6960 exp->X_op = O_subtract;
6961 exp->X_op_symbol = GOT_symbol;
6962 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
6963 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
6964 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
6965 i.reloc[this_operand] = BFD_RELOC_64;
6966 else
6967 i.reloc[this_operand] = BFD_RELOC_32;
6968 }
6969
6970 else if (exp->X_op == O_absent
6971 || exp->X_op == O_illegal
6972 || exp->X_op == O_big)
6973 {
6974 inv_disp:
6975 as_bad (_("missing or invalid displacement expression `%s'"),
6976 disp_start);
6977 ret = 0;
6978 }
6979
6980 else if (flag_code == CODE_64BIT
6981 && !i.prefix[ADDR_PREFIX]
6982 && exp->X_op == O_constant)
6983 {
6984 /* Since the displacement is sign-extended to 64bit, don't allow
6985 disp32, and turn off disp32s if the value is out of range. */
6986 i.types[this_operand].bitfield.disp32 = 0;
6987 if (!fits_in_signed_long (exp->X_add_number))
6988 {
6989 i.types[this_operand].bitfield.disp32s = 0;
6990 if (i.types[this_operand].bitfield.baseindex)
6991 {
6992 as_bad (_("0x%lx out range of signed 32bit displacement"),
6993 (long) exp->X_add_number);
6994 ret = 0;
6995 }
6996 }
6997 }
6998
6999 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7000 else if (exp->X_op != O_constant
7001 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7002 && exp_seg != absolute_section
7003 && exp_seg != text_section
7004 && exp_seg != data_section
7005 && exp_seg != bss_section
7006 && exp_seg != undefined_section
7007 && !bfd_is_com_section (exp_seg))
7008 {
7009 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7010 ret = 0;
7011 }
7012 #endif
7013
7014 /* Check if this is a displacement only operand. */
7015 bigdisp = i.types[this_operand];
7016 bigdisp.bitfield.disp8 = 0;
7017 bigdisp.bitfield.disp16 = 0;
7018 bigdisp.bitfield.disp32 = 0;
7019 bigdisp.bitfield.disp32s = 0;
7020 bigdisp.bitfield.disp64 = 0;
7021 if (operand_type_all_zero (&bigdisp))
7022 i.types[this_operand] = operand_type_and (i.types[this_operand],
7023 types);
7024
7025 return ret;
7026 }
7027
7028 /* Make sure the memory operand we've been dealt is valid.
7029 Return 1 on success, 0 on a failure. */
7030
7031 static int
7032 i386_index_check (const char *operand_string)
7033 {
7034 int ok;
7035 const char *kind = "base/index";
7036 #if INFER_ADDR_PREFIX
7037 int fudged = 0;
7038
7039 tryprefix:
7040 #endif
7041 ok = 1;
7042 if (current_templates->start->opcode_modifier.isstring
7043 && !current_templates->start->opcode_modifier.immext
7044 && (current_templates->end[-1].opcode_modifier.isstring
7045 || i.mem_operands))
7046 {
7047 /* Memory operands of string insns are special in that they only allow
7048 a single register (rDI, rSI, or rBX) as their memory address. */
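/* E.g. (roughly): `stos' only takes %es:(%edi) (or (%di)/(%rdi),
   depending on address size), `lods' only takes (%esi), and `xlat'
   only takes (%ebx).  */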
7049 unsigned int expected;
7050
7051 kind = "string address";
7052
7053 if (current_templates->start->opcode_modifier.w)
7054 {
7055 i386_operand_type type = current_templates->end[-1].operand_types[0];
7056
7057 if (!type.bitfield.baseindex
7058 || ((!i.mem_operands != !intel_syntax)
7059 && current_templates->end[-1].operand_types[1]
7060 .bitfield.baseindex))
7061 type = current_templates->end[-1].operand_types[1];
7062 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7063 }
7064 else
7065 expected = 3 /* rBX */;
7066
7067 if (!i.base_reg || i.index_reg
7068 || operand_type_check (i.types[this_operand], disp))
7069 ok = -1;
7070 else if (!(flag_code == CODE_64BIT
7071 ? i.prefix[ADDR_PREFIX]
7072 ? i.base_reg->reg_type.bitfield.reg32
7073 : i.base_reg->reg_type.bitfield.reg64
7074 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7075 ? i.base_reg->reg_type.bitfield.reg32
7076 : i.base_reg->reg_type.bitfield.reg16))
7077 ok = 0;
7078 else if (i.base_reg->reg_num != expected)
7079 ok = -1;
7080
7081 if (ok < 0)
7082 {
7083 unsigned int j;
7084
7085 for (j = 0; j < i386_regtab_size; ++j)
7086 if ((flag_code == CODE_64BIT
7087 ? i.prefix[ADDR_PREFIX]
7088 ? i386_regtab[j].reg_type.bitfield.reg32
7089 : i386_regtab[j].reg_type.bitfield.reg64
7090 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7091 ? i386_regtab[j].reg_type.bitfield.reg32
7092 : i386_regtab[j].reg_type.bitfield.reg16)
7093 && i386_regtab[j].reg_num == expected)
7094 break;
7095 gas_assert (j < i386_regtab_size);
7096 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7097 operand_string,
7098 intel_syntax ? '[' : '(',
7099 register_prefix,
7100 i386_regtab[j].reg_name,
7101 intel_syntax ? ']' : ')');
7102 ok = 1;
7103 }
7104 }
7105 else if (flag_code == CODE_64BIT)
7106 {
7107 if ((i.base_reg
7108 && ((i.prefix[ADDR_PREFIX] == 0
7109 && !i.base_reg->reg_type.bitfield.reg64)
7110 || (i.prefix[ADDR_PREFIX]
7111 && !i.base_reg->reg_type.bitfield.reg32))
7112 && (i.index_reg
7113 || i.base_reg->reg_num !=
7114 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7115 || (i.index_reg
7116 && (!i.index_reg->reg_type.bitfield.baseindex
7117 || (i.prefix[ADDR_PREFIX] == 0
7118 && i.index_reg->reg_num != RegRiz
7119 && !i.index_reg->reg_type.bitfield.reg64
7120 )
7121 || (i.prefix[ADDR_PREFIX]
7122 && i.index_reg->reg_num != RegEiz
7123 && !i.index_reg->reg_type.bitfield.reg32))))
7124 ok = 0;
7125 }
7126 else
7127 {
7128 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7129 {
7130 /* 16bit checks. */
7131 if ((i.base_reg
7132 && (!i.base_reg->reg_type.bitfield.reg16
7133 || !i.base_reg->reg_type.bitfield.baseindex))
7134 || (i.index_reg
7135 && (!i.index_reg->reg_type.bitfield.reg16
7136 || !i.index_reg->reg_type.bitfield.baseindex
7137 || !(i.base_reg
7138 && i.base_reg->reg_num < 6
7139 && i.index_reg->reg_num >= 6
7140 && i.log2_scale_factor == 0))))
7141 ok = 0;
7142 }
7143 else
7144 {
7145 /* 32bit checks. */
7146 if ((i.base_reg
7147 && !i.base_reg->reg_type.bitfield.reg32)
7148 || (i.index_reg
7149 && ((!i.index_reg->reg_type.bitfield.reg32
7150 && i.index_reg->reg_num != RegEiz)
7151 || !i.index_reg->reg_type.bitfield.baseindex)))
7152 ok = 0;
7153 }
7154 }
7155 if (!ok)
7156 {
7157 #if INFER_ADDR_PREFIX
7158 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7159 {
7160 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7161 i.prefixes += 1;
7162 /* Change the size of any displacement too. At most one of
7163 Disp16 or Disp32 is set.
7164 FIXME. There doesn't seem to be any real need for separate
7165 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7166 Removing them would probably clean up the code quite a lot. */
7167 if (flag_code != CODE_64BIT
7168 && (i.types[this_operand].bitfield.disp16
7169 || i.types[this_operand].bitfield.disp32))
7170 i.types[this_operand]
7171 = operand_type_xor (i.types[this_operand], disp16_32);
7172 fudged = 1;
7173 goto tryprefix;
7174 }
7175 if (fudged)
7176 as_bad (_("`%s' is not a valid %s expression"),
7177 operand_string,
7178 kind);
7179 else
7180 #endif
7181 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7182 operand_string,
7183 flag_code_names[i.prefix[ADDR_PREFIX]
7184 ? flag_code == CODE_32BIT
7185 ? CODE_16BIT
7186 : CODE_32BIT
7187 : flag_code],
7188 kind);
7189 }
7190 return ok;
7191 }
7192
7193 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7194 on error. */
7195
7196 static int
7197 i386_att_operand (char *operand_string)
7198 {
7199 const reg_entry *r;
7200 char *end_op;
7201 char *op_string = operand_string;
7202
7203 if (is_space_char (*op_string))
7204 ++op_string;
7205
7206 /* We check for an absolute prefix (differentiating,
7207 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7208 if (*op_string == ABSOLUTE_PREFIX)
7209 {
7210 ++op_string;
7211 if (is_space_char (*op_string))
7212 ++op_string;
7213 i.types[this_operand].bitfield.jumpabsolute = 1;
7214 }
7215
7216 /* Check if operand is a register. */
7217 if ((r = parse_register (op_string, &end_op)) != NULL)
7218 {
7219 i386_operand_type temp;
7220
7221 /* Check for a segment override by searching for ':' after a
7222 segment register. */
7223 op_string = end_op;
7224 if (is_space_char (*op_string))
7225 ++op_string;
7226 if (*op_string == ':'
7227 && (r->reg_type.bitfield.sreg2
7228 || r->reg_type.bitfield.sreg3))
7229 {
7230 switch (r->reg_num)
7231 {
7232 case 0:
7233 i.seg[i.mem_operands] = &es;
7234 break;
7235 case 1:
7236 i.seg[i.mem_operands] = &cs;
7237 break;
7238 case 2:
7239 i.seg[i.mem_operands] = &ss;
7240 break;
7241 case 3:
7242 i.seg[i.mem_operands] = &ds;
7243 break;
7244 case 4:
7245 i.seg[i.mem_operands] = &fs;
7246 break;
7247 case 5:
7248 i.seg[i.mem_operands] = &gs;
7249 break;
7250 }
7251
7252 /* Skip the ':' and whitespace. */
7253 ++op_string;
7254 if (is_space_char (*op_string))
7255 ++op_string;
7256
7257 if (!is_digit_char (*op_string)
7258 && !is_identifier_char (*op_string)
7259 && *op_string != '('
7260 && *op_string != ABSOLUTE_PREFIX)
7261 {
7262 as_bad (_("bad memory operand `%s'"), op_string);
7263 return 0;
7264 }
7265 /* Handle case of %es:*foo. */
7266 if (*op_string == ABSOLUTE_PREFIX)
7267 {
7268 ++op_string;
7269 if (is_space_char (*op_string))
7270 ++op_string;
7271 i.types[this_operand].bitfield.jumpabsolute = 1;
7272 }
7273 goto do_memory_reference;
7274 }
7275 if (*op_string)
7276 {
7277 as_bad (_("junk `%s' after register"), op_string);
7278 return 0;
7279 }
7280 temp = r->reg_type;
7281 temp.bitfield.baseindex = 0;
7282 i.types[this_operand] = operand_type_or (i.types[this_operand],
7283 temp);
7284 i.types[this_operand].bitfield.unspecified = 0;
7285 i.op[this_operand].regs = r;
7286 i.reg_operands++;
7287 }
7288 else if (*op_string == REGISTER_PREFIX)
7289 {
7290 as_bad (_("bad register name `%s'"), op_string);
7291 return 0;
7292 }
7293 else if (*op_string == IMMEDIATE_PREFIX)
7294 {
7295 ++op_string;
7296 if (i.types[this_operand].bitfield.jumpabsolute)
7297 {
7298 as_bad (_("immediate operand illegal with absolute jump"));
7299 return 0;
7300 }
7301 if (!i386_immediate (op_string))
7302 return 0;
7303 }
7304 else if (is_digit_char (*op_string)
7305 || is_identifier_char (*op_string)
7306 || *op_string == '(')
7307 {
7308 /* This is a memory reference of some sort. */
7309 char *base_string;
7310
7311 /* Start and end of displacement string expression (if found). */
7312 char *displacement_string_start;
7313 char *displacement_string_end;
7314
7315 do_memory_reference:
7316 if ((i.mem_operands == 1
7317 && !current_templates->start->opcode_modifier.isstring)
7318 || i.mem_operands == 2)
7319 {
7320 as_bad (_("too many memory references for `%s'"),
7321 current_templates->start->name);
7322 return 0;
7323 }
7324
7325 /* Check for base index form. We detect the base index form by
7326 looking for an ')' at the end of the operand, searching
7327 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7328 after the '('. */
7329 base_string = op_string + strlen (op_string);
7330
7331 --base_string;
7332 if (is_space_char (*base_string))
7333 --base_string;
7334
7335 /* If we only have a displacement, set up for it to be parsed later. */
7336 displacement_string_start = op_string;
7337 displacement_string_end = base_string + 1;
7338
7339 if (*base_string == ')')
7340 {
7341 char *temp_string;
7342 unsigned int parens_balanced = 1;
7343 /* We've already checked that the numbers of left and right ()'s are
7344 equal, so this loop will not be infinite. */
7345 do
7346 {
7347 base_string--;
7348 if (*base_string == ')')
7349 parens_balanced++;
7350 if (*base_string == '(')
7351 parens_balanced--;
7352 }
7353 while (parens_balanced);
7354
7355 temp_string = base_string;
7356
7357 /* Skip past '(' and whitespace. */
7358 ++base_string;
7359 if (is_space_char (*base_string))
7360 ++base_string;
7361
7362 if (*base_string == ','
7363 || ((i.base_reg = parse_register (base_string, &end_op))
7364 != NULL))
7365 {
7366 displacement_string_end = temp_string;
7367
7368 i.types[this_operand].bitfield.baseindex = 1;
7369
7370 if (i.base_reg)
7371 {
7372 base_string = end_op;
7373 if (is_space_char (*base_string))
7374 ++base_string;
7375 }
7376
7377 /* There may be an index reg or scale factor here. */
7378 if (*base_string == ',')
7379 {
7380 ++base_string;
7381 if (is_space_char (*base_string))
7382 ++base_string;
7383
7384 if ((i.index_reg = parse_register (base_string, &end_op))
7385 != NULL)
7386 {
7387 base_string = end_op;
7388 if (is_space_char (*base_string))
7389 ++base_string;
7390 if (*base_string == ',')
7391 {
7392 ++base_string;
7393 if (is_space_char (*base_string))
7394 ++base_string;
7395 }
7396 else if (*base_string != ')')
7397 {
7398 as_bad (_("expecting `,' or `)' "
7399 "after index register in `%s'"),
7400 operand_string);
7401 return 0;
7402 }
7403 }
7404 else if (*base_string == REGISTER_PREFIX)
7405 {
7406 as_bad (_("bad register name `%s'"), base_string);
7407 return 0;
7408 }
7409
7410 /* Check for scale factor. */
7411 if (*base_string != ')')
7412 {
7413 char *end_scale = i386_scale (base_string);
7414
7415 if (!end_scale)
7416 return 0;
7417
7418 base_string = end_scale;
7419 if (is_space_char (*base_string))
7420 ++base_string;
7421 if (*base_string != ')')
7422 {
7423 as_bad (_("expecting `)' "
7424 "after scale factor in `%s'"),
7425 operand_string);
7426 return 0;
7427 }
7428 }
7429 else if (!i.index_reg)
7430 {
7431 as_bad (_("expecting index register or scale factor "
7432 "after `,'; got '%c'"),
7433 *base_string);
7434 return 0;
7435 }
7436 }
7437 else if (*base_string != ')')
7438 {
7439 as_bad (_("expecting `,' or `)' "
7440 "after base register in `%s'"),
7441 operand_string);
7442 return 0;
7443 }
7444 }
7445 else if (*base_string == REGISTER_PREFIX)
7446 {
7447 as_bad (_("bad register name `%s'"), base_string);
7448 return 0;
7449 }
7450 }
7451
7452 /* If there's an expression beginning the operand, parse it,
7453 assuming displacement_string_start and
7454 displacement_string_end are meaningful. */
7455 if (displacement_string_start != displacement_string_end)
7456 {
7457 if (!i386_displacement (displacement_string_start,
7458 displacement_string_end))
7459 return 0;
7460 }
7461
7462 /* Special case for (%dx) while doing input/output op. */
7463 if (i.base_reg
7464 && operand_type_equal (&i.base_reg->reg_type,
7465 &reg16_inoutportreg)
7466 && i.index_reg == 0
7467 && i.log2_scale_factor == 0
7468 && i.seg[i.mem_operands] == 0
7469 && !operand_type_check (i.types[this_operand], disp))
7470 {
7471 i.types[this_operand] = inoutportreg;
7472 return 1;
7473 }
7474
7475 if (i386_index_check (operand_string) == 0)
7476 return 0;
7477 i.types[this_operand].bitfield.mem = 1;
7478 i.mem_operands++;
7479 }
7480 else
7481 {
7482 /* It's not a memory operand; argh! */
7483 as_bad (_("invalid char %s beginning operand %d `%s'"),
7484 output_invalid (*op_string),
7485 this_operand + 1,
7486 op_string);
7487 return 0;
7488 }
7489 return 1; /* Normal return. */
7490 }
7491 \f
7492 /* md_estimate_size_before_relax()
7493
7494 Called just before relax() for rs_machine_dependent frags. The x86
7495 assembler uses these frags to handle variable size jump
7496 instructions.
7497
7498 Any symbol that is now undefined will not become defined.
7499 Return the correct fr_subtype in the frag.
7500 Return the initial "guess for variable size of frag" to caller.
7501 The guess is actually the growth beyond the fixed part. Whatever
7502 we do to grow the fixed or variable part contributes to our
7503 returned value. */
7504
7505 int
7506 md_estimate_size_before_relax (fragP, segment)
7507 fragS *fragP;
7508 segT segment;
7509 {
7510 /* We've already got fragP->fr_subtype right; all we have to do is
7511 check for un-relaxable symbols. On an ELF system, we can't relax
7512 an externally visible symbol, because it may be overridden by a
7513 shared library. */
7514 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7515 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7516 || (IS_ELF
7517 && (S_IS_EXTERNAL (fragP->fr_symbol)
7518 || S_IS_WEAK (fragP->fr_symbol)
7519 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7520 & BSF_GNU_INDIRECT_FUNCTION))))
7521 #endif
7522 #if defined (OBJ_COFF) && defined (TE_PE)
7523 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7524 && S_IS_WEAK (fragP->fr_symbol))
7525 #endif
7526 )
7527 {
7528 /* Symbol is undefined in this segment, or we need to keep a
7529 reloc so that weak symbols can be overridden. */
7530 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7531 enum bfd_reloc_code_real reloc_type;
7532 unsigned char *opcode;
7533 int old_fr_fix;
7534
7535 if (fragP->fr_var != NO_RELOC)
7536 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7537 else if (size == 2)
7538 reloc_type = BFD_RELOC_16_PCREL;
7539 else
7540 reloc_type = BFD_RELOC_32_PCREL;
7541
7542 old_fr_fix = fragP->fr_fix;
7543 opcode = (unsigned char *) fragP->fr_opcode;
7544
7545 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7546 {
7547 case UNCOND_JUMP:
7548 /* Make jmp (0xeb) a (d)word displacement jump. */
7549 opcode[0] = 0xe9;
7550 fragP->fr_fix += size;
7551 fix_new (fragP, old_fr_fix, size,
7552 fragP->fr_symbol,
7553 fragP->fr_offset, 1,
7554 reloc_type);
7555 break;
7556
7557 case COND_JUMP86:
7558 if (size == 2
7559 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7560 {
7561 /* Negate the condition, and branch past an
7562 unconditional jump. */
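/* E.g. (sketch): a 16-bit `je label' (0x74 disp8) becomes
   `jne .+3' (0x75 0x03) followed by `jmp label' (0xe9 disp16).  */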
7563 opcode[0] ^= 1;
7564 opcode[1] = 3;
7565 /* Insert an unconditional jump. */
7566 opcode[2] = 0xe9;
7567 /* We added two extra opcode bytes, and have a two byte
7568 offset. */
7569 fragP->fr_fix += 2 + 2;
7570 fix_new (fragP, old_fr_fix + 2, 2,
7571 fragP->fr_symbol,
7572 fragP->fr_offset, 1,
7573 reloc_type);
7574 break;
7575 }
7576 /* Fall through. */
7577
7578 case COND_JUMP:
7579 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7580 {
7581 fixS *fixP;
7582
7583 fragP->fr_fix += 1;
7584 fixP = fix_new (fragP, old_fr_fix, 1,
7585 fragP->fr_symbol,
7586 fragP->fr_offset, 1,
7587 BFD_RELOC_8_PCREL);
7588 fixP->fx_signed = 1;
7589 break;
7590 }
7591
7592 /* This changes the byte-displacement jump 0x7N
7593 to the (d)word-displacement jump 0x0f,0x8N. */
7594 opcode[1] = opcode[0] + 0x10;
7595 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7596 /* We've added an opcode byte. */
7597 fragP->fr_fix += 1 + size;
7598 fix_new (fragP, old_fr_fix + 1, size,
7599 fragP->fr_symbol,
7600 fragP->fr_offset, 1,
7601 reloc_type);
7602 break;
7603
7604 default:
7605 BAD_CASE (fragP->fr_subtype);
7606 break;
7607 }
7608 frag_wane (fragP);
7609 return fragP->fr_fix - old_fr_fix;
7610 }
7611
7612 /* Guess size depending on current relax state. Initially the relax
7613 state will correspond to a short jump and we return 1, because
7614 the variable part of the frag (the branch offset) is one byte
7615 long. However, we can relax a section more than once and in that
7616 case we must either set fr_subtype back to the unrelaxed state,
7617 or return the value for the appropriate branch. */
7618 return md_relax_table[fragP->fr_subtype].rlx_length;
7619 }
7620
7621 /* Called after relax() is finished.
7622
7623 In: Address of frag.
7624 fr_type == rs_machine_dependent.
7625 fr_subtype is what the address relaxed to.
7626
7627 Out: Any fixSs and constants are set up.
7628 Caller will turn frag into a ".space 0". */
7629
7630 void
7631 md_convert_frag (abfd, sec, fragP)
7632 bfd *abfd ATTRIBUTE_UNUSED;
7633 segT sec ATTRIBUTE_UNUSED;
7634 fragS *fragP;
7635 {
7636 unsigned char *opcode;
7637 unsigned char *where_to_put_displacement = NULL;
7638 offsetT target_address;
7639 offsetT opcode_address;
7640 unsigned int extension = 0;
7641 offsetT displacement_from_opcode_start;
7642
7643 opcode = (unsigned char *) fragP->fr_opcode;
7644
7645 /* Address we want to reach in file space. */
7646 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7647
7648 /* Address opcode resides at in file space. */
7649 opcode_address = fragP->fr_address + fragP->fr_fix;
7650
7651 /* Displacement from opcode start to fill into instruction. */
7652 displacement_from_opcode_start = target_address - opcode_address;
7653
7654 if ((fragP->fr_subtype & BIG) == 0)
7655 {
7656 /* Don't have to change opcode. */
7657 extension = 1; /* 1 opcode + 1 displacement */
7658 where_to_put_displacement = &opcode[1];
7659 }
7660 else
7661 {
7662 if (no_cond_jump_promotion
7663 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7664 as_warn_where (fragP->fr_file, fragP->fr_line,
7665 _("long jump required"));
7666
7667 switch (fragP->fr_subtype)
7668 {
7669 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7670 extension = 4; /* 1 opcode + 4 displacement */
7671 opcode[0] = 0xe9;
7672 where_to_put_displacement = &opcode[1];
7673 break;
7674
7675 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7676 extension = 2; /* 1 opcode + 2 displacement */
7677 opcode[0] = 0xe9;
7678 where_to_put_displacement = &opcode[1];
7679 break;
7680
7681 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7682 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7683 extension = 5; /* 2 opcode + 4 displacement */
7684 opcode[1] = opcode[0] + 0x10;
7685 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7686 where_to_put_displacement = &opcode[2];
7687 break;
7688
7689 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7690 extension = 3; /* 2 opcode + 2 displacement */
7691 opcode[1] = opcode[0] + 0x10;
7692 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7693 where_to_put_displacement = &opcode[2];
7694 break;
7695
7696 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7697 extension = 4;
7698 opcode[0] ^= 1;
7699 opcode[1] = 3;
7700 opcode[2] = 0xe9;
7701 where_to_put_displacement = &opcode[3];
7702 break;
7703
7704 default:
7705 BAD_CASE (fragP->fr_subtype);
7706 break;
7707 }
7708 }
7709
7710 /* If size is less than four we are sure that the operand fits,
7711 but if it's 4, then it could be that the displacement is larger
7712 than +/- 2GB. */
7713 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7714 && object_64bit
7715 && ((addressT) (displacement_from_opcode_start - extension
7716 + ((addressT) 1 << 31))
7717 > (((addressT) 2 << 31) - 1)))
7718 {
7719 as_bad_where (fragP->fr_file, fragP->fr_line,
7720 _("jump target out of range"));
7721 /* Make us emit 0. */
7722 displacement_from_opcode_start = extension;
7723 }
7724 /* Now put displacement after opcode. */
7725 md_number_to_chars ((char *) where_to_put_displacement,
7726 (valueT) (displacement_from_opcode_start - extension),
7727 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7728 fragP->fr_fix += extension;
7729 }
7730 \f
7731 /* Apply a fixup (fixS) to segment data, once it has been determined
7732 by our caller that we have all the info we need to fix it up.
7733
7734 On the 386, immediates, displacements, and data pointers are all in
7735 the same (little-endian) format, so we don't need to care about which
7736 we are handling. */
7737
7738 void
7739 md_apply_fix (fixP, valP, seg)
7740 /* The fix we're to put in. */
7741 fixS *fixP;
7742 /* Pointer to the value of the bits. */
7743 valueT *valP;
7744 /* Segment fix is from. */
7745 segT seg ATTRIBUTE_UNUSED;
7746 {
7747 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7748 valueT value = *valP;
7749
7750 #if !defined (TE_Mach)
7751 if (fixP->fx_pcrel)
7752 {
7753 switch (fixP->fx_r_type)
7754 {
7755 default:
7756 break;
7757
7758 case BFD_RELOC_64:
7759 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7760 break;
7761 case BFD_RELOC_32:
7762 case BFD_RELOC_X86_64_32S:
7763 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7764 break;
7765 case BFD_RELOC_16:
7766 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7767 break;
7768 case BFD_RELOC_8:
7769 fixP->fx_r_type = BFD_RELOC_8_PCREL;
7770 break;
7771 }
7772 }
7773
7774 if (fixP->fx_addsy != NULL
7775 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
7776 || fixP->fx_r_type == BFD_RELOC_64_PCREL
7777 || fixP->fx_r_type == BFD_RELOC_16_PCREL
7778 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
7779 && !use_rela_relocations)
7780 {
7781 /* This is a hack. There should be a better way to handle this.
7782 This covers for the fact that bfd_install_relocation will
7783 subtract the current location (for partial_inplace, PC relative
7784 relocations); see more below. */
7785 #ifndef OBJ_AOUT
7786 if (IS_ELF
7787 #ifdef TE_PE
7788 || OUTPUT_FLAVOR == bfd_target_coff_flavour
7789 #endif
7790 )
7791 value += fixP->fx_where + fixP->fx_frag->fr_address;
7792 #endif
7793 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7794 if (IS_ELF)
7795 {
7796 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
7797
7798 if ((sym_seg == seg
7799 || (symbol_section_p (fixP->fx_addsy)
7800 && sym_seg != absolute_section))
7801 && !generic_force_reloc (fixP))
7802 {
7803 /* Yes, we add the values in twice. This is because
7804 bfd_install_relocation subtracts them out again. I think
7805 bfd_install_relocation is broken, but I don't dare change
7806 it. FIXME. */
7807 value += fixP->fx_where + fixP->fx_frag->fr_address;
7808 }
7809 }
7810 #endif
7811 #if defined (OBJ_COFF) && defined (TE_PE)
7812 /* For some reason, the PE format does not store a
7813 section address offset for a PC relative symbol. */
7814 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
7815 || S_IS_WEAK (fixP->fx_addsy))
7816 value += md_pcrel_from (fixP);
7817 #endif
7818 }
7819 #if defined (OBJ_COFF) && defined (TE_PE)
7820 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7821 {
7822 value -= S_GET_VALUE (fixP->fx_addsy);
7823 }
7824 #endif
7825
7826 /* Fix a few things - the dynamic linker expects certain values here,
7827 and we must not disappoint it. */
7828 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7829 if (IS_ELF && fixP->fx_addsy)
7830 switch (fixP->fx_r_type)
7831 {
7832 case BFD_RELOC_386_PLT32:
7833 case BFD_RELOC_X86_64_PLT32:
7834 /* Make the jump instruction point to the address of the operand. At
7835 runtime we merely add the offset to the actual PLT entry. */
7836 value = -4;
7837 break;
7838
7839 case BFD_RELOC_386_TLS_GD:
7840 case BFD_RELOC_386_TLS_LDM:
7841 case BFD_RELOC_386_TLS_IE_32:
7842 case BFD_RELOC_386_TLS_IE:
7843 case BFD_RELOC_386_TLS_GOTIE:
7844 case BFD_RELOC_386_TLS_GOTDESC:
7845 case BFD_RELOC_X86_64_TLSGD:
7846 case BFD_RELOC_X86_64_TLSLD:
7847 case BFD_RELOC_X86_64_GOTTPOFF:
7848 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
7849 value = 0; /* Fully resolved at runtime. No addend. */
7850 /* Fallthrough */
7851 case BFD_RELOC_386_TLS_LE:
7852 case BFD_RELOC_386_TLS_LDO_32:
7853 case BFD_RELOC_386_TLS_LE_32:
7854 case BFD_RELOC_X86_64_DTPOFF32:
7855 case BFD_RELOC_X86_64_DTPOFF64:
7856 case BFD_RELOC_X86_64_TPOFF32:
7857 case BFD_RELOC_X86_64_TPOFF64:
7858 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7859 break;
7860
7861 case BFD_RELOC_386_TLS_DESC_CALL:
7862 case BFD_RELOC_X86_64_TLSDESC_CALL:
7863 value = 0; /* Fully resolved at runtime. No addend. */
7864 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7865 fixP->fx_done = 0;
7866 return;
7867
7868 case BFD_RELOC_386_GOT32:
7869 case BFD_RELOC_X86_64_GOT32:
7870 value = 0; /* Fully resolved at runtime. No addend. */
7871 break;
7872
7873 case BFD_RELOC_VTABLE_INHERIT:
7874 case BFD_RELOC_VTABLE_ENTRY:
7875 fixP->fx_done = 0;
7876 return;
7877
7878 default:
7879 break;
7880 }
7881 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
7882 *valP = value;
7883 #endif /* !defined (TE_Mach) */
7884
7885 /* Are we finished with this relocation now? */
7886 if (fixP->fx_addsy == NULL)
7887 fixP->fx_done = 1;
7888 #if defined (OBJ_COFF) && defined (TE_PE)
7889 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
7890 {
7891 fixP->fx_done = 0;
7892 /* Remember value for tc_gen_reloc. */
7893 fixP->fx_addnumber = value;
7894 /* Clear out the frag for now. */
7895 value = 0;
7896 }
7897 #endif
7898 else if (use_rela_relocations)
7899 {
7900 fixP->fx_no_overflow = 1;
7901 /* Remember value for tc_gen_reloc. */
7902 fixP->fx_addnumber = value;
7903 value = 0;
7904 }
7905
7906 md_number_to_chars (p, value, fixP->fx_size);
7907 }
7908 \f
7909 char *
7910 md_atof (int type, char *litP, int *sizeP)
7911 {
7912 /* This outputs the LITTLENUMs in REVERSE order;
7913 in accord with the littleendian 386. */
7914 return ieee_md_atof (type, litP, sizeP, FALSE);
7915 }
7916 \f
7917 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
7918
7919 static char *
7920 output_invalid (int c)
7921 {
7922 if (ISPRINT (c))
7923 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7924 "'%c'", c);
7925 else
7926 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
7927 "(0x%x)", (unsigned char) c);
7928 return output_invalid_buf;
7929 }
7930
7931 /* REG_STRING starts *before* REGISTER_PREFIX. */
7932
7933 static const reg_entry *
7934 parse_real_register (char *reg_string, char **end_op)
7935 {
7936 char *s = reg_string;
7937 char *p;
7938 char reg_name_given[MAX_REG_NAME_SIZE + 1];
7939 const reg_entry *r;
7940
7941 /* Skip possible REGISTER_PREFIX and possible whitespace. */
7942 if (*s == REGISTER_PREFIX)
7943 ++s;
7944
7945 if (is_space_char (*s))
7946 ++s;
7947
7948 p = reg_name_given;
7949 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
7950 {
7951 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
7952 return (const reg_entry *) NULL;
7953 s++;
7954 }
7955
7956 /* For naked regs, make sure that we are not dealing with an identifier.
7957 This prevents confusing an identifier like `eax_var' with register
7958 `eax'. */
7959 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
7960 return (const reg_entry *) NULL;
7961
7962 *end_op = s;
7963
7964 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
7965
7966 /* Handle floating point regs, allowing spaces in the (i) part. */
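  /* For instance (illustrative), `%st', `%st(3)' and the spaced form
     `%st ( 3 )' all reach this point; plain `%st' matches the first
     regtab entry and the optional `(i)' selects st(0)..st(7).  */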
7967 if (r == i386_regtab /* %st is first entry of table */)
7968 {
7969 if (is_space_char (*s))
7970 ++s;
7971 if (*s == '(')
7972 {
7973 ++s;
7974 if (is_space_char (*s))
7975 ++s;
7976 if (*s >= '0' && *s <= '7')
7977 {
7978 int fpr = *s - '0';
7979 ++s;
7980 if (is_space_char (*s))
7981 ++s;
7982 if (*s == ')')
7983 {
7984 *end_op = s + 1;
7985 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
7986 know (r);
7987 return r + fpr;
7988 }
7989 }
7990 /* We have "%st(" then garbage. */
7991 return (const reg_entry *) NULL;
7992 }
7993 }
7994
7995 if (r == NULL || allow_pseudo_reg)
7996 return r;
7997
7998 if (operand_type_all_zero (&r->reg_type))
7999 return (const reg_entry *) NULL;
8000
8001 if ((r->reg_type.bitfield.reg32
8002 || r->reg_type.bitfield.sreg3
8003 || r->reg_type.bitfield.control
8004 || r->reg_type.bitfield.debug
8005 || r->reg_type.bitfield.test)
8006 && !cpu_arch_flags.bitfield.cpui386)
8007 return (const reg_entry *) NULL;
8008
8009 if (r->reg_type.bitfield.floatreg
8010 && !cpu_arch_flags.bitfield.cpu8087
8011 && !cpu_arch_flags.bitfield.cpu287
8012 && !cpu_arch_flags.bitfield.cpu387)
8013 return (const reg_entry *) NULL;
8014
8015 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8016 return (const reg_entry *) NULL;
8017
8018 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8019 return (const reg_entry *) NULL;
8020
8021 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8022 return (const reg_entry *) NULL;
8023
8024   /* Don't allow the fake index registers (%eiz/%riz) unless allow_index_reg is set.  */
8025 if (!allow_index_reg
8026 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8027 return (const reg_entry *) NULL;
8028
8029 if (((r->reg_flags & (RegRex64 | RegRex))
8030 || r->reg_type.bitfield.reg64)
8031 && (!cpu_arch_flags.bitfield.cpulm
8032 || !operand_type_equal (&r->reg_type, &control))
8033 && flag_code != CODE_64BIT)
8034 return (const reg_entry *) NULL;
8035
8036 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8037 return (const reg_entry *) NULL;
8038
8039 return r;
8040 }
8041
8042 /* REG_STRING starts *before* REGISTER_PREFIX. */
8043
8044 static const reg_entry *
8045 parse_register (char *reg_string, char **end_op)
8046 {
8047 const reg_entry *r;
8048
8049 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8050 r = parse_real_register (reg_string, end_op);
8051 else
8052 r = NULL;
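  /* Not a literal register name; it may still be a symbol equated to a
     register, e.g. (illustrative) `myreg = %eax', which lives in
     reg_section with an O_register value.  */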
8053 if (!r)
8054 {
8055 char *save = input_line_pointer;
8056 char c;
8057 symbolS *symbolP;
8058
8059 input_line_pointer = reg_string;
8060 c = get_symbol_end ();
8061 symbolP = symbol_find (reg_string);
8062 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8063 {
8064 const expressionS *e = symbol_get_value_expression (symbolP);
8065
8066 know (e->X_op == O_register);
8067 know (e->X_add_number >= 0
8068 && (valueT) e->X_add_number < i386_regtab_size);
8069 r = i386_regtab + e->X_add_number;
8070 *end_op = input_line_pointer;
8071 }
8072 *input_line_pointer = c;
8073 input_line_pointer = save;
8074 }
8075 return r;
8076 }
8077
8078 int
8079 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8080 {
8081 const reg_entry *r;
8082 char *end = input_line_pointer;
8083
8084 *end = *nextcharP;
8085 r = parse_register (name, &input_line_pointer);
8086 if (r && end <= input_line_pointer)
8087 {
8088 *nextcharP = *input_line_pointer;
8089 *input_line_pointer = 0;
8090 e->X_op = O_register;
8091 e->X_add_number = r - i386_regtab;
8092 return 1;
8093 }
8094 input_line_pointer = end;
8095 *end = 0;
8096 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8097 }
8098
8099 void
8100 md_operand (expressionS *e)
8101 {
8102 char *end;
8103 const reg_entry *r;
8104
8105 switch (*input_line_pointer)
8106 {
8107 case REGISTER_PREFIX:
8108 r = parse_real_register (input_line_pointer, &end);
8109 if (r)
8110 {
8111 e->X_op = O_register;
8112 e->X_add_number = r - i386_regtab;
8113 input_line_pointer = end;
8114 }
8115 break;
8116
8117 case '[':
8118 gas_assert (intel_syntax);
8119 end = input_line_pointer++;
8120 expression (e);
8121 if (*input_line_pointer == ']')
8122 {
8123 ++input_line_pointer;
8124 e->X_op_symbol = make_expr_symbol (e);
8125 e->X_add_symbol = NULL;
8126 e->X_add_number = 0;
8127 e->X_op = O_index;
8128 }
8129 else
8130 {
8131 e->X_op = O_absent;
8132 input_line_pointer = end;
8133 }
8134 break;
8135 }
8136 }
8137
8138 \f
8139 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8140 const char *md_shortopts = "kVQ:sqn";
8141 #else
8142 const char *md_shortopts = "qn";
8143 #endif
8144
8145 #define OPTION_32 (OPTION_MD_BASE + 0)
8146 #define OPTION_64 (OPTION_MD_BASE + 1)
8147 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8148 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8149 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8150 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8151 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8152 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8153 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8154 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8155 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8156 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8157 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8158 #define OPTION_X32 (OPTION_MD_BASE + 13)
8159
8160 struct option md_longopts[] =
8161 {
8162 {"32", no_argument, NULL, OPTION_32},
8163 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8164 || defined (TE_PE) || defined (TE_PEP))
8165 {"64", no_argument, NULL, OPTION_64},
8166 #endif
8167 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8168 {"x32", no_argument, NULL, OPTION_X32},
8169 #endif
8170 {"divide", no_argument, NULL, OPTION_DIVIDE},
8171 {"march", required_argument, NULL, OPTION_MARCH},
8172 {"mtune", required_argument, NULL, OPTION_MTUNE},
8173 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8174 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8175 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8176 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8177 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8178 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8179 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8180 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8181 {NULL, no_argument, NULL, 0}
8182 };
8183 size_t md_longopts_size = sizeof (md_longopts);
8184
8185 int
8186 md_parse_option (int c, char *arg)
8187 {
8188 unsigned int j;
8189 char *arch, *next;
8190
8191 switch (c)
8192 {
8193 case 'n':
8194 optimize_align_code = 0;
8195 break;
8196
8197 case 'q':
8198 quiet_warnings = 1;
8199 break;
8200
8201 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8202 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8203 should be emitted or not. FIXME: Not implemented. */
8204 case 'Q':
8205 break;
8206
8207 /* -V: SVR4 argument to print version ID. */
8208 case 'V':
8209 print_version_id ();
8210 break;
8211
8212 /* -k: Ignore for FreeBSD compatibility. */
8213 case 'k':
8214 break;
8215
8216 case 's':
8217 /* -s: On i386 Solaris, this tells the native assembler to use
8218 .stab instead of .stab.excl. We always use .stab anyhow. */
8219 break;
8220 #endif
8221 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8222 || defined (TE_PE) || defined (TE_PEP))
8223 case OPTION_64:
8224 {
8225 const char **list, **l;
8226
8227 list = bfd_target_list ();
8228 for (l = list; *l != NULL; l++)
8229 if (CONST_STRNEQ (*l, "elf64-x86-64")
8230 || strcmp (*l, "coff-x86-64") == 0
8231 || strcmp (*l, "pe-x86-64") == 0
8232 || strcmp (*l, "pei-x86-64") == 0)
8233 {
8234 default_arch = "x86_64";
8235 break;
8236 }
8237 if (*l == NULL)
8238 as_fatal (_("No compiled in support for x86_64"));
8239 free (list);
8240 }
8241 break;
8242 #endif
8243
8244 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8245 case OPTION_X32:
8246 if (IS_ELF)
8247 {
8248 const char **list, **l;
8249
8250 list = bfd_target_list ();
8251 for (l = list; *l != NULL; l++)
8252 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8253 {
8254 default_arch = "x86_64:32";
8255 break;
8256 }
8257 if (*l == NULL)
8258 as_fatal (_("No compiled in support for 32bit x86_64"));
8259 free (list);
8260 }
8261 else
8262 as_fatal (_("32bit x86_64 is only supported for ELF"));
8263 break;
8264 #endif
8265
8266 case OPTION_32:
8267 default_arch = "i386";
8268 break;
8269
8270 case OPTION_DIVIDE:
8271 #ifdef SVR4_COMMENT_CHARS
8272 {
8273 char *n, *t;
8274 const char *s;
8275
8276 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8277 t = n;
8278 for (s = i386_comment_chars; *s != '\0'; s++)
8279 if (*s != '/')
8280 *t++ = *s;
8281 *t = '\0';
8282 i386_comment_chars = n;
8283 }
8284 #endif
8285 break;
8286
8287 case OPTION_MARCH:
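      /* The argument is a processor name optionally followed by
	 `+'-separated ISA extensions, e.g. (illustrative)
	 -march=corei7+avx; each component is looked up in cpu_arch[].  */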
8288 arch = xstrdup (arg);
8289 do
8290 {
8291 if (*arch == '.')
8292 as_fatal (_("Invalid -march= option: `%s'"), arg);
8293 next = strchr (arch, '+');
8294 if (next)
8295 *next++ = '\0';
8296 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8297 {
8298 if (strcmp (arch, cpu_arch [j].name) == 0)
8299 {
8300 /* Processor. */
8301 if (! cpu_arch[j].flags.bitfield.cpui386)
8302 continue;
8303
8304 cpu_arch_name = cpu_arch[j].name;
8305 cpu_sub_arch_name = NULL;
8306 cpu_arch_flags = cpu_arch[j].flags;
8307 cpu_arch_isa = cpu_arch[j].type;
8308 cpu_arch_isa_flags = cpu_arch[j].flags;
8309 if (!cpu_arch_tune_set)
8310 {
8311 cpu_arch_tune = cpu_arch_isa;
8312 cpu_arch_tune_flags = cpu_arch_isa_flags;
8313 }
8314 break;
8315 }
8316 else if (*cpu_arch [j].name == '.'
8317 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8318 {
8319 		  /* ISA extension.  */
8320 i386_cpu_flags flags;
8321
8322 if (!cpu_arch[j].negated)
8323 flags = cpu_flags_or (cpu_arch_flags,
8324 cpu_arch[j].flags);
8325 else
8326 flags = cpu_flags_and_not (cpu_arch_flags,
8327 cpu_arch[j].flags);
8328 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8329 {
8330 if (cpu_sub_arch_name)
8331 {
8332 char *name = cpu_sub_arch_name;
8333 cpu_sub_arch_name = concat (name,
8334 cpu_arch[j].name,
8335 (const char *) NULL);
8336 free (name);
8337 }
8338 else
8339 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8340 cpu_arch_flags = flags;
8341 }
8342 break;
8343 }
8344 }
8345
8346 if (j >= ARRAY_SIZE (cpu_arch))
8347 as_fatal (_("Invalid -march= option: `%s'"), arg);
8348
8349 arch = next;
8350 }
8351       while (next != NULL);
8352 break;
8353
8354 case OPTION_MTUNE:
8355 if (*arg == '.')
8356 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8357 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8358 {
8359 if (strcmp (arg, cpu_arch [j].name) == 0)
8360 {
8361 cpu_arch_tune_set = 1;
8362 cpu_arch_tune = cpu_arch [j].type;
8363 cpu_arch_tune_flags = cpu_arch[j].flags;
8364 break;
8365 }
8366 }
8367 if (j >= ARRAY_SIZE (cpu_arch))
8368 as_fatal (_("Invalid -mtune= option: `%s'"), arg);
8369 break;
8370
8371 case OPTION_MMNEMONIC:
8372 if (strcasecmp (arg, "att") == 0)
8373 intel_mnemonic = 0;
8374 else if (strcasecmp (arg, "intel") == 0)
8375 intel_mnemonic = 1;
8376 else
8377 as_fatal (_("Invalid -mmnemonic= option: `%s'"), arg);
8378 break;
8379
8380 case OPTION_MSYNTAX:
8381 if (strcasecmp (arg, "att") == 0)
8382 intel_syntax = 0;
8383 else if (strcasecmp (arg, "intel") == 0)
8384 intel_syntax = 1;
8385 else
8386 as_fatal (_("Invalid -msyntax= option: `%s'"), arg);
8387 break;
8388
8389 case OPTION_MINDEX_REG:
8390 allow_index_reg = 1;
8391 break;
8392
8393 case OPTION_MNAKED_REG:
8394 allow_naked_reg = 1;
8395 break;
8396
8397 case OPTION_MOLD_GCC:
8398 old_gcc = 1;
8399 break;
8400
8401 case OPTION_MSSE2AVX:
8402 sse2avx = 1;
8403 break;
8404
8405 case OPTION_MSSE_CHECK:
8406 if (strcasecmp (arg, "error") == 0)
8407 sse_check = sse_check_error;
8408 else if (strcasecmp (arg, "warning") == 0)
8409 sse_check = sse_check_warning;
8410 else if (strcasecmp (arg, "none") == 0)
8411 sse_check = sse_check_none;
8412 else
8413 as_fatal (_("Invalid -msse-check= option: `%s'"), arg);
8414 break;
8415
8416 case OPTION_MAVXSCALAR:
8417 if (strcasecmp (arg, "128") == 0)
8418 avxscalar = vex128;
8419 else if (strcasecmp (arg, "256") == 0)
8420 avxscalar = vex256;
8421 else
8422 as_fatal (_("Invalid -mavxscalar= option: `%s'"), arg);
8423 break;
8424
8425 default:
8426 return 0;
8427 }
8428 return 1;
8429 }
8430
8431 #define MESSAGE_TEMPLATE \
8432 " "
8433
8434 static void
8435 show_arch (FILE *stream, int ext, int check)
8436 {
8437 static char message[] = MESSAGE_TEMPLATE;
8438 char *start = message + 27;
8439 char *p;
8440 int size = sizeof (MESSAGE_TEMPLATE);
8441 int left;
8442 const char *name;
8443 int len;
8444 unsigned int j;
8445
8446 p = start;
8447 left = size - (start - message);
8448 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8449 {
8450 /* Should it be skipped? */
8451 if (cpu_arch [j].skip)
8452 continue;
8453
8454 name = cpu_arch [j].name;
8455 len = cpu_arch [j].len;
8456 if (*name == '.')
8457 {
8458 /* It is an extension. Skip if we aren't asked to show it. */
8459 if (ext)
8460 {
8461 name++;
8462 len--;
8463 }
8464 else
8465 continue;
8466 }
8467 else if (ext)
8468 {
8469 	  /* It is a processor.  Skip if we only show extensions.  */
8470 continue;
8471 }
8472 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8473 {
8474 	  /* This processor cannot be selected with -march; skip it.  */
8475 continue;
8476 }
8477
8478 /* Reserve 2 spaces for ", " or ",\0" */
8479 left -= len + 2;
8480
8481 /* Check if there is any room. */
8482 if (left >= 0)
8483 {
8484 if (p != start)
8485 {
8486 *p++ = ',';
8487 *p++ = ' ';
8488 }
8489 p = mempcpy (p, name, len);
8490 }
8491 else
8492 {
8493 /* Output the current message now and start a new one. */
8494 *p++ = ',';
8495 *p = '\0';
8496 fprintf (stream, "%s\n", message);
8497 p = start;
8498 left = size - (start - message) - len - 2;
8499
8500 gas_assert (left >= 0);
8501
8502 p = mempcpy (p, name, len);
8503 }
8504 }
8505
8506 *p = '\0';
8507 fprintf (stream, "%s\n", message);
8508 }
8509
8510 void
8511 md_show_usage (FILE *stream)
8512 {
8513 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8514 fprintf (stream, _("\
8515 -Q ignored\n\
8516 -V print assembler version number\n\
8517 -k ignored\n"));
8518 #endif
8519 fprintf (stream, _("\
8520 -n Do not optimize code alignment\n\
8521 -q quieten some warnings\n"));
8522 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8523 fprintf (stream, _("\
8524 -s ignored\n"));
8525 #endif
8526 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8527 || defined (TE_PE) || defined (TE_PEP))
8528 fprintf (stream, _("\
8529 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8530 #endif
8531 #ifdef SVR4_COMMENT_CHARS
8532 fprintf (stream, _("\
8533 --divide do not treat `/' as a comment character\n"));
8534 #else
8535 fprintf (stream, _("\
8536 --divide ignored\n"));
8537 #endif
8538 fprintf (stream, _("\
8539 -march=CPU[,+EXTENSION...]\n\
8540 generate code for CPU and EXTENSION, CPU is one of:\n"));
8541 show_arch (stream, 0, 1);
8542 fprintf (stream, _("\
8543                           EXTENSION is a combination of:\n"));
8544 show_arch (stream, 1, 0);
8545 fprintf (stream, _("\
8546 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8547 show_arch (stream, 0, 0);
8548 fprintf (stream, _("\
8549 -msse2avx encode SSE instructions with VEX prefix\n"));
8550 fprintf (stream, _("\
8551 -msse-check=[none|error|warning]\n\
8552 check SSE instructions\n"));
8553 fprintf (stream, _("\
8554 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8555 length\n"));
8556 fprintf (stream, _("\
8557 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8558 fprintf (stream, _("\
8559 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8560 fprintf (stream, _("\
8561 -mindex-reg support pseudo index registers\n"));
8562 fprintf (stream, _("\
8563 -mnaked-reg don't require `%%' prefix for registers\n"));
8564 fprintf (stream, _("\
8565 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8566 }
8567
8568 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8569 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8570 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8571
8572 /* Pick the target format to use. */
8573
8574 const char *
8575 i386_target_format (void)
8576 {
8577 if (!strncmp (default_arch, "x86_64", 6))
8578 {
8579 update_code_flag (CODE_64BIT, 1);
8580 if (default_arch[6] == '\0')
8581 x86_elf_abi = X86_64_ABI;
8582 else
8583 x86_elf_abi = X86_64_X32_ABI;
8584 }
8585 else if (!strcmp (default_arch, "i386"))
8586 update_code_flag (CODE_32BIT, 1);
8587 else
8588 as_fatal (_("Unknown architecture"));
8589
8590 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8591 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8592 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8593 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8594
8595 switch (OUTPUT_FLAVOR)
8596 {
8597 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8598 case bfd_target_aout_flavour:
8599 return AOUT_TARGET_FORMAT;
8600 #endif
8601 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8602 # if defined (TE_PE) || defined (TE_PEP)
8603 case bfd_target_coff_flavour:
8604 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8605 # elif defined (TE_GO32)
8606 case bfd_target_coff_flavour:
8607 return "coff-go32";
8608 # else
8609 case bfd_target_coff_flavour:
8610 return "coff-i386";
8611 # endif
8612 #endif
8613 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8614 case bfd_target_elf_flavour:
8615 {
8616 const char *format;
8617
8618 switch (x86_elf_abi)
8619 {
8620 default:
8621 format = ELF_TARGET_FORMAT;
8622 break;
8623 case X86_64_ABI:
8624 use_rela_relocations = 1;
8625 object_64bit = 1;
8626 format = ELF_TARGET_FORMAT64;
8627 break;
8628 case X86_64_X32_ABI:
8629 use_rela_relocations = 1;
8630 object_64bit = 1;
8631 disallow_64bit_reloc = 1;
8632 format = ELF_TARGET_FORMAT32;
8633 break;
8634 }
8635 if (cpu_arch_isa == PROCESSOR_L1OM)
8636 {
8637 if (x86_elf_abi != X86_64_ABI)
8638 as_fatal (_("Intel L1OM is 64bit only"));
8639 return ELF_TARGET_L1OM_FORMAT;
8640 }
8641 else
8642 return format;
8643 }
8644 #endif
8645 #if defined (OBJ_MACH_O)
8646 case bfd_target_mach_o_flavour:
8647 return flag_code == CODE_64BIT ? "mach-o-x86-64" : "mach-o-i386";
8648 #endif
8649 default:
8650 abort ();
8651 return NULL;
8652 }
8653 }
8654
8655 #endif /* OBJ_MAYBE_ more than one */
8656
8657 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8658 void
8659 i386_elf_emit_arch_note (void)
8660 {
8661 if (IS_ELF && cpu_arch_name != NULL)
8662 {
8663 char *p;
8664 asection *seg = now_seg;
8665 subsegT subseg = now_subseg;
8666 Elf_Internal_Note i_note;
8667 Elf_External_Note e_note;
8668 asection *note_secp;
8669 int len;
8670
8671 /* Create the .note section. */
8672 note_secp = subseg_new (".note", 0);
8673 bfd_set_section_flags (stdoutput,
8674 note_secp,
8675 SEC_HAS_CONTENTS | SEC_READONLY);
8676
8677 /* Process the arch string. */
8678 len = strlen (cpu_arch_name);
8679
8680 i_note.namesz = len + 1;
8681 i_note.descsz = 0;
8682 i_note.type = NT_ARCH;
8683 p = frag_more (sizeof (e_note.namesz));
8684 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8685 p = frag_more (sizeof (e_note.descsz));
8686 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8687 p = frag_more (sizeof (e_note.type));
8688 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8689 p = frag_more (len + 1);
8690 strcpy (p, cpu_arch_name);
8691
8692 frag_align (2, 0, 0);
8693
8694 subseg_set (seg, subseg);
8695 }
8696 }
8697 #endif
8698 \f
8699 symbolS *
8700 md_undefined_symbol (name)
8701 char *name;
8702 {
8703 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8704 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8705 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8706 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8707 {
8708 if (!GOT_symbol)
8709 {
8710 if (symbol_find (name))
8711 as_bad (_("GOT already in symbol table"));
8712 GOT_symbol = symbol_new (name, undefined_section,
8713 (valueT) 0, &zero_address_frag);
8714 	}
8715 return GOT_symbol;
8716 }
8717 return 0;
8718 }
8719
8720 /* Round up a section size to the appropriate boundary. */
8721
8722 valueT
8723 md_section_align (segment, size)
8724 segT segment ATTRIBUTE_UNUSED;
8725 valueT size;
8726 {
8727 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8728 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8729 {
8730 /* For a.out, force the section size to be aligned. If we don't do
8731 this, BFD will align it for us, but it will not write out the
8732 final bytes of the section. This may be a bug in BFD, but it is
8733 easier to fix it here since that is how the other a.out targets
8734 work. */
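      /* A worked example (illustrative): with a section alignment of 4,
	 i.e. a 16 byte boundary, a size of 0x1d rounds up to
	 (0x1d + 0xf) & ~0xf = 0x20.  */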
8735 int align;
8736
8737 align = bfd_get_section_alignment (stdoutput, segment);
8738 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
8739 }
8740 #endif
8741
8742 return size;
8743 }
8744
8745 /* On the i386, PC-relative offsets are relative to the start of the
8746 next instruction. That is, the address of the offset, plus its
8747 size, since the offset is always the last part of the insn. */
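/* For example (illustrative): a `call foo' whose 4 byte displacement is
   placed at fx_frag->fr_address + fx_where == 0x100 gives
   md_pcrel_from () == 0x104, the address of the following instruction,
   so the stored displacement is the target address minus 0x104.  */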
8748
8749 long
8750 md_pcrel_from (fixS *fixP)
8751 {
8752 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
8753 }
8754
8755 #ifndef I386COFF
8756
8757 static void
8758 s_bss (int ignore ATTRIBUTE_UNUSED)
8759 {
8760 int temp;
8761
8762 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8763 if (IS_ELF)
8764 obj_elf_section_change_hook ();
8765 #endif
8766 temp = get_absolute_expression ();
8767 subseg_set (bss_section, (subsegT) temp);
8768 demand_empty_rest_of_line ();
8769 }
8770
8771 #endif
8772
8773 void
8774 i386_validate_fix (fixS *fixp)
8775 {
8776 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
8777 {
8778 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
8779 {
8780 if (!object_64bit)
8781 abort ();
8782 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
8783 }
8784 else
8785 {
8786 if (!object_64bit)
8787 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
8788 else
8789 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
8790 }
8791 fixp->fx_subsy = 0;
8792 }
8793 }
8794
8795 arelent *
8796 tc_gen_reloc (section, fixp)
8797 asection *section ATTRIBUTE_UNUSED;
8798 fixS *fixp;
8799 {
8800 arelent *rel;
8801 bfd_reloc_code_real_type code;
8802
8803 switch (fixp->fx_r_type)
8804 {
8805 case BFD_RELOC_X86_64_PLT32:
8806 case BFD_RELOC_X86_64_GOT32:
8807 case BFD_RELOC_X86_64_GOTPCREL:
8808 case BFD_RELOC_386_PLT32:
8809 case BFD_RELOC_386_GOT32:
8810 case BFD_RELOC_386_GOTOFF:
8811 case BFD_RELOC_386_GOTPC:
8812 case BFD_RELOC_386_TLS_GD:
8813 case BFD_RELOC_386_TLS_LDM:
8814 case BFD_RELOC_386_TLS_LDO_32:
8815 case BFD_RELOC_386_TLS_IE_32:
8816 case BFD_RELOC_386_TLS_IE:
8817 case BFD_RELOC_386_TLS_GOTIE:
8818 case BFD_RELOC_386_TLS_LE_32:
8819 case BFD_RELOC_386_TLS_LE:
8820 case BFD_RELOC_386_TLS_GOTDESC:
8821 case BFD_RELOC_386_TLS_DESC_CALL:
8822 case BFD_RELOC_X86_64_TLSGD:
8823 case BFD_RELOC_X86_64_TLSLD:
8824 case BFD_RELOC_X86_64_DTPOFF32:
8825 case BFD_RELOC_X86_64_DTPOFF64:
8826 case BFD_RELOC_X86_64_GOTTPOFF:
8827 case BFD_RELOC_X86_64_TPOFF32:
8828 case BFD_RELOC_X86_64_TPOFF64:
8829 case BFD_RELOC_X86_64_GOTOFF64:
8830 case BFD_RELOC_X86_64_GOTPC32:
8831 case BFD_RELOC_X86_64_GOT64:
8832 case BFD_RELOC_X86_64_GOTPCREL64:
8833 case BFD_RELOC_X86_64_GOTPC64:
8834 case BFD_RELOC_X86_64_GOTPLT64:
8835 case BFD_RELOC_X86_64_PLTOFF64:
8836 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8837 case BFD_RELOC_X86_64_TLSDESC_CALL:
8838 case BFD_RELOC_RVA:
8839 case BFD_RELOC_VTABLE_ENTRY:
8840 case BFD_RELOC_VTABLE_INHERIT:
8841 #ifdef TE_PE
8842 case BFD_RELOC_32_SECREL:
8843 #endif
8844 code = fixp->fx_r_type;
8845 break;
8846 case BFD_RELOC_X86_64_32S:
8847 if (!fixp->fx_pcrel)
8848 {
8849 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
8850 code = fixp->fx_r_type;
8851 break;
8852 }
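      /* Fall through.  */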
8853 default:
8854 if (fixp->fx_pcrel)
8855 {
8856 switch (fixp->fx_size)
8857 {
8858 default:
8859 as_bad_where (fixp->fx_file, fixp->fx_line,
8860 _("can not do %d byte pc-relative relocation"),
8861 fixp->fx_size);
8862 code = BFD_RELOC_32_PCREL;
8863 break;
8864 case 1: code = BFD_RELOC_8_PCREL; break;
8865 case 2: code = BFD_RELOC_16_PCREL; break;
8866 case 4: code = BFD_RELOC_32_PCREL; break;
8867 #ifdef BFD64
8868 case 8: code = BFD_RELOC_64_PCREL; break;
8869 #endif
8870 }
8871 }
8872 else
8873 {
8874 switch (fixp->fx_size)
8875 {
8876 default:
8877 as_bad_where (fixp->fx_file, fixp->fx_line,
8878 _("can not do %d byte relocation"),
8879 fixp->fx_size);
8880 code = BFD_RELOC_32;
8881 break;
8882 case 1: code = BFD_RELOC_8; break;
8883 case 2: code = BFD_RELOC_16; break;
8884 case 4: code = BFD_RELOC_32; break;
8885 #ifdef BFD64
8886 case 8: code = BFD_RELOC_64; break;
8887 #endif
8888 }
8889 }
8890 break;
8891 }
8892
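  /* A direct reference to _GLOBAL_OFFSET_TABLE_ itself, as in the usual
     PIC prologue `addl $_GLOBAL_OFFSET_TABLE_, %ebx' (illustrative), is
     converted into a GOT-relative PC relocation below.  */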
8893 if ((code == BFD_RELOC_32
8894 || code == BFD_RELOC_32_PCREL
8895 || code == BFD_RELOC_X86_64_32S)
8896 && GOT_symbol
8897 && fixp->fx_addsy == GOT_symbol)
8898 {
8899 if (!object_64bit)
8900 code = BFD_RELOC_386_GOTPC;
8901 else
8902 code = BFD_RELOC_X86_64_GOTPC32;
8903 }
8904 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
8905 && GOT_symbol
8906 && fixp->fx_addsy == GOT_symbol)
8907 {
8908 code = BFD_RELOC_X86_64_GOTPC64;
8909 }
8910
8911 rel = (arelent *) xmalloc (sizeof (arelent));
8912 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
8913 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8914
8915 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
8916
8917 if (!use_rela_relocations)
8918 {
8919 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
8920 vtable entry to be used in the relocation's section offset. */
8921 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
8922 rel->address = fixp->fx_offset;
8923 #if defined (OBJ_COFF) && defined (TE_PE)
8924 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
8925 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
8926 else
8927 #endif
8928 rel->addend = 0;
8929 }
8930   /* Use rela relocations in 64-bit mode.  */
8931 else
8932 {
8933 if (disallow_64bit_reloc)
8934 switch (code)
8935 {
8936 case BFD_RELOC_64:
8937 case BFD_RELOC_X86_64_DTPOFF64:
8938 case BFD_RELOC_X86_64_TPOFF64:
8939 case BFD_RELOC_64_PCREL:
8940 case BFD_RELOC_X86_64_GOTOFF64:
8941 case BFD_RELOC_X86_64_GOT64:
8942 case BFD_RELOC_X86_64_GOTPCREL64:
8943 case BFD_RELOC_X86_64_GOTPC64:
8944 case BFD_RELOC_X86_64_GOTPLT64:
8945 case BFD_RELOC_X86_64_PLTOFF64:
8946 as_bad_where (fixp->fx_file, fixp->fx_line,
8947 _("cannot represent relocation type %s in x32 mode"),
8948 bfd_get_reloc_code_name (code));
8949 break;
8950 default:
8951 break;
8952 }
8953
8954 if (!fixp->fx_pcrel)
8955 rel->addend = fixp->fx_offset;
8956 else
8957 switch (code)
8958 {
8959 case BFD_RELOC_X86_64_PLT32:
8960 case BFD_RELOC_X86_64_GOT32:
8961 case BFD_RELOC_X86_64_GOTPCREL:
8962 case BFD_RELOC_X86_64_TLSGD:
8963 case BFD_RELOC_X86_64_TLSLD:
8964 case BFD_RELOC_X86_64_GOTTPOFF:
8965 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8966 case BFD_RELOC_X86_64_TLSDESC_CALL:
8967 rel->addend = fixp->fx_offset - fixp->fx_size;
8968 break;
8969 default:
8970 rel->addend = (section->vma
8971 - fixp->fx_size
8972 + fixp->fx_addnumber
8973 + md_pcrel_from (fixp));
8974 break;
8975 }
8976 }
8977
8978 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
8979 if (rel->howto == NULL)
8980 {
8981 as_bad_where (fixp->fx_file, fixp->fx_line,
8982 _("cannot represent relocation type %s"),
8983 bfd_get_reloc_code_name (code));
8984 /* Set howto to a garbage value so that we can keep going. */
8985 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
8986 gas_assert (rel->howto != NULL);
8987 }
8988
8989 return rel;
8990 }
8991
8992 #include "tc-i386-intel.c"
8993
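/* Map a register name used in a .cfi_* directive operand to its DWARF2
   register number, e.g. (illustrative) `%ebp' -> 5 in 32-bit code and
   `%rbp' -> 6 in 64-bit code, via the dw2_regnum table in each reg_entry.  */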
8994 void
8995 tc_x86_parse_to_dw2regnum (expressionS *exp)
8996 {
8997 int saved_naked_reg;
8998 char saved_register_dot;
8999
9000 saved_naked_reg = allow_naked_reg;
9001 allow_naked_reg = 1;
9002 saved_register_dot = register_chars['.'];
9003 register_chars['.'] = '.';
9004 allow_pseudo_reg = 1;
9005 expression_and_evaluate (exp);
9006 allow_pseudo_reg = 0;
9007 register_chars['.'] = saved_register_dot;
9008 allow_naked_reg = saved_naked_reg;
9009
9010 if (exp->X_op == O_register && exp->X_add_number >= 0)
9011 {
9012 if ((addressT) exp->X_add_number < i386_regtab_size)
9013 {
9014 exp->X_op = O_constant;
9015 exp->X_add_number = i386_regtab[exp->X_add_number]
9016 .dw2_regnum[flag_code >> 1];
9017 }
9018 else
9019 exp->X_op = O_illegal;
9020 }
9021 }
9022
9023 void
9024 tc_x86_frame_initial_instructions (void)
9025 {
9026 static unsigned int sp_regno[2];
9027
9028 if (!sp_regno[flag_code >> 1])
9029 {
9030 char *saved_input = input_line_pointer;
9031 char sp[][4] = {"esp", "rsp"};
9032 expressionS exp;
9033
9034 input_line_pointer = sp[flag_code >> 1];
9035 tc_x86_parse_to_dw2regnum (&exp);
9036 gas_assert (exp.X_op == O_constant);
9037 sp_regno[flag_code >> 1] = exp.X_add_number;
9038 input_line_pointer = saved_input;
9039 }
9040
9041 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9042 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9043 }
9044
9045 int
9046 i386_elf_section_type (const char *str, size_t len)
9047 {
9048 if (flag_code == CODE_64BIT
9049 && len == sizeof ("unwind") - 1
9050 && strncmp (str, "unwind", 6) == 0)
9051 return SHT_X86_64_UNWIND;
9052
9053 return -1;
9054 }
9055
9056 #ifdef TE_SOLARIS
9057 void
9058 i386_solaris_fix_up_eh_frame (segT sec)
9059 {
9060 if (flag_code == CODE_64BIT)
9061 elf_section_type (sec) = SHT_X86_64_UNWIND;
9062 }
9063 #endif
9064
9065 #ifdef TE_PE
9066 void
9067 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9068 {
9069 expressionS exp;
9070
9071 exp.X_op = O_secrel;
9072 exp.X_add_symbol = symbol;
9073 exp.X_add_number = 0;
9074 emit_expr (&exp, size);
9075 }
9076 #endif
9077
9078 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9079 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9080
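/* For instance (illustrative), `.section .lrodata,"al",@progbits' uses the
   `l' flag letter handled below to request SHF_X86_64_LARGE.  */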
9081 bfd_vma
9082 x86_64_section_letter (int letter, char **ptr_msg)
9083 {
9084 if (flag_code == CODE_64BIT)
9085 {
9086 if (letter == 'l')
9087 return SHF_X86_64_LARGE;
9088
9089 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9090 }
9091 else
9092 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9093 return -1;
9094 }
9095
9096 bfd_vma
9097 x86_64_section_word (char *str, size_t len)
9098 {
9099 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9100 return SHF_X86_64_LARGE;
9101
9102 return -1;
9103 }
9104
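/* Handler for the .largecomm directive: like .comm, but in 64-bit mode the
   symbol is allocated against the large common / .lbss sections, e.g.
   (illustrative) `.largecomm big_buf,0x200000,32'.  */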
9105 static void
9106 handle_large_common (int small ATTRIBUTE_UNUSED)
9107 {
9108 if (flag_code != CODE_64BIT)
9109 {
9110 s_comm_internal (0, elf_common_parse);
9111 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9112 }
9113 else
9114 {
9115 static segT lbss_section;
9116 asection *saved_com_section_ptr = elf_com_section_ptr;
9117 asection *saved_bss_section = bss_section;
9118
9119 if (lbss_section == NULL)
9120 {
9121 flagword applicable;
9122 segT seg = now_seg;
9123 subsegT subseg = now_subseg;
9124
9125 /* The .lbss section is for local .largecomm symbols. */
9126 lbss_section = subseg_new (".lbss", 0);
9127 applicable = bfd_applicable_section_flags (stdoutput);
9128 bfd_set_section_flags (stdoutput, lbss_section,
9129 applicable & SEC_ALLOC);
9130 seg_info (lbss_section)->bss = 1;
9131
9132 subseg_set (seg, subseg);
9133 }
9134
9135 elf_com_section_ptr = &_bfd_elf_large_com_section;
9136 bss_section = lbss_section;
9137
9138 s_comm_internal (0, elf_common_parse);
9139
9140 elf_com_section_ptr = saved_com_section_ptr;
9141 bss_section = saved_bss_section;
9142 }
9143 }
9144 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */