1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .alias
28 .eb
29 .estate
30 .lb
31 .popsection
32 .previous
33 .psr
34 .pushsection
35 - labels are wrong if automatic alignment is introduced
36 (e.g., check out the second real10 definition in test-data.s)
37 - DV-related stuff:
38 <reg>.safe_across_calls and any other DV-related directives I don't
39 have documentation for.
40 verify mod-sched-brs reads/writes are checked/marked (and other
41 notes)
42
43 */
44
45 #include "as.h"
46 #include "safe-ctype.h"
47 #include "dwarf2dbg.h"
48 #include "subsegs.h"
49
50 #include "opcode/ia64.h"
51
52 #include "elf/ia64.h"
53
54 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
55 #define MIN(a,b) ((a) < (b) ? (a) : (b))
56
57 #define NUM_SLOTS 4
58 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
59 #define CURR_SLOT md.slot[md.curr_slot]
60
61 #define O_pseudo_fixup (O_max + 1)
62
63 enum special_section
64 {
65 /* IA-64 ABI section pseudo-ops. */
66 SPECIAL_SECTION_BSS = 0,
67 SPECIAL_SECTION_SBSS,
68 SPECIAL_SECTION_SDATA,
69 SPECIAL_SECTION_RODATA,
70 SPECIAL_SECTION_COMMENT,
71 SPECIAL_SECTION_UNWIND,
72 SPECIAL_SECTION_UNWIND_INFO,
73 /* HPUX specific section pseudo-ops. */
74 SPECIAL_SECTION_INIT_ARRAY,
75 SPECIAL_SECTION_FINI_ARRAY,
76 };
77
78 enum reloc_func
79 {
80 FUNC_FPTR_RELATIVE,
81 FUNC_GP_RELATIVE,
82 FUNC_LT_RELATIVE,
83 FUNC_PC_RELATIVE,
84 FUNC_PLT_RELATIVE,
85 FUNC_SEC_RELATIVE,
86 FUNC_SEG_RELATIVE,
87 FUNC_LTV_RELATIVE,
88 FUNC_LT_FPTR_RELATIVE,
89 FUNC_IPLT_RELOC,
90 };
91
92 enum reg_symbol
93 {
94 REG_GR = 0,
95 REG_FR = (REG_GR + 128),
96 REG_AR = (REG_FR + 128),
97 REG_CR = (REG_AR + 128),
98 REG_P = (REG_CR + 128),
99 REG_BR = (REG_P + 64),
100 REG_IP = (REG_BR + 8),
101 REG_CFM,
102 REG_PR,
103 REG_PR_ROT,
104 REG_PSR,
105 REG_PSR_L,
106 REG_PSR_UM,
107 /* The following are pseudo-registers for use by gas only. */
108 IND_CPUID,
109 IND_DBR,
110 IND_DTR,
111 IND_ITR,
112 IND_IBR,
113 IND_MEM,
114 IND_MSR,
115 IND_PKR,
116 IND_PMC,
117 IND_PMD,
118 IND_RR,
119 /* The following pseudo-registers are used for unwind directives only: */
120 REG_PSP,
121 REG_PRIUNAT,
122 REG_NUM
123 };
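/* For illustration: register operands share this single flat name space,
   so r4 maps to REG_GR + 4 = 4, f3 to REG_FR + 3 = 131, p5 to
   REG_P + 5 = 517, and b0 to REG_BR + 0 = 576.  */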
124
125 enum dynreg_type
126 {
127 DYNREG_GR = 0, /* dynamic general purpose register */
128 DYNREG_FR, /* dynamic floating point register */
129 DYNREG_PR, /* dynamic predicate register */
130 DYNREG_NUM_TYPES
131 };
132
133 enum operand_match_result
134 {
135 OPERAND_MATCH,
136 OPERAND_OUT_OF_RANGE,
137 OPERAND_MISMATCH
138 };
139
140 /* On the ia64, we can't know the address of a text label until the
141 instructions are packed into a bundle. To handle this, we keep
142 track of the list of labels that appear in front of each
143 instruction. */
144 struct label_fix
145 {
146 struct label_fix *next;
147 struct symbol *sym;
148 };
149
150 extern int target_big_endian;
151
152 /* Characters which always start a comment. */
153 const char comment_chars[] = "";
154
155 /* Characters which start a comment at the beginning of a line. */
156 const char line_comment_chars[] = "#";
157
158 /* Characters which may be used to separate multiple commands on a
159 single line. */
160 const char line_separator_chars[] = ";";
161
162 /* Characters which are used to indicate an exponent in a floating
163 point number. */
164 const char EXP_CHARS[] = "eE";
165
166 /* Characters which mean that a number is a floating point constant,
167 as in 0d1.0. */
168 const char FLT_CHARS[] = "rRsSfFdDxXpP";
169
170 /* ia64-specific option processing: */
171
172 const char *md_shortopts = "m:N:x::";
173
174 struct option md_longopts[] =
175 {
176 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
177 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
178 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
179 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
180 };
181
182 size_t md_longopts_size = sizeof (md_longopts);
183
184 static struct
185 {
186 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
187 struct hash_control *reg_hash; /* register name hash table */
188 struct hash_control *dynreg_hash; /* dynamic register hash table */
189 struct hash_control *const_hash; /* constant hash table */
190 struct hash_control *entry_hash; /* code entry hint hash table */
191
192 symbolS *regsym[REG_NUM];
193
194 /* If X_op != O_absent, the register name for the instruction's
195 qualifying predicate. Otherwise, p0 is assumed for instructions
196 that are predicatable. */
197 expressionS qp;
198
199 unsigned int
200 manual_bundling : 1,
201 debug_dv: 1,
202 detect_dv: 1,
203 explicit_mode : 1, /* which mode we're in */
204 default_explicit_mode : 1, /* which mode is the default */
205 mode_explicitly_set : 1, /* was the current mode explicitly set? */
206 auto_align : 1,
207 keep_pending_output : 1;
208
209 /* Each bundle consists of up to three instructions. We keep
210 track of the four most recent instructions so we can correctly set
211 the end_of_insn_group for the last instruction in a bundle. */
212 int curr_slot;
213 int num_slots_in_use;
214 struct slot
215 {
216 unsigned int
217 end_of_insn_group : 1,
218 manual_bundling_on : 1,
219 manual_bundling_off : 1;
220 signed char user_template; /* user-selected template, if any */
221 unsigned char qp_regno; /* qualifying predicate */
222 /* This duplicates a good fraction of "struct fix" but we
223 can't use a "struct fix" instead since we can't call
224 fix_new_exp() until we know the address of the instruction. */
225 int num_fixups;
226 struct insn_fix
227 {
228 bfd_reloc_code_real_type code;
229 enum ia64_opnd opnd; /* type of operand in need of fix */
230 unsigned int is_pcrel : 1; /* is operand pc-relative? */
231 expressionS expr; /* the value to be inserted */
232 }
233 fixup[2]; /* at most two fixups per insn */
234 struct ia64_opcode *idesc;
235 struct label_fix *label_fixups;
236 struct label_fix *tag_fixups;
237 struct unw_rec_list *unwind_record; /* Unwind directive. */
238 expressionS opnd[6];
239 char *src_file;
240 unsigned int src_line;
241 struct dwarf2_line_info debug_line;
242 }
243 slot[NUM_SLOTS];
244
245 segT last_text_seg;
246
247 struct dynreg
248 {
249 struct dynreg *next; /* next dynamic register */
250 const char *name;
251 unsigned short base; /* the base register number */
252 unsigned short num_regs; /* # of registers in this set */
253 }
254 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
255
256 flagword flags; /* ELF-header flags */
257
258 struct mem_offset {
259 unsigned hint:1; /* is this hint currently valid? */
260 bfd_vma offset; /* mem.offset offset */
261 bfd_vma base; /* mem.offset base */
262 } mem_offset;
263
264 int path; /* number of alt. entry points seen */
265 const char **entry_labels; /* labels of all alternate paths in
266 the current DV-checking block. */
267 int maxpaths; /* size currently allocated for
268 entry_labels */
269 /* Support for hardware errata workarounds. */
270
271 /* Record data about the last three insn groups. */
272 struct group
273 {
274 /* B-step workaround.
275 For each predicate register, this is set if the corresponding insn
276 group conditionally sets this register with one of the affected
277 instructions. */
278 int p_reg_set[64];
279 /* B-step workaround.
280 For each general register, this is set if the corresponding insn
281 a) is conditional on one of the predicate registers for which
282 P_REG_SET is 1 in the corresponding entry of the previous group, and
283 b) sets this general register with one of the affected
284 instructions. */
285 int g_reg_set_conditionally[128];
286 } last_groups[3];
287 int group_idx;
288
289 int pointer_size; /* size in bytes of a pointer */
290 int pointer_size_shift; /* shift size of a pointer for alignment */
291 }
292 md;
293
294 /* application registers: */
295
296 #define AR_K0 0
297 #define AR_K7 7
298 #define AR_RSC 16
299 #define AR_BSP 17
300 #define AR_BSPSTORE 18
301 #define AR_RNAT 19
302 #define AR_UNAT 36
303 #define AR_FPSR 40
304 #define AR_ITC 44
305 #define AR_PFS 64
306 #define AR_LC 65
307
308 static const struct
309 {
310 const char *name;
311 int regnum;
312 }
313 ar[] =
314 {
315 {"ar.k0", 0}, {"ar.k1", 1}, {"ar.k2", 2}, {"ar.k3", 3},
316 {"ar.k4", 4}, {"ar.k5", 5}, {"ar.k6", 6}, {"ar.k7", 7},
317 {"ar.rsc", 16}, {"ar.bsp", 17},
318 {"ar.bspstore", 18}, {"ar.rnat", 19},
319 {"ar.fcr", 21}, {"ar.eflag", 24},
320 {"ar.csd", 25}, {"ar.ssd", 26},
321 {"ar.cflg", 27}, {"ar.fsr", 28},
322 {"ar.fir", 29}, {"ar.fdr", 30},
323 {"ar.ccv", 32}, {"ar.unat", 36},
324 {"ar.fpsr", 40}, {"ar.itc", 44},
325 {"ar.pfs", 64}, {"ar.lc", 65},
326 {"ar.ec", 66},
327 };
328
329 #define CR_IPSR 16
330 #define CR_ISR 17
331 #define CR_IIP 19
332 #define CR_IFA 20
333 #define CR_ITIR 21
334 #define CR_IIPA 22
335 #define CR_IFS 23
336 #define CR_IIM 24
337 #define CR_IHA 25
338 #define CR_IVR 65
339 #define CR_TPR 66
340 #define CR_EOI 67
341 #define CR_IRR0 68
342 #define CR_IRR3 71
343 #define CR_LRR0 80
344 #define CR_LRR1 81
345
346 /* control registers: */
347 static const struct
348 {
349 const char *name;
350 int regnum;
351 }
352 cr[] =
353 {
354 {"cr.dcr", 0},
355 {"cr.itm", 1},
356 {"cr.iva", 2},
357 {"cr.pta", 8},
358 {"cr.gpta", 9},
359 {"cr.ipsr", 16},
360 {"cr.isr", 17},
361 {"cr.iip", 19},
362 {"cr.ifa", 20},
363 {"cr.itir", 21},
364 {"cr.iipa", 22},
365 {"cr.ifs", 23},
366 {"cr.iim", 24},
367 {"cr.iha", 25},
368 {"cr.lid", 64},
369 {"cr.ivr", 65},
370 {"cr.tpr", 66},
371 {"cr.eoi", 67},
372 {"cr.irr0", 68},
373 {"cr.irr1", 69},
374 {"cr.irr2", 70},
375 {"cr.irr3", 71},
376 {"cr.itv", 72},
377 {"cr.pmv", 73},
378 {"cr.cmcv", 74},
379 {"cr.lrr0", 80},
380 {"cr.lrr1", 81}
381 };
382
383 #define PSR_MFL 4
384 #define PSR_IC 13
385 #define PSR_DFL 18
386 #define PSR_CPL 32
387
388 static const struct const_desc
389 {
390 const char *name;
391 valueT value;
392 }
393 const_bits[] =
394 {
395 /* PSR constant masks: */
396
397 /* 0: reserved */
398 {"psr.be", ((valueT) 1) << 1},
399 {"psr.up", ((valueT) 1) << 2},
400 {"psr.ac", ((valueT) 1) << 3},
401 {"psr.mfl", ((valueT) 1) << 4},
402 {"psr.mfh", ((valueT) 1) << 5},
403 /* 6-12: reserved */
404 {"psr.ic", ((valueT) 1) << 13},
405 {"psr.i", ((valueT) 1) << 14},
406 {"psr.pk", ((valueT) 1) << 15},
407 /* 16: reserved */
408 {"psr.dt", ((valueT) 1) << 17},
409 {"psr.dfl", ((valueT) 1) << 18},
410 {"psr.dfh", ((valueT) 1) << 19},
411 {"psr.sp", ((valueT) 1) << 20},
412 {"psr.pp", ((valueT) 1) << 21},
413 {"psr.di", ((valueT) 1) << 22},
414 {"psr.si", ((valueT) 1) << 23},
415 {"psr.db", ((valueT) 1) << 24},
416 {"psr.lp", ((valueT) 1) << 25},
417 {"psr.tb", ((valueT) 1) << 26},
418 {"psr.rt", ((valueT) 1) << 27},
419 /* 28-31: reserved */
420 /* 32-33: cpl (current privilege level) */
421 {"psr.is", ((valueT) 1) << 34},
422 {"psr.mc", ((valueT) 1) << 35},
423 {"psr.it", ((valueT) 1) << 36},
424 {"psr.id", ((valueT) 1) << 37},
425 {"psr.da", ((valueT) 1) << 38},
426 {"psr.dd", ((valueT) 1) << 39},
427 {"psr.ss", ((valueT) 1) << 40},
428 /* 41-42: ri (restart instruction) */
429 {"psr.ed", ((valueT) 1) << 43},
430 {"psr.bn", ((valueT) 1) << 44},
431 };
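/* For illustration: these constants let PSR system-mask operands be written
   symbolically; e.g. "ssm psr.ic" is expected to assemble with the
   immediate (1 << 13) and "rsm psr.i" with (1 << 14), instead of raw hex
   masks.  */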
432
433 /* indirect register-sets/memory: */
434
435 static const struct
436 {
437 const char *name;
438 int regnum;
439 }
440 indirect_reg[] =
441 {
442 { "CPUID", IND_CPUID },
443 { "cpuid", IND_CPUID },
444 { "dbr", IND_DBR },
445 { "dtr", IND_DTR },
446 { "itr", IND_ITR },
447 { "ibr", IND_IBR },
448 { "msr", IND_MSR },
449 { "pkr", IND_PKR },
450 { "pmc", IND_PMC },
451 { "pmd", IND_PMD },
452 { "rr", IND_RR },
453 };
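/* For illustration: these names denote the indirect register files, which
   are always subscripted by a general register in assembly source, e.g.
   "mov r16 = pmc[r3]" or "mov rr[r4] = r5".  */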
454
455 /* Pseudo functions used to indicate relocation types (these functions
456 start with an at sign (@)). */
457 static struct
458 {
459 const char *name;
460 enum pseudo_type
461 {
462 PSEUDO_FUNC_NONE,
463 PSEUDO_FUNC_RELOC,
464 PSEUDO_FUNC_CONST,
465 PSEUDO_FUNC_REG,
466 PSEUDO_FUNC_FLOAT
467 }
468 type;
469 union
470 {
471 unsigned long ival;
472 symbolS *sym;
473 }
474 u;
475 }
476 pseudo_func[] =
477 {
478 /* reloc pseudo functions (these must come first!): */
479 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
480 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
481 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
482 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
483 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
484 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
485 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
486 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
487 { "", 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
488 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
489
490 /* mbtype4 constants: */
491 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
492 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
493 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
494 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
495 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
496
497 /* fclass constants: */
498 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
499 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
500 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
501 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
502 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
503 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
504 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
505 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
506 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
507
508 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
509
510 /* unwind-related constants: */
511 { "svr4", PSEUDO_FUNC_CONST, { 0 } },
512 { "hpux", PSEUDO_FUNC_CONST, { 1 } },
513 { "nt", PSEUDO_FUNC_CONST, { 2 } },
514
515 /* unwind-related registers: */
516 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
517 };
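/* For illustration: the reloc entries above are written in assembly as
   @name(expression), e.g.

	addl r14 = @gprel(sym), gp	// gp-relative displacement of sym
	addl r14 = @ltoff(sym), gp	// offset of sym's linkage table entry
	;;
	ld8 r14 = [r14]

   The resulting O_pseudo_fixup expressions are later converted into the
   matching BFD relocations (see ia64_gen_real_reloc_type).  */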
518
519 /* 41-bit nop opcodes (one per unit): */
520 static const bfd_vma nop[IA64_NUM_UNITS] =
521 {
522 0x0000000000LL, /* NIL => break 0 */
523 0x0008000000LL, /* I-unit nop */
524 0x0008000000LL, /* M-unit nop */
525 0x4000000000LL, /* B-unit nop */
526 0x0008000000LL, /* F-unit nop */
527 0x0008000000LL, /* L-"unit" nop */
528 0x0008000000LL, /* X-unit nop */
529 };
530
531 /* Can't be `const' as it's passed to input routines (which have the
532 habit of setting temporary sentinels). */
533 static char special_section_name[][20] =
534 {
535 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
536 {".IA_64.unwind"}, {".IA_64.unwind_info"},
537 {".init_array"}, {".fini_array"}
538 };
539
540 static char *special_linkonce_name[] =
541 {
542 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
543 };
544
545 /* The best template for a particular sequence of up to three
546 instructions: */
547 #define N IA64_NUM_TYPES
548 static unsigned char best_template[N][N][N];
549 #undef N
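/* For example (illustrative): a group of one M-type insn followed by two
   I-type insns is normally covered best by the MII template; md_begin()
   computes this table from the available bundle templates.  */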
550
551 /* Resource dependencies currently in effect */
552 static struct rsrc {
553 int depind; /* dependency index */
554 const struct ia64_dependency *dependency; /* actual dependency */
555 unsigned specific:1, /* is this a specific bit/regno? */
556 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
557 int index; /* specific regno/bit within dependency */
558 int note; /* optional qualifying note (0 if none) */
559 #define STATE_NONE 0
560 #define STATE_STOP 1
561 #define STATE_SRLZ 2
562 int insn_srlz; /* current insn serialization state */
563 int data_srlz; /* current data serialization state */
564 int qp_regno; /* qualifying predicate for this usage */
565 char *file; /* what file marked this dependency */
566 unsigned int line; /* what line marked this dependency */
567 struct mem_offset mem_offset; /* optional memory offset hint */
568 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
569 int path; /* corresponding code entry index */
570 } *regdeps = NULL;
571 static int regdepslen = 0;
572 static int regdepstotlen = 0;
573 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
574 static const char *dv_sem[] = { "none", "implied", "impliedf",
575 "data", "instr", "specific", "stop", "other" };
576 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
577
578 /* Current state of PR mutexation */
579 static struct qpmutex {
580 valueT prmask;
581 int path;
582 } *qp_mutexes = NULL; /* QP mutex bitmasks */
583 static int qp_mutexeslen = 0;
584 static int qp_mutexestotlen = 0;
585 static valueT qp_safe_across_calls = 0;
586
587 /* Current state of PR implications */
588 static struct qp_imply {
589 unsigned p1:6;
590 unsigned p2:6;
591 unsigned p2_branched:1;
592 int path;
593 } *qp_implies = NULL;
594 static int qp_implieslen = 0;
595 static int qp_impliestotlen = 0;
596
597 /* Keep track of static GR values so that indirect register usage can
598 sometimes be tracked. */
599 static struct gr {
600 unsigned known:1;
601 int path;
602 valueT value;
603 } gr_values[128] = {{ 1, 0, 0 }};
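/* Only gr_values[0] is statically initialized: it describes r0, which
   architecturally always reads as zero, so it starts out as known with
   value 0.  */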
604
605 /* These are the routines required to output the various types of
606 unwind records. */
607
608 /* A slot_number is a frag address plus the slot index (0-2). We use the
609 frag address here so that if there is a section switch in the middle of
610 a function, then instructions emitted to a different section are not
611 counted. Since there may be more than one frag for a function, this
612 means we also need to keep track of which frag this address belongs to
613 so we can compute inter-frag distances. This also nicely solves the
614 problem with nops emitted for align directives, which can't easily be
615 counted, but can easily be derived from frag sizes. */
616
617 typedef struct unw_rec_list {
618 unwind_record r;
619 unsigned long slot_number;
620 fragS *slot_frag;
621 struct unw_rec_list *next;
622 } unw_rec_list;
623
624 #define SLOT_NUM_NOT_SET (unsigned)-1
625
626 static struct
627 {
628 unsigned long next_slot_number;
629 fragS *next_slot_frag;
630
631 /* Maintain a list of unwind entries for the current function. */
632 unw_rec_list *list;
633 unw_rec_list *tail;
634
635 /* Any unwind entries that should be attached to the current slot
636 that an insn is being constructed for. */
637 unw_rec_list *current_entry;
638
639 /* These are used to create the unwind table entry for this function. */
640 symbolS *proc_start;
641 symbolS *proc_end;
642 symbolS *info; /* pointer to unwind info */
643 symbolS *personality_routine;
644 segT saved_text_seg;
645 subsegT saved_text_subseg;
646 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
647
648 /* TRUE if processing unwind directives in a prologue region. */
649 int prologue;
650 int prologue_mask;
651 unsigned int prologue_count; /* number of .prologues seen so far */
652 } unwind;
653
654 typedef void (*vbyte_func) PARAMS ((int, char *, char *));
655
656 /* Forward declarations: */
657 static int ar_is_in_integer_unit PARAMS ((int regnum));
658 static void set_section PARAMS ((char *name));
659 static unsigned int set_regstack PARAMS ((unsigned int, unsigned int,
660 unsigned int, unsigned int));
661 static void dot_radix PARAMS ((int));
662 static void dot_special_section PARAMS ((int));
663 static void dot_proc PARAMS ((int));
664 static void dot_fframe PARAMS ((int));
665 static void dot_vframe PARAMS ((int));
666 static void dot_vframesp PARAMS ((int));
667 static void dot_vframepsp PARAMS ((int));
668 static void dot_save PARAMS ((int));
669 static void dot_restore PARAMS ((int));
670 static void dot_restorereg PARAMS ((int));
671 static void dot_restorereg_p PARAMS ((int));
672 static void dot_handlerdata PARAMS ((int));
673 static void dot_unwentry PARAMS ((int));
674 static void dot_altrp PARAMS ((int));
675 static void dot_savemem PARAMS ((int));
676 static void dot_saveg PARAMS ((int));
677 static void dot_savef PARAMS ((int));
678 static void dot_saveb PARAMS ((int));
679 static void dot_savegf PARAMS ((int));
680 static void dot_spill PARAMS ((int));
681 static void dot_spillreg PARAMS ((int));
682 static void dot_spillmem PARAMS ((int));
683 static void dot_spillreg_p PARAMS ((int));
684 static void dot_spillmem_p PARAMS ((int));
685 static void dot_label_state PARAMS ((int));
686 static void dot_copy_state PARAMS ((int));
687 static void dot_unwabi PARAMS ((int));
688 static void dot_personality PARAMS ((int));
689 static void dot_body PARAMS ((int));
690 static void dot_prologue PARAMS ((int));
691 static void dot_endp PARAMS ((int));
692 static void dot_template PARAMS ((int));
693 static void dot_regstk PARAMS ((int));
694 static void dot_rot PARAMS ((int));
695 static void dot_byteorder PARAMS ((int));
696 static void dot_psr PARAMS ((int));
697 static void dot_alias PARAMS ((int));
698 static void dot_ln PARAMS ((int));
699 static char *parse_section_name PARAMS ((void));
700 static void dot_xdata PARAMS ((int));
701 static void stmt_float_cons PARAMS ((int));
702 static void stmt_cons_ua PARAMS ((int));
703 static void dot_xfloat_cons PARAMS ((int));
704 static void dot_xstringer PARAMS ((int));
705 static void dot_xdata_ua PARAMS ((int));
706 static void dot_xfloat_cons_ua PARAMS ((int));
707 static void print_prmask PARAMS ((valueT mask));
708 static void dot_pred_rel PARAMS ((int));
709 static void dot_reg_val PARAMS ((int));
710 static void dot_dv_mode PARAMS ((int));
711 static void dot_entry PARAMS ((int));
712 static void dot_mem_offset PARAMS ((int));
713 static void add_unwind_entry PARAMS((unw_rec_list *ptr));
714 static symbolS *declare_register PARAMS ((const char *name, int regnum));
715 static void declare_register_set PARAMS ((const char *, int, int));
716 static unsigned int operand_width PARAMS ((enum ia64_opnd));
717 static enum operand_match_result operand_match PARAMS ((const struct ia64_opcode *idesc,
718 int index,
719 expressionS *e));
720 static int parse_operand PARAMS ((expressionS *e));
721 static struct ia64_opcode * parse_operands PARAMS ((struct ia64_opcode *));
722 static void build_insn PARAMS ((struct slot *, bfd_vma *));
723 static void emit_one_bundle PARAMS ((void));
724 static void fix_insn PARAMS ((fixS *, const struct ia64_operand *, valueT));
725 static bfd_reloc_code_real_type ia64_gen_real_reloc_type PARAMS ((struct symbol *sym,
726 bfd_reloc_code_real_type r_type));
727 static void insn_group_break PARAMS ((int, int, int));
728 static void mark_resource PARAMS ((struct ia64_opcode *, const struct ia64_dependency *,
729 struct rsrc *, int depind, int path));
730 static void add_qp_mutex PARAMS((valueT mask));
731 static void add_qp_imply PARAMS((int p1, int p2));
732 static void clear_qp_branch_flag PARAMS((valueT mask));
733 static void clear_qp_mutex PARAMS((valueT mask));
734 static void clear_qp_implies PARAMS((valueT p1_mask, valueT p2_mask));
735 static void clear_register_values PARAMS ((void));
736 static void print_dependency PARAMS ((const char *action, int depind));
737 static void instruction_serialization PARAMS ((void));
738 static void data_serialization PARAMS ((void));
739 static void remove_marked_resource PARAMS ((struct rsrc *));
740 static int is_conditional_branch PARAMS ((struct ia64_opcode *));
741 static int is_taken_branch PARAMS ((struct ia64_opcode *));
742 static int is_interruption_or_rfi PARAMS ((struct ia64_opcode *));
743 static int depends_on PARAMS ((int, struct ia64_opcode *));
744 static int specify_resource PARAMS ((const struct ia64_dependency *,
745 struct ia64_opcode *, int, struct rsrc [], int, int));
746 static int check_dv PARAMS((struct ia64_opcode *idesc));
747 static void check_dependencies PARAMS((struct ia64_opcode *));
748 static void mark_resources PARAMS((struct ia64_opcode *));
749 static void update_dependencies PARAMS((struct ia64_opcode *));
750 static void note_register_values PARAMS((struct ia64_opcode *));
751 static int qp_mutex PARAMS ((int, int, int));
752 static int resources_match PARAMS ((struct rsrc *, struct ia64_opcode *, int, int, int));
753 static void output_vbyte_mem PARAMS ((int, char *, char *));
754 static void count_output PARAMS ((int, char *, char *));
755 static void output_R1_format PARAMS ((vbyte_func, unw_record_type, int));
756 static void output_R2_format PARAMS ((vbyte_func, int, int, unsigned long));
757 static void output_R3_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
758 static void output_P1_format PARAMS ((vbyte_func, int));
759 static void output_P2_format PARAMS ((vbyte_func, int, int));
760 static void output_P3_format PARAMS ((vbyte_func, unw_record_type, int));
761 static void output_P4_format PARAMS ((vbyte_func, unsigned char *, unsigned long));
762 static void output_P5_format PARAMS ((vbyte_func, int, unsigned long));
763 static void output_P6_format PARAMS ((vbyte_func, unw_record_type, int));
764 static void output_P7_format PARAMS ((vbyte_func, unw_record_type, unsigned long, unsigned long));
765 static void output_P8_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
766 static void output_P9_format PARAMS ((vbyte_func, int, int));
767 static void output_P10_format PARAMS ((vbyte_func, int, int));
768 static void output_B1_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
769 static void output_B2_format PARAMS ((vbyte_func, unsigned long, unsigned long));
770 static void output_B3_format PARAMS ((vbyte_func, unsigned long, unsigned long));
771 static void output_B4_format PARAMS ((vbyte_func, unw_record_type, unsigned long));
772 static char format_ab_reg PARAMS ((int, int));
773 static void output_X1_format PARAMS ((vbyte_func, unw_record_type, int, int, unsigned long,
774 unsigned long));
775 static void output_X2_format PARAMS ((vbyte_func, int, int, int, int, int, unsigned long));
776 static void output_X3_format PARAMS ((vbyte_func, unw_record_type, int, int, int, unsigned long,
777 unsigned long));
778 static void output_X4_format PARAMS ((vbyte_func, int, int, int, int, int, int, unsigned long));
779 static void free_list_records PARAMS ((unw_rec_list *));
780 static unw_rec_list *output_prologue PARAMS ((void));
781 static unw_rec_list *output_prologue_gr PARAMS ((unsigned int, unsigned int));
782 static unw_rec_list *output_body PARAMS ((void));
783 static unw_rec_list *output_mem_stack_f PARAMS ((unsigned int));
784 static unw_rec_list *output_mem_stack_v PARAMS ((void));
785 static unw_rec_list *output_psp_gr PARAMS ((unsigned int));
786 static unw_rec_list *output_psp_sprel PARAMS ((unsigned int));
787 static unw_rec_list *output_rp_when PARAMS ((void));
788 static unw_rec_list *output_rp_gr PARAMS ((unsigned int));
789 static unw_rec_list *output_rp_br PARAMS ((unsigned int));
790 static unw_rec_list *output_rp_psprel PARAMS ((unsigned int));
791 static unw_rec_list *output_rp_sprel PARAMS ((unsigned int));
792 static unw_rec_list *output_pfs_when PARAMS ((void));
793 static unw_rec_list *output_pfs_gr PARAMS ((unsigned int));
794 static unw_rec_list *output_pfs_psprel PARAMS ((unsigned int));
795 static unw_rec_list *output_pfs_sprel PARAMS ((unsigned int));
796 static unw_rec_list *output_preds_when PARAMS ((void));
797 static unw_rec_list *output_preds_gr PARAMS ((unsigned int));
798 static unw_rec_list *output_preds_psprel PARAMS ((unsigned int));
799 static unw_rec_list *output_preds_sprel PARAMS ((unsigned int));
800 static unw_rec_list *output_fr_mem PARAMS ((unsigned int));
801 static unw_rec_list *output_frgr_mem PARAMS ((unsigned int, unsigned int));
802 static unw_rec_list *output_gr_gr PARAMS ((unsigned int, unsigned int));
803 static unw_rec_list *output_gr_mem PARAMS ((unsigned int));
804 static unw_rec_list *output_br_mem PARAMS ((unsigned int));
805 static unw_rec_list *output_br_gr PARAMS ((unsigned int, unsigned int));
806 static unw_rec_list *output_spill_base PARAMS ((unsigned int));
807 static unw_rec_list *output_unat_when PARAMS ((void));
808 static unw_rec_list *output_unat_gr PARAMS ((unsigned int));
809 static unw_rec_list *output_unat_psprel PARAMS ((unsigned int));
810 static unw_rec_list *output_unat_sprel PARAMS ((unsigned int));
811 static unw_rec_list *output_lc_when PARAMS ((void));
812 static unw_rec_list *output_lc_gr PARAMS ((unsigned int));
813 static unw_rec_list *output_lc_psprel PARAMS ((unsigned int));
814 static unw_rec_list *output_lc_sprel PARAMS ((unsigned int));
815 static unw_rec_list *output_fpsr_when PARAMS ((void));
816 static unw_rec_list *output_fpsr_gr PARAMS ((unsigned int));
817 static unw_rec_list *output_fpsr_psprel PARAMS ((unsigned int));
818 static unw_rec_list *output_fpsr_sprel PARAMS ((unsigned int));
819 static unw_rec_list *output_priunat_when_gr PARAMS ((void));
820 static unw_rec_list *output_priunat_when_mem PARAMS ((void));
821 static unw_rec_list *output_priunat_gr PARAMS ((unsigned int));
822 static unw_rec_list *output_priunat_psprel PARAMS ((unsigned int));
823 static unw_rec_list *output_priunat_sprel PARAMS ((unsigned int));
824 static unw_rec_list *output_bsp_when PARAMS ((void));
825 static unw_rec_list *output_bsp_gr PARAMS ((unsigned int));
826 static unw_rec_list *output_bsp_psprel PARAMS ((unsigned int));
827 static unw_rec_list *output_bsp_sprel PARAMS ((unsigned int));
828 static unw_rec_list *output_bspstore_when PARAMS ((void));
829 static unw_rec_list *output_bspstore_gr PARAMS ((unsigned int));
830 static unw_rec_list *output_bspstore_psprel PARAMS ((unsigned int));
831 static unw_rec_list *output_bspstore_sprel PARAMS ((unsigned int));
832 static unw_rec_list *output_rnat_when PARAMS ((void));
833 static unw_rec_list *output_rnat_gr PARAMS ((unsigned int));
834 static unw_rec_list *output_rnat_psprel PARAMS ((unsigned int));
835 static unw_rec_list *output_rnat_sprel PARAMS ((unsigned int));
836 static unw_rec_list *output_unwabi PARAMS ((unsigned long, unsigned long));
837 static unw_rec_list *output_epilogue PARAMS ((unsigned long));
838 static unw_rec_list *output_label_state PARAMS ((unsigned long));
839 static unw_rec_list *output_copy_state PARAMS ((unsigned long));
840 static unw_rec_list *output_spill_psprel PARAMS ((unsigned int, unsigned int, unsigned int));
841 static unw_rec_list *output_spill_sprel PARAMS ((unsigned int, unsigned int, unsigned int));
842 static unw_rec_list *output_spill_psprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
843 unsigned int));
844 static unw_rec_list *output_spill_sprel_p PARAMS ((unsigned int, unsigned int, unsigned int,
845 unsigned int));
846 static unw_rec_list *output_spill_reg PARAMS ((unsigned int, unsigned int, unsigned int,
847 unsigned int));
848 static unw_rec_list *output_spill_reg_p PARAMS ((unsigned int, unsigned int, unsigned int,
849 unsigned int, unsigned int));
850 static void process_one_record PARAMS ((unw_rec_list *, vbyte_func));
851 static void process_unw_records PARAMS ((unw_rec_list *, vbyte_func));
852 static int calc_record_size PARAMS ((unw_rec_list *));
853 static void set_imask PARAMS ((unw_rec_list *, unsigned long, unsigned long, unsigned int));
854 static int count_bits PARAMS ((unsigned long));
855 static unsigned long slot_index PARAMS ((unsigned long, fragS *,
856 unsigned long, fragS *));
857 static unw_rec_list *optimize_unw_records PARAMS ((unw_rec_list *));
858 static void fixup_unw_records PARAMS ((unw_rec_list *));
859 static int output_unw_records PARAMS ((unw_rec_list *, void **));
860 static int convert_expr_to_ab_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
861 static int convert_expr_to_xy_reg PARAMS ((expressionS *, unsigned int *, unsigned int *));
862 static int generate_unwind_image PARAMS ((const char *));
863
864 /* Build the unwind section name by appending the (possibly stripped)
865 text section NAME to the unwind PREFIX. The resulting string
866 pointer is assigned to RESULT. The string is allocated on the
867 stack, so this must be a macro... */
868 #define make_unw_section_name(special, text_name, result) \
869 do { \
870 char *_prefix = special_section_name[special]; \
871 char *_suffix = text_name; \
872 size_t _prefix_len, _suffix_len; \
873 char *_result; \
874 if (strncmp (text_name, ".gnu.linkonce.t.", \
875 sizeof (".gnu.linkonce.t.") - 1) == 0) \
876 { \
877 _prefix = special_linkonce_name[special - SPECIAL_SECTION_UNWIND]; \
878 _suffix += sizeof (".gnu.linkonce.t.") - 1; \
879 } \
880 _prefix_len = strlen (_prefix), _suffix_len = strlen (_suffix); \
881 _result = alloca (_prefix_len + _suffix_len + 1); \
882 memcpy (_result, _prefix, _prefix_len); \
883 memcpy (_result + _prefix_len, _suffix, _suffix_len); \
884 _result[_prefix_len + _suffix_len] = '\0'; \
885 result = _result; \
886 } \
887 while (0)
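/* For example (derived from the macro above): with SPECIAL_SECTION_UNWIND,
   text section ".gnu.linkonce.t.foo" yields ".gnu.linkonce.ia64unw.foo",
   while any other text section name simply gets ".IA_64.unwind" prepended
   to it.  */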
888
889 /* Determine if application register REGNUM resides in the integer
890 unit (as opposed to the memory unit). */
891 static int
892 ar_is_in_integer_unit (reg)
893 int reg;
894 {
895 reg -= REG_AR;
896
897 return (reg == 64 /* pfs */
898 || reg == 65 /* lc */
899 || reg == 66 /* ec */
900 /* ??? ias accepts and puts these in the integer unit. */
901 || (reg >= 112 && reg <= 127));
902 }
903
904 /* Switch to section NAME and create section if necessary. It's
905 rather ugly that we have to manipulate input_line_pointer but I
906 don't see any other way to accomplish the same thing without
907 changing obj-elf.c (which may be the Right Thing, in the end). */
908 static void
909 set_section (name)
910 char *name;
911 {
912 char *saved_input_line_pointer;
913
914 saved_input_line_pointer = input_line_pointer;
915 input_line_pointer = name;
916 obj_elf_section (0);
917 input_line_pointer = saved_input_line_pointer;
918 }
919
920 /* Map 's' to SHF_IA_64_SHORT. */
921
922 int
923 ia64_elf_section_letter (letter, ptr_msg)
924 int letter;
925 char **ptr_msg;
926 {
927 if (letter == 's')
928 return SHF_IA_64_SHORT;
929
930 *ptr_msg = _("Bad .section directive: want a,s,w,x,M,S in string");
931 return 0;
932 }
933
934 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
935
936 flagword
937 ia64_elf_section_flags (flags, attr, type)
938 flagword flags;
939 int attr, type ATTRIBUTE_UNUSED;
940 {
941 if (attr & SHF_IA_64_SHORT)
942 flags |= SEC_SMALL_DATA;
943 return flags;
944 }
945
946 int
947 ia64_elf_section_type (str, len)
948 const char *str;
949 size_t len;
950 {
951 len = sizeof (ELF_STRING_ia64_unwind_info) - 1;
952 if (strncmp (str, ELF_STRING_ia64_unwind_info, len) == 0)
953 return SHT_PROGBITS;
954
955 len = sizeof (ELF_STRING_ia64_unwind_info_once) - 1;
956 if (strncmp (str, ELF_STRING_ia64_unwind_info_once, len) == 0)
957 return SHT_PROGBITS;
958
959 len = sizeof (ELF_STRING_ia64_unwind) - 1;
960 if (strncmp (str, ELF_STRING_ia64_unwind, len) == 0)
961 return SHT_IA_64_UNWIND;
962
963 len = sizeof (ELF_STRING_ia64_unwind_once) - 1;
964 if (strncmp (str, ELF_STRING_ia64_unwind_once, len) == 0)
965 return SHT_IA_64_UNWIND;
966
967 return -1;
968 }
969
970 static unsigned int
971 set_regstack (ins, locs, outs, rots)
972 unsigned int ins, locs, outs, rots;
973 {
974 /* Size of frame. */
975 unsigned int sof;
976
977 sof = ins + locs + outs;
978 if (sof > 96)
979 {
980 as_bad ("Size of frame exceeds maximum of 96 registers");
981 return 0;
982 }
983 if (rots > sof)
984 {
985 as_warn ("Size of rotating registers exceeds frame size");
986 return 0;
987 }
988 md.in.base = REG_GR + 32;
989 md.loc.base = md.in.base + ins;
990 md.out.base = md.loc.base + locs;
991
992 md.in.num_regs = ins;
993 md.loc.num_regs = locs;
994 md.out.num_regs = outs;
995 md.rot.num_regs = rots;
996 return sof;
997 }
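/* Worked example (illustrative): ".regstk 2, 3, 4, 0" reaches this function
   as ins=2, locs=3, outs=4, rots=0, giving sof = 9 with in0-in1 = r32-r33,
   loc0-loc2 = r34-r36 and out0-out3 = r37-r40.  */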
998
999 void
1000 ia64_flush_insns ()
1001 {
1002 struct label_fix *lfix;
1003 segT saved_seg;
1004 subsegT saved_subseg;
1005 unw_rec_list *ptr;
1006
1007 if (!md.last_text_seg)
1008 return;
1009
1010 saved_seg = now_seg;
1011 saved_subseg = now_subseg;
1012
1013 subseg_set (md.last_text_seg, 0);
1014
1015 while (md.num_slots_in_use > 0)
1016 emit_one_bundle (); /* force out queued instructions */
1017
1018 /* In case there are labels following the last instruction, resolve
1019 those now: */
1020 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
1021 {
1022 S_SET_VALUE (lfix->sym, frag_now_fix ());
1023 symbol_set_frag (lfix->sym, frag_now);
1024 }
1025 CURR_SLOT.label_fixups = 0;
1026 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
1027 {
1028 S_SET_VALUE (lfix->sym, frag_now_fix ());
1029 symbol_set_frag (lfix->sym, frag_now);
1030 }
1031 CURR_SLOT.tag_fixups = 0;
1032
1033 /* In case there are unwind directives following the last instruction,
1034 resolve those now. We only handle body and prologue directives here.
1035 Give an error for others. */
1036 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
1037 {
1038 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
1039 || ptr->r.type == body)
1040 {
1041 ptr->slot_number = (unsigned long) frag_more (0);
1042 ptr->slot_frag = frag_now;
1043 }
1044 else
1045 as_bad (_("Unwind directive not followed by an instruction."));
1046 }
1047 unwind.current_entry = NULL;
1048
1049 subseg_set (saved_seg, saved_subseg);
1050
1051 if (md.qp.X_op == O_register)
1052 as_bad ("qualifying predicate not followed by instruction");
1053 }
1054
1055 void
1056 ia64_do_align (nbytes)
1057 int nbytes;
1058 {
1059 char *saved_input_line_pointer = input_line_pointer;
1060
1061 input_line_pointer = "";
1062 s_align_bytes (nbytes);
1063 input_line_pointer = saved_input_line_pointer;
1064 }
1065
1066 void
1067 ia64_cons_align (nbytes)
1068 int nbytes;
1069 {
1070 if (md.auto_align)
1071 {
1072 char *saved_input_line_pointer = input_line_pointer;
1073 input_line_pointer = "";
1074 s_align_bytes (nbytes);
1075 input_line_pointer = saved_input_line_pointer;
1076 }
1077 }
1078
1079 /* Output COUNT bytes to a memory location. */
1080 static unsigned char *vbyte_mem_ptr = NULL;
1081
1082 void
1083 output_vbyte_mem (count, ptr, comment)
1084 int count;
1085 char *ptr;
1086 char *comment ATTRIBUTE_UNUSED;
1087 {
1088 int x;
1089 if (vbyte_mem_ptr == NULL)
1090 abort ();
1091
1092 if (count == 0)
1093 return;
1094 for (x = 0; x < count; x++)
1095 *(vbyte_mem_ptr++) = ptr[x];
1096 }
1097
1098 /* Count the number of bytes required for records. */
1099 static int vbyte_count = 0;
1100 void
1101 count_output (count, ptr, comment)
1102 int count;
1103 char *ptr ATTRIBUTE_UNUSED;
1104 char *comment ATTRIBUTE_UNUSED;
1105 {
1106 vbyte_count += count;
1107 }
1108
1109 static void
1110 output_R1_format (f, rtype, rlen)
1111 vbyte_func f;
1112 unw_record_type rtype;
1113 int rlen;
1114 {
1115 int r = 0;
1116 char byte;
1117 if (rlen > 0x1f)
1118 {
1119 output_R3_format (f, rtype, rlen);
1120 return;
1121 }
1122
1123 if (rtype == body)
1124 r = 1;
1125 else if (rtype != prologue)
1126 as_bad ("record type is not valid");
1127
1128 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1129 (*f) (1, &byte, NULL);
1130 }
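/* Worked example (assuming UNW_R1 carries the "00" major opcode of the
   IA-64 unwind descriptor format): a prologue region of length 5 is
   emitted as the single byte 0x05, a body region of length 5 as 0x25;
   lengths above 0x1f are handed off to the long R3 form instead.  */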
1131
1132 static void
1133 output_R2_format (f, mask, grsave, rlen)
1134 vbyte_func f;
1135 int mask, grsave;
1136 unsigned long rlen;
1137 {
1138 char bytes[20];
1139 int count = 2;
1140 mask = (mask & 0x0f);
1141 grsave = (grsave & 0x7f);
1142
1143 bytes[0] = (UNW_R2 | (mask >> 1));
1144 bytes[1] = (((mask & 0x01) << 7) | grsave);
1145 count += output_leb128 (bytes + 2, rlen, 0);
1146 (*f) (count, bytes, NULL);
1147 }
1148
1149 static void
1150 output_R3_format (f, rtype, rlen)
1151 vbyte_func f;
1152 unw_record_type rtype;
1153 unsigned long rlen;
1154 {
1155 int r = 0, count;
1156 char bytes[20];
1157 if (rlen <= 0x1f)
1158 {
1159 output_R1_format (f, rtype, rlen);
1160 return;
1161 }
1162
1163 if (rtype == body)
1164 r = 1;
1165 else if (rtype != prologue)
1166 as_bad ("record type is not valid");
1167 bytes[0] = (UNW_R3 | r);
1168 count = output_leb128 (bytes + 1, rlen, 0);
1169 (*f) (count + 1, bytes, NULL);
1170 }
1171
1172 static void
1173 output_P1_format (f, brmask)
1174 vbyte_func f;
1175 int brmask;
1176 {
1177 char byte;
1178 byte = UNW_P1 | (brmask & 0x1f);
1179 (*f) (1, &byte, NULL);
1180 }
1181
1182 static void
1183 output_P2_format (f, brmask, gr)
1184 vbyte_func f;
1185 int brmask;
1186 int gr;
1187 {
1188 char bytes[2];
1189 brmask = (brmask & 0x1f);
1190 bytes[0] = UNW_P2 | (brmask >> 1);
1191 bytes[1] = (((brmask & 1) << 7) | gr);
1192 (*f) (2, bytes, NULL);
1193 }
1194
1195 static void
1196 output_P3_format (f, rtype, reg)
1197 vbyte_func f;
1198 unw_record_type rtype;
1199 int reg;
1200 {
1201 char bytes[2];
1202 int r = 0;
1203 reg = (reg & 0x7f);
1204 switch (rtype)
1205 {
1206 case psp_gr:
1207 r = 0;
1208 break;
1209 case rp_gr:
1210 r = 1;
1211 break;
1212 case pfs_gr:
1213 r = 2;
1214 break;
1215 case preds_gr:
1216 r = 3;
1217 break;
1218 case unat_gr:
1219 r = 4;
1220 break;
1221 case lc_gr:
1222 r = 5;
1223 break;
1224 case rp_br:
1225 r = 6;
1226 break;
1227 case rnat_gr:
1228 r = 7;
1229 break;
1230 case bsp_gr:
1231 r = 8;
1232 break;
1233 case bspstore_gr:
1234 r = 9;
1235 break;
1236 case fpsr_gr:
1237 r = 10;
1238 break;
1239 case priunat_gr:
1240 r = 11;
1241 break;
1242 default:
1243 as_bad ("Invalid record type for P3 format.");
1244 }
1245 bytes[0] = (UNW_P3 | (r >> 1));
1246 bytes[1] = (((r & 1) << 7) | reg);
1247 (*f) (2, bytes, NULL);
1248 }
1249
1250 static void
1251 output_P4_format (f, imask, imask_size)
1252 vbyte_func f;
1253 unsigned char *imask;
1254 unsigned long imask_size;
1255 {
1256 imask[0] = UNW_P4;
1257 (*f) (imask_size, imask, NULL);
1258 }
1259
1260 static void
1261 output_P5_format (f, grmask, frmask)
1262 vbyte_func f;
1263 int grmask;
1264 unsigned long frmask;
1265 {
1266 char bytes[4];
1267 grmask = (grmask & 0x0f);
1268
1269 bytes[0] = UNW_P5;
1270 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1271 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1272 bytes[3] = (frmask & 0x000000ff);
1273 (*f) (4, bytes, NULL);
1274 }
1275
1276 static void
1277 output_P6_format (f, rtype, rmask)
1278 vbyte_func f;
1279 unw_record_type rtype;
1280 int rmask;
1281 {
1282 char byte;
1283 int r = 0;
1284
1285 if (rtype == gr_mem)
1286 r = 1;
1287 else if (rtype != fr_mem)
1288 as_bad ("Invalid record type for format P6");
1289 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1290 (*f) (1, &byte, NULL);
1291 }
1292
1293 static void
1294 output_P7_format (f, rtype, w1, w2)
1295 vbyte_func f;
1296 unw_record_type rtype;
1297 unsigned long w1;
1298 unsigned long w2;
1299 {
1300 char bytes[20];
1301 int count = 1;
1302 int r = 0;
1303 count += output_leb128 (bytes + 1, w1, 0);
1304 switch (rtype)
1305 {
1306 case mem_stack_f:
1307 r = 0;
1308 count += output_leb128 (bytes + count, w2 >> 4, 0);
1309 break;
1310 case mem_stack_v:
1311 r = 1;
1312 break;
1313 case spill_base:
1314 r = 2;
1315 break;
1316 case psp_sprel:
1317 r = 3;
1318 break;
1319 case rp_when:
1320 r = 4;
1321 break;
1322 case rp_psprel:
1323 r = 5;
1324 break;
1325 case pfs_when:
1326 r = 6;
1327 break;
1328 case pfs_psprel:
1329 r = 7;
1330 break;
1331 case preds_when:
1332 r = 8;
1333 break;
1334 case preds_psprel:
1335 r = 9;
1336 break;
1337 case lc_when:
1338 r = 10;
1339 break;
1340 case lc_psprel:
1341 r = 11;
1342 break;
1343 case unat_when:
1344 r = 12;
1345 break;
1346 case unat_psprel:
1347 r = 13;
1348 break;
1349 case fpsr_when:
1350 r = 14;
1351 break;
1352 case fpsr_psprel:
1353 r = 15;
1354 break;
1355 default:
1356 break;
1357 }
1358 bytes[0] = (UNW_P7 | r);
1359 (*f) (count, bytes, NULL);
1360 }
1361
1362 static void
1363 output_P8_format (f, rtype, t)
1364 vbyte_func f;
1365 unw_record_type rtype;
1366 unsigned long t;
1367 {
1368 char bytes[20];
1369 int r = 0;
1370 int count = 2;
1371 bytes[0] = UNW_P8;
1372 switch (rtype)
1373 {
1374 case rp_sprel:
1375 r = 1;
1376 break;
1377 case pfs_sprel:
1378 r = 2;
1379 break;
1380 case preds_sprel:
1381 r = 3;
1382 break;
1383 case lc_sprel:
1384 r = 4;
1385 break;
1386 case unat_sprel:
1387 r = 5;
1388 break;
1389 case fpsr_sprel:
1390 r = 6;
1391 break;
1392 case bsp_when:
1393 r = 7;
1394 break;
1395 case bsp_psprel:
1396 r = 8;
1397 break;
1398 case bsp_sprel:
1399 r = 9;
1400 break;
1401 case bspstore_when:
1402 r = 10;
1403 break;
1404 case bspstore_psprel:
1405 r = 11;
1406 break;
1407 case bspstore_sprel:
1408 r = 12;
1409 break;
1410 case rnat_when:
1411 r = 13;
1412 break;
1413 case rnat_psprel:
1414 r = 14;
1415 break;
1416 case rnat_sprel:
1417 r = 15;
1418 break;
1419 case priunat_when_gr:
1420 r = 16;
1421 break;
1422 case priunat_psprel:
1423 r = 17;
1424 break;
1425 case priunat_sprel:
1426 r = 18;
1427 break;
1428 case priunat_when_mem:
1429 r = 19;
1430 break;
1431 default:
1432 break;
1433 }
1434 bytes[1] = r;
1435 count += output_leb128 (bytes + 2, t, 0);
1436 (*f) (count, bytes, NULL);
1437 }
1438
1439 static void
1440 output_P9_format (f, grmask, gr)
1441 vbyte_func f;
1442 int grmask;
1443 int gr;
1444 {
1445 char bytes[3];
1446 bytes[0] = UNW_P9;
1447 bytes[1] = (grmask & 0x0f);
1448 bytes[2] = (gr & 0x7f);
1449 (*f) (3, bytes, NULL);
1450 }
1451
1452 static void
1453 output_P10_format (f, abi, context)
1454 vbyte_func f;
1455 int abi;
1456 int context;
1457 {
1458 char bytes[3];
1459 bytes[0] = UNW_P10;
1460 bytes[1] = (abi & 0xff);
1461 bytes[2] = (context & 0xff);
1462 (*f) (3, bytes, NULL);
1463 }
1464
1465 static void
1466 output_B1_format (f, rtype, label)
1467 vbyte_func f;
1468 unw_record_type rtype;
1469 unsigned long label;
1470 {
1471 char byte;
1472 int r = 0;
1473 if (label > 0x1f)
1474 {
1475 output_B4_format (f, rtype, label);
1476 return;
1477 }
1478 if (rtype == copy_state)
1479 r = 1;
1480 else if (rtype != label_state)
1481 as_bad ("Invalid record type for format B1");
1482
1483 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1484 (*f) (1, &byte, NULL);
1485 }
1486
1487 static void
1488 output_B2_format (f, ecount, t)
1489 vbyte_func f;
1490 unsigned long ecount;
1491 unsigned long t;
1492 {
1493 char bytes[20];
1494 int count = 1;
1495 if (ecount > 0x1f)
1496 {
1497 output_B3_format (f, ecount, t);
1498 return;
1499 }
1500 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1501 count += output_leb128 (bytes + 1, t, 0);
1502 (*f) (count, bytes, NULL);
1503 }
1504
1505 static void
1506 output_B3_format (f, ecount, t)
1507 vbyte_func f;
1508 unsigned long ecount;
1509 unsigned long t;
1510 {
1511 char bytes[20];
1512 int count = 1;
1513 if (ecount <= 0x1f)
1514 {
1515 output_B2_format (f, ecount, t);
1516 return;
1517 }
1518 bytes[0] = UNW_B3;
1519 count += output_leb128 (bytes + 1, t, 0);
1520 count += output_leb128 (bytes + count, ecount, 0);
1521 (*f) (count, bytes, NULL);
1522 }
1523
1524 static void
1525 output_B4_format (f, rtype, label)
1526 vbyte_func f;
1527 unw_record_type rtype;
1528 unsigned long label;
1529 {
1530 char bytes[20];
1531 int r = 0;
1532 int count = 1;
1533 if (label <= 0x1f)
1534 {
1535 output_B1_format (f, rtype, label);
1536 return;
1537 }
1538
1539 if (rtype == copy_state)
1540 r = 1;
1541 else if (rtype != label_state)
1542 as_bad ("Invalid record type for format B4");
1543
1544 bytes[0] = (UNW_B4 | (r << 3));
1545 count += output_leb128 (bytes + 1, label, 0);
1546 (*f) (count, bytes, NULL);
1547 }
1548
1549 static char
1550 format_ab_reg (ab, reg)
1551 int ab;
1552 int reg;
1553 {
1554 int ret;
1555 ab = (ab & 3);
1556 reg = (reg & 0x1f);
1557 ret = (ab << 5) | reg;
1558 return ret;
1559 }
1560
1561 static void
1562 output_X1_format (f, rtype, ab, reg, t, w1)
1563 vbyte_func f;
1564 unw_record_type rtype;
1565 int ab, reg;
1566 unsigned long t;
1567 unsigned long w1;
1568 {
1569 char bytes[20];
1570 int r = 0;
1571 int count = 2;
1572 bytes[0] = UNW_X1;
1573
1574 if (rtype == spill_sprel)
1575 r = 1;
1576 else if (rtype != spill_psprel)
1577 as_bad ("Invalid record type for format X1");
1578 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1579 count += output_leb128 (bytes + 2, t, 0);
1580 count += output_leb128 (bytes + count, w1, 0);
1581 (*f) (count, bytes, NULL);
1582 }
1583
1584 static void
1585 output_X2_format (f, ab, reg, x, y, treg, t)
1586 vbyte_func f;
1587 int ab, reg;
1588 int x, y, treg;
1589 unsigned long t;
1590 {
1591 char bytes[20];
1592 int count = 3;
1593 bytes[0] = UNW_X2;
1594 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1595 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1596 count += output_leb128 (bytes + 3, t, 0);
1597 (*f) (count, bytes, NULL);
1598 }
1599
1600 static void
1601 output_X3_format (f, rtype, qp, ab, reg, t, w1)
1602 vbyte_func f;
1603 unw_record_type rtype;
1604 int qp;
1605 int ab, reg;
1606 unsigned long t;
1607 unsigned long w1;
1608 {
1609 char bytes[20];
1610 int r = 0;
1611 int count = 3;
1612 bytes[0] = UNW_X3;
1613
1614 if (rtype == spill_sprel_p)
1615 r = 1;
1616 else if (rtype != spill_psprel_p)
1617 as_bad ("Invalid record type for format X3");
1618 bytes[1] = ((r << 7) | (qp & 0x3f));
1619 bytes[2] = format_ab_reg (ab, reg);
1620 count += output_leb128 (bytes + 3, t, 0);
1621 count += output_leb128 (bytes + count, w1, 0);
1622 (*f) (count, bytes, NULL);
1623 }
1624
1625 static void
1626 output_X4_format (f, qp, ab, reg, x, y, treg, t)
1627 vbyte_func f;
1628 int qp;
1629 int ab, reg;
1630 int x, y, treg;
1631 unsigned long t;
1632 {
1633 char bytes[20];
1634 int count = 4;
1635 bytes[0] = UNW_X4;
1636 bytes[1] = (qp & 0x3f);
1637 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1638 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1639 count += output_leb128 (bytes + 4, t, 0);
1640 (*f) (count, bytes, NULL);
1641 }
1642
1643 /* This function allocates a record list structure, and initializes fields. */
1644
1645 static unw_rec_list *
1646 alloc_record (unw_record_type t)
1647 {
1648 unw_rec_list *ptr;
1649 ptr = xmalloc (sizeof (*ptr));
1650 ptr->next = NULL;
1651 ptr->slot_number = SLOT_NUM_NOT_SET;
1652 ptr->r.type = t;
1653 return ptr;
1654 }
1655
1656 /* This function frees an entire list of record structures. */
1657
1658 void
1659 free_list_records (unw_rec_list *first)
1660 {
1661 unw_rec_list *ptr;
1662 for (ptr = first; ptr != NULL;)
1663 {
1664 unw_rec_list *tmp = ptr;
1665
1666 if ((tmp->r.type == prologue || tmp->r.type == prologue_gr)
1667 && tmp->r.record.r.mask.i)
1668 free (tmp->r.record.r.mask.i);
1669
1670 ptr = ptr->next;
1671 free (tmp);
1672 }
1673 }
1674
1675 static unw_rec_list *
1676 output_prologue ()
1677 {
1678 unw_rec_list *ptr = alloc_record (prologue);
1679 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1680 return ptr;
1681 }
1682
1683 static unw_rec_list *
1684 output_prologue_gr (saved_mask, reg)
1685 unsigned int saved_mask;
1686 unsigned int reg;
1687 {
1688 unw_rec_list *ptr = alloc_record (prologue_gr);
1689 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1690 ptr->r.record.r.grmask = saved_mask;
1691 ptr->r.record.r.grsave = reg;
1692 return ptr;
1693 }
1694
1695 static unw_rec_list *
1696 output_body ()
1697 {
1698 unw_rec_list *ptr = alloc_record (body);
1699 return ptr;
1700 }
1701
1702 static unw_rec_list *
1703 output_mem_stack_f (size)
1704 unsigned int size;
1705 {
1706 unw_rec_list *ptr = alloc_record (mem_stack_f);
1707 ptr->r.record.p.size = size;
1708 return ptr;
1709 }
1710
1711 static unw_rec_list *
1712 output_mem_stack_v ()
1713 {
1714 unw_rec_list *ptr = alloc_record (mem_stack_v);
1715 return ptr;
1716 }
1717
1718 static unw_rec_list *
1719 output_psp_gr (gr)
1720 unsigned int gr;
1721 {
1722 unw_rec_list *ptr = alloc_record (psp_gr);
1723 ptr->r.record.p.gr = gr;
1724 return ptr;
1725 }
1726
1727 static unw_rec_list *
1728 output_psp_sprel (offset)
1729 unsigned int offset;
1730 {
1731 unw_rec_list *ptr = alloc_record (psp_sprel);
1732 ptr->r.record.p.spoff = offset / 4;
1733 return ptr;
1734 }
1735
1736 static unw_rec_list *
1737 output_rp_when ()
1738 {
1739 unw_rec_list *ptr = alloc_record (rp_when);
1740 return ptr;
1741 }
1742
1743 static unw_rec_list *
1744 output_rp_gr (gr)
1745 unsigned int gr;
1746 {
1747 unw_rec_list *ptr = alloc_record (rp_gr);
1748 ptr->r.record.p.gr = gr;
1749 return ptr;
1750 }
1751
1752 static unw_rec_list *
1753 output_rp_br (br)
1754 unsigned int br;
1755 {
1756 unw_rec_list *ptr = alloc_record (rp_br);
1757 ptr->r.record.p.br = br;
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_rp_psprel (offset)
1763 unsigned int offset;
1764 {
1765 unw_rec_list *ptr = alloc_record (rp_psprel);
1766 ptr->r.record.p.pspoff = offset / 4;
1767 return ptr;
1768 }
1769
1770 static unw_rec_list *
1771 output_rp_sprel (offset)
1772 unsigned int offset;
1773 {
1774 unw_rec_list *ptr = alloc_record (rp_sprel);
1775 ptr->r.record.p.spoff = offset / 4;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_pfs_when ()
1781 {
1782 unw_rec_list *ptr = alloc_record (pfs_when);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_pfs_gr (gr)
1788 unsigned int gr;
1789 {
1790 unw_rec_list *ptr = alloc_record (pfs_gr);
1791 ptr->r.record.p.gr = gr;
1792 return ptr;
1793 }
1794
1795 static unw_rec_list *
1796 output_pfs_psprel (offset)
1797 unsigned int offset;
1798 {
1799 unw_rec_list *ptr = alloc_record (pfs_psprel);
1800 ptr->r.record.p.pspoff = offset / 4;
1801 return ptr;
1802 }
1803
1804 static unw_rec_list *
1805 output_pfs_sprel (offset)
1806 unsigned int offset;
1807 {
1808 unw_rec_list *ptr = alloc_record (pfs_sprel);
1809 ptr->r.record.p.spoff = offset / 4;
1810 return ptr;
1811 }
1812
1813 static unw_rec_list *
1814 output_preds_when ()
1815 {
1816 unw_rec_list *ptr = alloc_record (preds_when);
1817 return ptr;
1818 }
1819
1820 static unw_rec_list *
1821 output_preds_gr (gr)
1822 unsigned int gr;
1823 {
1824 unw_rec_list *ptr = alloc_record (preds_gr);
1825 ptr->r.record.p.gr = gr;
1826 return ptr;
1827 }
1828
1829 static unw_rec_list *
1830 output_preds_psprel (offset)
1831 unsigned int offset;
1832 {
1833 unw_rec_list *ptr = alloc_record (preds_psprel);
1834 ptr->r.record.p.pspoff = offset / 4;
1835 return ptr;
1836 }
1837
1838 static unw_rec_list *
1839 output_preds_sprel (offset)
1840 unsigned int offset;
1841 {
1842 unw_rec_list *ptr = alloc_record (preds_sprel);
1843 ptr->r.record.p.spoff = offset / 4;
1844 return ptr;
1845 }
1846
1847 static unw_rec_list *
1848 output_fr_mem (mask)
1849 unsigned int mask;
1850 {
1851 unw_rec_list *ptr = alloc_record (fr_mem);
1852 ptr->r.record.p.rmask = mask;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_frgr_mem (gr_mask, fr_mask)
1858 unsigned int gr_mask;
1859 unsigned int fr_mask;
1860 {
1861 unw_rec_list *ptr = alloc_record (frgr_mem);
1862 ptr->r.record.p.grmask = gr_mask;
1863 ptr->r.record.p.frmask = fr_mask;
1864 return ptr;
1865 }
1866
1867 static unw_rec_list *
1868 output_gr_gr (mask, reg)
1869 unsigned int mask;
1870 unsigned int reg;
1871 {
1872 unw_rec_list *ptr = alloc_record (gr_gr);
1873 ptr->r.record.p.grmask = mask;
1874 ptr->r.record.p.gr = reg;
1875 return ptr;
1876 }
1877
1878 static unw_rec_list *
1879 output_gr_mem (mask)
1880 unsigned int mask;
1881 {
1882 unw_rec_list *ptr = alloc_record (gr_mem);
1883 ptr->r.record.p.rmask = mask;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_br_mem (unsigned int mask)
1889 {
1890 unw_rec_list *ptr = alloc_record (br_mem);
1891 ptr->r.record.p.brmask = mask;
1892 return ptr;
1893 }
1894
1895 static unw_rec_list *
1896 output_br_gr (save_mask, reg)
1897 unsigned int save_mask;
1898 unsigned int reg;
1899 {
1900 unw_rec_list *ptr = alloc_record (br_gr);
1901 ptr->r.record.p.brmask = save_mask;
1902 ptr->r.record.p.gr = reg;
1903 return ptr;
1904 }
1905
1906 static unw_rec_list *
1907 output_spill_base (offset)
1908 unsigned int offset;
1909 {
1910 unw_rec_list *ptr = alloc_record (spill_base);
1911 ptr->r.record.p.pspoff = offset / 4;
1912 return ptr;
1913 }
1914
1915 static unw_rec_list *
1916 output_unat_when ()
1917 {
1918 unw_rec_list *ptr = alloc_record (unat_when);
1919 return ptr;
1920 }
1921
1922 static unw_rec_list *
1923 output_unat_gr (gr)
1924 unsigned int gr;
1925 {
1926 unw_rec_list *ptr = alloc_record (unat_gr);
1927 ptr->r.record.p.gr = gr;
1928 return ptr;
1929 }
1930
1931 static unw_rec_list *
1932 output_unat_psprel (offset)
1933 unsigned int offset;
1934 {
1935 unw_rec_list *ptr = alloc_record (unat_psprel);
1936 ptr->r.record.p.pspoff = offset / 4;
1937 return ptr;
1938 }
1939
1940 static unw_rec_list *
1941 output_unat_sprel (offset)
1942 unsigned int offset;
1943 {
1944 unw_rec_list *ptr = alloc_record (unat_sprel);
1945 ptr->r.record.p.spoff = offset / 4;
1946 return ptr;
1947 }
1948
1949 static unw_rec_list *
1950 output_lc_when ()
1951 {
1952 unw_rec_list *ptr = alloc_record (lc_when);
1953 return ptr;
1954 }
1955
1956 static unw_rec_list *
1957 output_lc_gr (gr)
1958 unsigned int gr;
1959 {
1960 unw_rec_list *ptr = alloc_record (lc_gr);
1961 ptr->r.record.p.gr = gr;
1962 return ptr;
1963 }
1964
1965 static unw_rec_list *
1966 output_lc_psprel (offset)
1967 unsigned int offset;
1968 {
1969 unw_rec_list *ptr = alloc_record (lc_psprel);
1970 ptr->r.record.p.pspoff = offset / 4;
1971 return ptr;
1972 }
1973
1974 static unw_rec_list *
1975 output_lc_sprel (offset)
1976 unsigned int offset;
1977 {
1978 unw_rec_list *ptr = alloc_record (lc_sprel);
1979 ptr->r.record.p.spoff = offset / 4;
1980 return ptr;
1981 }
1982
1983 static unw_rec_list *
1984 output_fpsr_when ()
1985 {
1986 unw_rec_list *ptr = alloc_record (fpsr_when);
1987 return ptr;
1988 }
1989
1990 static unw_rec_list *
1991 output_fpsr_gr (gr)
1992 unsigned int gr;
1993 {
1994 unw_rec_list *ptr = alloc_record (fpsr_gr);
1995 ptr->r.record.p.gr = gr;
1996 return ptr;
1997 }
1998
1999 static unw_rec_list *
2000 output_fpsr_psprel (offset)
2001 unsigned int offset;
2002 {
2003 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2004 ptr->r.record.p.pspoff = offset / 4;
2005 return ptr;
2006 }
2007
2008 static unw_rec_list *
2009 output_fpsr_sprel (offset)
2010 unsigned int offset;
2011 {
2012 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2013 ptr->r.record.p.spoff = offset / 4;
2014 return ptr;
2015 }
2016
2017 static unw_rec_list *
2018 output_priunat_when_gr ()
2019 {
2020 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2021 return ptr;
2022 }
2023
2024 static unw_rec_list *
2025 output_priunat_when_mem ()
2026 {
2027 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2028 return ptr;
2029 }
2030
2031 static unw_rec_list *
2032 output_priunat_gr (gr)
2033 unsigned int gr;
2034 {
2035 unw_rec_list *ptr = alloc_record (priunat_gr);
2036 ptr->r.record.p.gr = gr;
2037 return ptr;
2038 }
2039
2040 static unw_rec_list *
2041 output_priunat_psprel (offset)
2042 unsigned int offset;
2043 {
2044 unw_rec_list *ptr = alloc_record (priunat_psprel);
2045 ptr->r.record.p.pspoff = offset / 4;
2046 return ptr;
2047 }
2048
2049 static unw_rec_list *
2050 output_priunat_sprel (offset)
2051 unsigned int offset;
2052 {
2053 unw_rec_list *ptr = alloc_record (priunat_sprel);
2054 ptr->r.record.p.spoff = offset / 4;
2055 return ptr;
2056 }
2057
2058 static unw_rec_list *
2059 output_bsp_when ()
2060 {
2061 unw_rec_list *ptr = alloc_record (bsp_when);
2062 return ptr;
2063 }
2064
2065 static unw_rec_list *
2066 output_bsp_gr (gr)
2067 unsigned int gr;
2068 {
2069 unw_rec_list *ptr = alloc_record (bsp_gr);
2070 ptr->r.record.p.gr = gr;
2071 return ptr;
2072 }
2073
2074 static unw_rec_list *
2075 output_bsp_psprel (offset)
2076 unsigned int offset;
2077 {
2078 unw_rec_list *ptr = alloc_record (bsp_psprel);
2079 ptr->r.record.p.pspoff = offset / 4;
2080 return ptr;
2081 }
2082
2083 static unw_rec_list *
2084 output_bsp_sprel (offset)
2085 unsigned int offset;
2086 {
2087 unw_rec_list *ptr = alloc_record (bsp_sprel);
2088 ptr->r.record.p.spoff = offset / 4;
2089 return ptr;
2090 }
2091
2092 static unw_rec_list *
2093 output_bspstore_when ()
2094 {
2095 unw_rec_list *ptr = alloc_record (bspstore_when);
2096 return ptr;
2097 }
2098
2099 static unw_rec_list *
2100 output_bspstore_gr (gr)
2101 unsigned int gr;
2102 {
2103 unw_rec_list *ptr = alloc_record (bspstore_gr);
2104 ptr->r.record.p.gr = gr;
2105 return ptr;
2106 }
2107
2108 static unw_rec_list *
2109 output_bspstore_psprel (offset)
2110 unsigned int offset;
2111 {
2112 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2113 ptr->r.record.p.pspoff = offset / 4;
2114 return ptr;
2115 }
2116
2117 static unw_rec_list *
2118 output_bspstore_sprel (offset)
2119 unsigned int offset;
2120 {
2121 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2122 ptr->r.record.p.spoff = offset / 4;
2123 return ptr;
2124 }
2125
2126 static unw_rec_list *
2127 output_rnat_when ()
2128 {
2129 unw_rec_list *ptr = alloc_record (rnat_when);
2130 return ptr;
2131 }
2132
2133 static unw_rec_list *
2134 output_rnat_gr (gr)
2135 unsigned int gr;
2136 {
2137 unw_rec_list *ptr = alloc_record (rnat_gr);
2138 ptr->r.record.p.gr = gr;
2139 return ptr;
2140 }
2141
2142 static unw_rec_list *
2143 output_rnat_psprel (offset)
2144 unsigned int offset;
2145 {
2146 unw_rec_list *ptr = alloc_record (rnat_psprel);
2147 ptr->r.record.p.pspoff = offset / 4;
2148 return ptr;
2149 }
2150
2151 static unw_rec_list *
2152 output_rnat_sprel (offset)
2153 unsigned int offset;
2154 {
2155 unw_rec_list *ptr = alloc_record (rnat_sprel);
2156 ptr->r.record.p.spoff = offset / 4;
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_unwabi (abi, context)
2162 unsigned long abi;
2163 unsigned long context;
2164 {
2165 unw_rec_list *ptr = alloc_record (unwabi);
2166 ptr->r.record.p.abi = abi;
2167 ptr->r.record.p.context = context;
2168 return ptr;
2169 }
2170
2171 static unw_rec_list *
2172 output_epilogue (unsigned long ecount)
2173 {
2174 unw_rec_list *ptr = alloc_record (epilogue);
2175 ptr->r.record.b.ecount = ecount;
2176 return ptr;
2177 }
2178
2179 static unw_rec_list *
2180 output_label_state (unsigned long label)
2181 {
2182 unw_rec_list *ptr = alloc_record (label_state);
2183 ptr->r.record.b.label = label;
2184 return ptr;
2185 }
2186
2187 static unw_rec_list *
2188 output_copy_state (unsigned long label)
2189 {
2190 unw_rec_list *ptr = alloc_record (copy_state);
2191 ptr->r.record.b.label = label;
2192 return ptr;
2193 }
2194
2195 static unw_rec_list *
2196 output_spill_psprel (ab, reg, offset)
2197 unsigned int ab;
2198 unsigned int reg;
2199 unsigned int offset;
2200 {
2201 unw_rec_list *ptr = alloc_record (spill_psprel);
2202 ptr->r.record.x.ab = ab;
2203 ptr->r.record.x.reg = reg;
2204 ptr->r.record.x.pspoff = offset / 4;
2205 return ptr;
2206 }
2207
2208 static unw_rec_list *
2209 output_spill_sprel (ab, reg, offset)
2210 unsigned int ab;
2211 unsigned int reg;
2212 unsigned int offset;
2213 {
2214 unw_rec_list *ptr = alloc_record (spill_sprel);
2215 ptr->r.record.x.ab = ab;
2216 ptr->r.record.x.reg = reg;
2217 ptr->r.record.x.spoff = offset / 4;
2218 return ptr;
2219 }
2220
2221 static unw_rec_list *
2222 output_spill_psprel_p (ab, reg, offset, predicate)
2223 unsigned int ab;
2224 unsigned int reg;
2225 unsigned int offset;
2226 unsigned int predicate;
2227 {
2228 unw_rec_list *ptr = alloc_record (spill_psprel_p);
2229 ptr->r.record.x.ab = ab;
2230 ptr->r.record.x.reg = reg;
2231 ptr->r.record.x.pspoff = offset / 4;
2232 ptr->r.record.x.qp = predicate;
2233 return ptr;
2234 }
2235
2236 static unw_rec_list *
2237 output_spill_sprel_p (ab, reg, offset, predicate)
2238 unsigned int ab;
2239 unsigned int reg;
2240 unsigned int offset;
2241 unsigned int predicate;
2242 {
2243 unw_rec_list *ptr = alloc_record (spill_sprel_p);
2244 ptr->r.record.x.ab = ab;
2245 ptr->r.record.x.reg = reg;
2246 ptr->r.record.x.spoff = offset / 4;
2247 ptr->r.record.x.qp = predicate;
2248 return ptr;
2249 }
2250
2251 static unw_rec_list *
2252 output_spill_reg (ab, reg, targ_reg, xy)
2253 unsigned int ab;
2254 unsigned int reg;
2255 unsigned int targ_reg;
2256 unsigned int xy;
2257 {
2258 unw_rec_list *ptr = alloc_record (spill_reg);
2259 ptr->r.record.x.ab = ab;
2260 ptr->r.record.x.reg = reg;
2261 ptr->r.record.x.treg = targ_reg;
2262 ptr->r.record.x.xy = xy;
2263 return ptr;
2264 }
2265
2266 static unw_rec_list *
2267 output_spill_reg_p (ab, reg, targ_reg, xy, predicate)
2268 unsigned int ab;
2269 unsigned int reg;
2270 unsigned int targ_reg;
2271 unsigned int xy;
2272 unsigned int predicate;
2273 {
2274 unw_rec_list *ptr = alloc_record (spill_reg_p);
2275 ptr->r.record.x.ab = ab;
2276 ptr->r.record.x.reg = reg;
2277 ptr->r.record.x.treg = targ_reg;
2278 ptr->r.record.x.xy = xy;
2279 ptr->r.record.x.qp = predicate;
2280 return ptr;
2281 }
2282
2283 /* Given a unw_rec_list, emit the record in the correct descriptor
2284 format using the specified output function. */
2285
2286 static void
2287 process_one_record (ptr, f)
2288 unw_rec_list *ptr;
2289 vbyte_func f;
2290 {
2291 unsigned long fr_mask, gr_mask;
2292
2293 switch (ptr->r.type)
2294 {
2295 case gr_mem:
2296 case fr_mem:
2297 case br_mem:
2298 case frgr_mem:
2299 /* These are taken care of by prologue/prologue_gr. */
2300 break;
2301
2302 case prologue_gr:
2303 case prologue:
2304 if (ptr->r.type == prologue_gr)
2305 output_R2_format (f, ptr->r.record.r.grmask,
2306 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2307 else
2308 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2309
2310 /* Output descriptor(s) for union of register spills (if any). */
2311 gr_mask = ptr->r.record.r.mask.gr_mem;
2312 fr_mask = ptr->r.record.r.mask.fr_mem;
2313 if (fr_mask)
2314 {
2315 if ((fr_mask & ~0xfUL) == 0)
2316 output_P6_format (f, fr_mem, fr_mask);
2317 else
2318 {
2319 output_P5_format (f, gr_mask, fr_mask);
2320 gr_mask = 0;
2321 }
2322 }
2323 if (gr_mask)
2324 output_P6_format (f, gr_mem, gr_mask);
2325 if (ptr->r.record.r.mask.br_mem)
2326 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2327
2328 /* output imask descriptor if necessary: */
2329 if (ptr->r.record.r.mask.i)
2330 output_P4_format (f, ptr->r.record.r.mask.i,
2331 ptr->r.record.r.imask_size);
2332 break;
2333
2334 case body:
2335 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2336 break;
2337 case mem_stack_f:
2338 case mem_stack_v:
2339 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2340 ptr->r.record.p.size);
2341 break;
2342 case psp_gr:
2343 case rp_gr:
2344 case pfs_gr:
2345 case preds_gr:
2346 case unat_gr:
2347 case lc_gr:
2348 case fpsr_gr:
2349 case priunat_gr:
2350 case bsp_gr:
2351 case bspstore_gr:
2352 case rnat_gr:
2353 output_P3_format (f, ptr->r.type, ptr->r.record.p.gr);
2354 break;
2355 case rp_br:
2356 output_P3_format (f, rp_br, ptr->r.record.p.br);
2357 break;
2358 case psp_sprel:
2359 output_P7_format (f, psp_sprel, ptr->r.record.p.spoff, 0);
2360 break;
2361 case rp_when:
2362 case pfs_when:
2363 case preds_when:
2364 case unat_when:
2365 case lc_when:
2366 case fpsr_when:
2367 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2368 break;
2369 case rp_psprel:
2370 case pfs_psprel:
2371 case preds_psprel:
2372 case unat_psprel:
2373 case lc_psprel:
2374 case fpsr_psprel:
2375 case spill_base:
2376 output_P7_format (f, ptr->r.type, ptr->r.record.p.pspoff, 0);
2377 break;
2378 case rp_sprel:
2379 case pfs_sprel:
2380 case preds_sprel:
2381 case unat_sprel:
2382 case lc_sprel:
2383 case fpsr_sprel:
2384 case priunat_sprel:
2385 case bsp_sprel:
2386 case bspstore_sprel:
2387 case rnat_sprel:
2388 output_P8_format (f, ptr->r.type, ptr->r.record.p.spoff);
2389 break;
2390 case gr_gr:
2391 output_P9_format (f, ptr->r.record.p.grmask, ptr->r.record.p.gr);
2392 break;
2393 case br_gr:
2394 output_P2_format (f, ptr->r.record.p.brmask, ptr->r.record.p.gr);
2395 break;
2396 case spill_mask:
2397 as_bad ("spill_mask record unimplemented.");
2398 break;
2399 case priunat_when_gr:
2400 case priunat_when_mem:
2401 case bsp_when:
2402 case bspstore_when:
2403 case rnat_when:
2404 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2405 break;
2406 case priunat_psprel:
2407 case bsp_psprel:
2408 case bspstore_psprel:
2409 case rnat_psprel:
2410 output_P8_format (f, ptr->r.type, ptr->r.record.p.pspoff);
2411 break;
2412 case unwabi:
2413 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2414 break;
2415 case epilogue:
2416 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2417 break;
2418 case label_state:
2419 case copy_state:
2420 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2421 break;
2422 case spill_psprel:
2423 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2424 ptr->r.record.x.reg, ptr->r.record.x.t,
2425 ptr->r.record.x.pspoff);
2426 break;
2427 case spill_sprel:
2428 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2429 ptr->r.record.x.reg, ptr->r.record.x.t,
2430 ptr->r.record.x.spoff);
2431 break;
2432 case spill_reg:
2433 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2434 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2435 ptr->r.record.x.treg, ptr->r.record.x.t);
2436 break;
2437 case spill_psprel_p:
2438 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2439 ptr->r.record.x.ab, ptr->r.record.x.reg,
2440 ptr->r.record.x.t, ptr->r.record.x.pspoff);
2441 break;
2442 case spill_sprel_p:
2443 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2444 ptr->r.record.x.ab, ptr->r.record.x.reg,
2445 ptr->r.record.x.t, ptr->r.record.x.spoff);
2446 break;
2447 case spill_reg_p:
2448 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2449 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2450 ptr->r.record.x.xy, ptr->r.record.x.treg,
2451 ptr->r.record.x.t);
2452 break;
2453 default:
2454 as_bad ("record_type_not_valid");
2455 break;
2456 }
2457 }
2458
2459 /* Given a unw_rec_list list, process all the records with
2460 the specified function. */
2461 static void
2462 process_unw_records (list, f)
2463 unw_rec_list *list;
2464 vbyte_func f;
2465 {
2466 unw_rec_list *ptr;
2467 for (ptr = list; ptr; ptr = ptr->next)
2468 process_one_record (ptr, f);
2469 }
2470
2471 /* Determine the size of a record list in bytes. */
2472 static int
2473 calc_record_size (list)
2474 unw_rec_list *list;
2475 {
2476 vbyte_count = 0;
2477 process_unw_records (list, count_output);
2478 return vbyte_count;
2479 }
2480
2481 /* Update IMASK bitmask to reflect the fact that one or more registers
2482 of type TYPE are saved starting at instruction with index T. If N
2483 bits are set in REGMASK, it is assumed that instructions T through
2484 T+N-1 save these registers.
2485
2486 TYPE values:
2487 0: no save
2488 1: instruction saves next fp reg
2489 2: instruction saves next general reg
2490 3: instruction saves next branch reg */
2491 static void
2492 set_imask (region, regmask, t, type)
2493 unw_rec_list *region;
2494 unsigned long regmask;
2495 unsigned long t;
2496 unsigned int type;
2497 {
2498 unsigned char *imask;
2499 unsigned long imask_size;
2500 unsigned int i;
2501 int pos;
2502
2503 imask = region->r.record.r.mask.i;
2504 imask_size = region->r.record.r.imask_size;
2505 if (!imask)
2506 {
2507 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2508 imask = xmalloc (imask_size);
2509 memset (imask, 0, imask_size);
2510
2511 region->r.record.r.imask_size = imask_size;
2512 region->r.record.r.mask.i = imask;
2513 }
2514
2515 i = (t / 4) + 1;
2516 pos = 2 * (3 - t % 4);
2517 while (regmask)
2518 {
2519 if (i >= imask_size)
2520 {
2521 as_bad ("Ignoring attempt to spill beyond end of region");
2522 return;
2523 }
2524
2525 imask[i] |= (type & 0x3) << pos;
2526
2527 regmask &= (regmask - 1);
2528 pos -= 2;
2529 if (pos < 0)
2530 {
2531 pos = 6; /* wrap to the top 2-bit field of the next imask byte */
2532 ++i;
2533 }
2534 }
2535 }
2536
2537 static int
2538 count_bits (unsigned long mask)
2539 {
2540 int n = 0;
2541
2542 while (mask)
2543 {
2544 mask &= mask - 1;
2545 ++n;
2546 }
2547 return n;
2548 }
2549
2550 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2551 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2552 containing FIRST_ADDR. */
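/* A slot "address" here is the address of the 16-byte bundle with the
   slot number (0-2) encoded in its two low-order bits, which is why the
   arithmetic below counts three slots per 16 bytes and then adds in the
   "& 0x3" part separately. */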
2553
2554 unsigned long
2555 slot_index (slot_addr, slot_frag, first_addr, first_frag)
2556 unsigned long slot_addr;
2557 fragS *slot_frag;
2558 unsigned long first_addr;
2559 fragS *first_frag;
2560 {
2561 unsigned long index = 0;
2562
2563 /* First time we are called, the initial address and frag are invalid. */
2564 if (first_addr == 0)
2565 return 0;
2566
2567 /* If the two addresses are in different frags, then we need to add in
2568 the remaining size of this frag, and then the entire size of intermediate
2569 frags. */
2570 while (slot_frag != first_frag)
2571 {
2572 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2573
2574 /* Add in the full size of the frag converted to instruction slots. */
2575 index += 3 * (first_frag->fr_fix >> 4);
2576 /* Subtract away the initial part before first_addr. */
2577 index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2578 + ((first_addr & 0x3) - (start_addr & 0x3)));
2579
2580 /* Move to the beginning of the next frag. */
2581 first_frag = first_frag->fr_next;
2582 first_addr = (unsigned long) &first_frag->fr_literal;
2583 }
2584
2585 /* Add in the used part of the last frag. */
2586 index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2587 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2588 return index;
2589 }
2590
2591 /* Optimize unwind record directives. */
2592
2593 static unw_rec_list *
2594 optimize_unw_records (list)
2595 unw_rec_list *list;
2596 {
2597 if (!list)
2598 return NULL;
2599
2600 /* If the only unwind record is ".prologue" or ".prologue" followed
2601 by ".body", then we can optimize the unwind directives away. */
2602 if (list->r.type == prologue
2603 && (list->next == NULL
2604 || (list->next->r.type == body && list->next->next == NULL)))
2605 return NULL;
2606
2607 return list;
2608 }
2609
2610 /* Given a complete record list, process any records which have
2611 unresolved fields (i.e., length counts for a prologue). After
2612 this has been run, all necessary information should be available
2613 within each record to generate an image. */
2614
2615 static void
2616 fixup_unw_records (list)
2617 unw_rec_list *list;
2618 {
2619 unw_rec_list *ptr, *region = 0;
2620 unsigned long first_addr = 0, rlen = 0, t;
2621 fragS *first_frag = 0;
2622
2623 for (ptr = list; ptr; ptr = ptr->next)
2624 {
2625 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2626 as_bad (" Insn slot not set in unwind record.");
2627 t = slot_index (ptr->slot_number, ptr->slot_frag,
2628 first_addr, first_frag);
2629 switch (ptr->r.type)
2630 {
2631 case prologue:
2632 case prologue_gr:
2633 case body:
2634 {
2635 unw_rec_list *last;
2636 int size, dir_len = 0;
2637 unsigned long last_addr;
2638 fragS *last_frag;
2639
2640 first_addr = ptr->slot_number;
2641 first_frag = ptr->slot_frag;
2642 ptr->slot_number = 0;
2643 /* Find either the next body/prologue start, or the end of
2644 the list, and determine the size of the region. */
2645 last_addr = unwind.next_slot_number;
2646 last_frag = unwind.next_slot_frag;
2647 for (last = ptr->next; last != NULL; last = last->next)
2648 if (last->r.type == prologue || last->r.type == prologue_gr
2649 || last->r.type == body)
2650 {
2651 last_addr = last->slot_number;
2652 last_frag = last->slot_frag;
2653 break;
2654 }
2655 else if (!last->next)
2656 {
2657 /* In the absence of an explicit .body directive,
2658 the prologue ends after the last instruction
2659 covered by an unwind directive. */
2660 if (ptr->r.type != body)
2661 {
2662 last_addr = last->slot_number;
2663 last_frag = last->slot_frag;
2664 switch (last->r.type)
2665 {
2666 case frgr_mem:
2667 dir_len = (count_bits (last->r.record.p.frmask)
2668 + count_bits (last->r.record.p.grmask));
2669 break;
2670 case fr_mem:
2671 case gr_mem:
2672 dir_len += count_bits (last->r.record.p.rmask);
2673 break;
2674 case br_mem:
2675 case br_gr:
2676 dir_len += count_bits (last->r.record.p.brmask);
2677 break;
2678 case gr_gr:
2679 dir_len += count_bits (last->r.record.p.grmask);
2680 break;
2681 default:
2682 dir_len = 1;
2683 break;
2684 }
2685 }
2686 break;
2687 }
2688 size = (slot_index (last_addr, last_frag, first_addr, first_frag)
2689 + dir_len);
2690 rlen = ptr->r.record.r.rlen = size;
2691 region = ptr;
2692 break;
2693 }
2694 case epilogue:
2695 ptr->r.record.b.t = rlen - 1 - t;
2696 break;
2697
2698 case mem_stack_f:
2699 case mem_stack_v:
2700 case rp_when:
2701 case pfs_when:
2702 case preds_when:
2703 case unat_when:
2704 case lc_when:
2705 case fpsr_when:
2706 case priunat_when_gr:
2707 case priunat_when_mem:
2708 case bsp_when:
2709 case bspstore_when:
2710 case rnat_when:
2711 ptr->r.record.p.t = t;
2712 break;
2713
2714 case spill_reg:
2715 case spill_sprel:
2716 case spill_psprel:
2717 case spill_reg_p:
2718 case spill_sprel_p:
2719 case spill_psprel_p:
2720 ptr->r.record.x.t = t;
2721 break;
2722
2723 case frgr_mem:
2724 if (!region)
2725 {
2726 as_bad ("frgr_mem record before region record!\n");
2727 return;
2728 }
2729 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2730 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2731 set_imask (region, ptr->r.record.p.frmask, t, 1);
2732 set_imask (region, ptr->r.record.p.grmask, t, 2);
2733 break;
2734 case fr_mem:
2735 if (!region)
2736 {
2737 as_bad ("fr_mem record before region record!\n");
2738 return;
2739 }
2740 region->r.record.r.mask.fr_mem |= ptr->r.record.p.rmask;
2741 set_imask (region, ptr->r.record.p.rmask, t, 1);
2742 break;
2743 case gr_mem:
2744 if (!region)
2745 {
2746 as_bad ("gr_mem record before region record!\n");
2747 return;
2748 }
2749 region->r.record.r.mask.gr_mem |= ptr->r.record.p.rmask;
2750 set_imask (region, ptr->r.record.p.rmask, t, 2);
2751 break;
2752 case br_mem:
2753 if (!region)
2754 {
2755 as_bad ("br_mem record before region record!\n");
2756 return;
2757 }
2758 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2759 set_imask (region, ptr->r.record.p.brmask, t, 3);
2760 break;
2761
2762 case gr_gr:
2763 if (!region)
2764 {
2765 as_bad ("gr_gr record before region record!\n");
2766 return;
2767 }
2768 set_imask (region, ptr->r.record.p.grmask, t, 2);
2769 break;
2770 case br_gr:
2771 if (!region)
2772 {
2773 as_bad ("br_gr record before region record!\n");
2774 return;
2775 }
2776 set_imask (region, ptr->r.record.p.brmask, t, 3);
2777 break;
2778
2779 default:
2780 break;
2781 }
2782 }
2783 }
2784
2785 /* Helper routine for output_unw_records. Emits the header for the unwind
2786 info. */
2787
2788 static int
2789 setup_unwind_header (int size, unsigned char **mem)
2790 {
2791 int x, extra = 0;
2792
2793 /* Pad to a pointer-size boundary. */
2794 x = size % md.pointer_size;
2795 if (x != 0)
2796 extra = md.pointer_size - x;
2797
2798 /* Add 8 for the header + a pointer for the
2799 personality offset. */
2800 *mem = xmalloc (size + extra + 8 + md.pointer_size);
2801
2802 /* Clear the padding area and personality. */
2803 memset (*mem + 8 + size, 0 , extra + md.pointer_size);
2804 /* Initialize the header area. */
2805
2806 md_number_to_chars (*mem, (((bfd_vma) 1 << 48) /* version */
2807 | (unwind.personality_routine
2808 ? ((bfd_vma) 3 << 32) /* U & E handler flags */
2809 : 0)
2810 | ((size + extra) / md.pointer_size)), /* length */
2811 8);
2812
2813 return extra;
2814 }
2815
2816 /* Generate an unwind image from a record list. Returns the number of
2817 bytes in the resulting image. The memory image itself is returned
2818 in the 'ptr' parameter. */
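/* This is the top of the unwind-image pipeline: the record list is first
   pruned (optimize_unw_records), then resolved against instruction slots
   (fixup_unw_records), measured (calc_record_size), and finally emitted
   behind the 8-byte header built by setup_unwind_header. */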
2819 static int
2820 output_unw_records (list, ptr)
2821 unw_rec_list *list;
2822 void **ptr;
2823 {
2824 int size, extra;
2825 unsigned char *mem;
2826
2827 *ptr = NULL;
2828
2829 list = optimize_unw_records (list);
2830 fixup_unw_records (list);
2831 size = calc_record_size (list);
2832
2833 if (size > 0 || unwind.force_unwind_entry)
2834 {
2835 unwind.force_unwind_entry = 0;
2836 extra = setup_unwind_header (size, &mem);
2837
2838 vbyte_mem_ptr = mem + 8;
2839 process_unw_records (list, output_vbyte_mem);
2840
2841 *ptr = mem;
2842
2843 size += extra + 8 + md.pointer_size;
2844 }
2845 return size;
2846 }
2847
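/* Convert a preserved-register expression into the (ab, reg) pair used by
   the spill descriptors: ab=0 selects the preserved GRs r4-r7, ab=1 the
   preserved FRs f2-f5 and f16-f31, ab=2 the preserved BRs b1-b5, and ab=3
   one of the special registers (pr, psp, priunat, rp and the preserved
   ar.* registers), with reg giving the index within each group. */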
2848 static int
2849 convert_expr_to_ab_reg (e, ab, regp)
2850 expressionS *e;
2851 unsigned int *ab;
2852 unsigned int *regp;
2853 {
2854 unsigned int reg;
2855
2856 if (e->X_op != O_register)
2857 return 0;
2858
2859 reg = e->X_add_number;
2860 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
2861 {
2862 *ab = 0;
2863 *regp = reg - REG_GR;
2864 }
2865 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
2866 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
2867 {
2868 *ab = 1;
2869 *regp = reg - REG_FR;
2870 }
2871 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
2872 {
2873 *ab = 2;
2874 *regp = reg - REG_BR;
2875 }
2876 else
2877 {
2878 *ab = 3;
2879 switch (reg)
2880 {
2881 case REG_PR: *regp = 0; break;
2882 case REG_PSP: *regp = 1; break;
2883 case REG_PRIUNAT: *regp = 2; break;
2884 case REG_BR + 0: *regp = 3; break;
2885 case REG_AR + AR_BSP: *regp = 4; break;
2886 case REG_AR + AR_BSPSTORE: *regp = 5; break;
2887 case REG_AR + AR_RNAT: *regp = 6; break;
2888 case REG_AR + AR_UNAT: *regp = 7; break;
2889 case REG_AR + AR_FPSR: *regp = 8; break;
2890 case REG_AR + AR_PFS: *regp = 9; break;
2891 case REG_AR + AR_LC: *regp = 10; break;
2892
2893 default:
2894 return 0;
2895 }
2896 }
2897 return 1;
2898 }
2899
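/* Convert a general, floating-point, or branch register expression into the
   (xy, reg) pair used by spill_reg records: xy=0 for GRs, xy=1 for FRs,
   xy=2 for BRs, with reg giving the register number within that file. */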
2900 static int
2901 convert_expr_to_xy_reg (e, xy, regp)
2902 expressionS *e;
2903 unsigned int *xy;
2904 unsigned int *regp;
2905 {
2906 unsigned int reg;
2907
2908 if (e->X_op != O_register)
2909 return 0;
2910
2911 reg = e->X_add_number;
2912
2913 if (/* reg >= REG_GR && */ reg <= (REG_GR + 127))
2914 {
2915 *xy = 0;
2916 *regp = reg - REG_GR;
2917 }
2918 else if (reg >= REG_FR && reg <= (REG_FR + 127))
2919 {
2920 *xy = 1;
2921 *regp = reg - REG_FR;
2922 }
2923 else if (reg >= REG_BR && reg <= (REG_BR + 7))
2924 {
2925 *xy = 2;
2926 *regp = reg - REG_BR;
2927 }
2928 else
2929 return 0; /* failure: callers test the result with ! */
2930 return 1;
2931 }
2932
2933 static void
2934 dot_radix (dummy)
2935 int dummy ATTRIBUTE_UNUSED;
2936 {
2937 int radix;
2938
2939 SKIP_WHITESPACE ();
2940 radix = *input_line_pointer++;
2941
2942 if (radix != 'C' && !is_end_of_line[(unsigned char) radix])
2943 {
2944 as_bad ("Radix `%c' unsupported", *input_line_pointer);
2945 ignore_rest_of_line ();
2946 return;
2947 }
2948 }
2949
2950 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
2951 static void
2952 dot_special_section (which)
2953 int which;
2954 {
2955 set_section ((char *) special_section_name[which]);
2956 }
2957
2958 static void
2959 add_unwind_entry (ptr)
2960 unw_rec_list *ptr;
2961 {
2962 if (unwind.tail)
2963 unwind.tail->next = ptr;
2964 else
2965 unwind.list = ptr;
2966 unwind.tail = ptr;
2967
2968 /* The current entry can in fact be a chain of unwind entries. */
2969 if (unwind.current_entry == NULL)
2970 unwind.current_entry = ptr;
2971 }
2972
2973 static void
2974 dot_fframe (dummy)
2975 int dummy ATTRIBUTE_UNUSED;
2976 {
2977 expressionS e;
2978
2979 parse_operand (&e);
2980
2981 if (e.X_op != O_constant)
2982 as_bad ("Operand to .fframe must be a constant");
2983 else
2984 add_unwind_entry (output_mem_stack_f (e.X_add_number));
2985 }
2986
2987 static void
2988 dot_vframe (dummy)
2989 int dummy ATTRIBUTE_UNUSED;
2990 {
2991 expressionS e;
2992 unsigned reg;
2993
2994 parse_operand (&e);
2995 reg = e.X_add_number - REG_GR;
2996 if (e.X_op == O_register && reg < 128)
2997 {
2998 add_unwind_entry (output_mem_stack_v ());
2999 if (! (unwind.prologue_mask & 2))
3000 add_unwind_entry (output_psp_gr (reg));
3001 }
3002 else
3003 as_bad ("First operand to .vframe must be a general register");
3004 }
3005
3006 static void
3007 dot_vframesp (dummy)
3008 int dummy ATTRIBUTE_UNUSED;
3009 {
3010 expressionS e;
3011
3012 parse_operand (&e);
3013 if (e.X_op == O_constant)
3014 {
3015 add_unwind_entry (output_mem_stack_v ());
3016 add_unwind_entry (output_psp_sprel (e.X_add_number));
3017 }
3018 else
3019 as_bad ("First operand to .vframesp must be a general register");
3020 }
3021
3022 static void
3023 dot_vframepsp (dummy)
3024 int dummy ATTRIBUTE_UNUSED;
3025 {
3026 expressionS e;
3027
3028 parse_operand (&e);
3029 if (e.X_op == O_constant)
3030 {
3031 add_unwind_entry (output_mem_stack_v ());
3032 add_unwind_entry (output_psp_sprel (e.X_add_number));
3033 }
3034 else
3035 as_bad ("First operand to .vframepsp must be a general register");
3036 }
3037
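/* Handle ".save reg, gr", which records both when (at the current slot) and
   where a preserved resource is saved. For example, a prologue might use
   ".save ar.pfs, r35" (operands are illustrative) to note that ar.pfs was
   copied to r35; that expands into an output_pfs_when record plus an
   output_pfs_gr record below. The rp, ar.pfs and pr "gr" records are
   suppressed when the corresponding bit was already supplied by a
   ".prologue mask, gr" directive. */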
3038 static void
3039 dot_save (dummy)
3040 int dummy ATTRIBUTE_UNUSED;
3041 {
3042 expressionS e1, e2;
3043 int sep;
3044 int reg1, reg2;
3045
3046 sep = parse_operand (&e1);
3047 if (sep != ',')
3048 as_bad ("No second operand to .save");
3049 sep = parse_operand (&e2);
3050
3051 reg1 = e1.X_add_number;
3052 reg2 = e2.X_add_number - REG_GR;
3053
3054 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3055 if (e1.X_op == O_register)
3056 {
3057 if (e2.X_op == O_register && reg2 >= 0 && reg2 < 128)
3058 {
3059 switch (reg1)
3060 {
3061 case REG_AR + AR_BSP:
3062 add_unwind_entry (output_bsp_when ());
3063 add_unwind_entry (output_bsp_gr (reg2));
3064 break;
3065 case REG_AR + AR_BSPSTORE:
3066 add_unwind_entry (output_bspstore_when ());
3067 add_unwind_entry (output_bspstore_gr (reg2));
3068 break;
3069 case REG_AR + AR_RNAT:
3070 add_unwind_entry (output_rnat_when ());
3071 add_unwind_entry (output_rnat_gr (reg2));
3072 break;
3073 case REG_AR + AR_UNAT:
3074 add_unwind_entry (output_unat_when ());
3075 add_unwind_entry (output_unat_gr (reg2));
3076 break;
3077 case REG_AR + AR_FPSR:
3078 add_unwind_entry (output_fpsr_when ());
3079 add_unwind_entry (output_fpsr_gr (reg2));
3080 break;
3081 case REG_AR + AR_PFS:
3082 add_unwind_entry (output_pfs_when ());
3083 if (! (unwind.prologue_mask & 4))
3084 add_unwind_entry (output_pfs_gr (reg2));
3085 break;
3086 case REG_AR + AR_LC:
3087 add_unwind_entry (output_lc_when ());
3088 add_unwind_entry (output_lc_gr (reg2));
3089 break;
3090 case REG_BR:
3091 add_unwind_entry (output_rp_when ());
3092 if (! (unwind.prologue_mask & 8))
3093 add_unwind_entry (output_rp_gr (reg2));
3094 break;
3095 case REG_PR:
3096 add_unwind_entry (output_preds_when ());
3097 if (! (unwind.prologue_mask & 1))
3098 add_unwind_entry (output_preds_gr (reg2));
3099 break;
3100 case REG_PRIUNAT:
3101 add_unwind_entry (output_priunat_when_gr ());
3102 add_unwind_entry (output_priunat_gr (reg2));
3103 break;
3104 default:
3105 as_bad ("First operand not a valid register");
3106 }
3107 }
3108 else
3109 as_bad (" Second operand not a valid register");
3110 }
3111 else
3112 as_bad ("First operand not a register");
3113 }
3114
3115 static void
3116 dot_restore (dummy)
3117 int dummy ATTRIBUTE_UNUSED;
3118 {
3119 expressionS e1, e2;
3120 unsigned long ecount; /* # of _additional_ regions to pop */
3121 int sep;
3122
3123 sep = parse_operand (&e1);
3124 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3125 {
3126 as_bad ("First operand to .restore must be stack pointer (sp)");
3127 return;
3128 }
3129
3130 if (sep == ',')
3131 {
3132 parse_operand (&e2);
3133 if (e2.X_op != O_constant || e2.X_add_number < 0)
3134 {
3135 as_bad ("Second operand to .restore must be a constant >= 0");
3136 return;
3137 }
3138 ecount = e2.X_add_number;
3139 }
3140 else
3141 ecount = unwind.prologue_count - 1;
3142 add_unwind_entry (output_epilogue (ecount));
3143
3144 if (ecount < unwind.prologue_count)
3145 unwind.prologue_count -= ecount + 1;
3146 else
3147 unwind.prologue_count = 0;
3148 }
3149
3150 static void
3151 dot_restorereg (dummy)
3152 int dummy ATTRIBUTE_UNUSED;
3153 {
3154 unsigned int ab, reg;
3155 expressionS e;
3156
3157 parse_operand (&e);
3158
3159 if (!convert_expr_to_ab_reg (&e, &ab, &reg))
3160 {
3161 as_bad ("First operand to .restorereg must be a preserved register");
3162 return;
3163 }
3164 add_unwind_entry (output_spill_reg (ab, reg, 0, 0));
3165 }
3166
3167 static void
3168 dot_restorereg_p (dummy)
3169 int dummy ATTRIBUTE_UNUSED;
3170 {
3171 unsigned int qp, ab, reg;
3172 expressionS e1, e2;
3173 int sep;
3174
3175 sep = parse_operand (&e1);
3176 if (sep != ',')
3177 {
3178 as_bad ("No second operand to .restorereg.p");
3179 return;
3180 }
3181
3182 parse_operand (&e2);
3183
3184 qp = e1.X_add_number - REG_P;
3185 if (e1.X_op != O_register || qp > 63)
3186 {
3187 as_bad ("First operand to .restorereg.p must be a predicate");
3188 return;
3189 }
3190
3191 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3192 {
3193 as_bad ("Second operand to .restorereg.p must be a preserved register");
3194 return;
3195 }
3196 add_unwind_entry (output_spill_reg_p (ab, reg, 0, 0, qp));
3197 }
3198
3199 static int
3200 generate_unwind_image (text_name)
3201 const char *text_name;
3202 {
3203 int size;
3204 unsigned char *unw_rec;
3205
3206 /* Force out pending instructions, to make sure all unwind records have
3207 a valid slot_number field. */
3208 ia64_flush_insns ();
3209
3210 /* Generate the unwind record. */
3211 size = output_unw_records (unwind.list, (void **) &unw_rec);
3212 if (size % md.pointer_size != 0)
3213 as_bad ("Unwind record is not a multiple of %d bytes.", md.pointer_size);
3214
3215 /* If there are unwind records, switch sections, and output the info. */
3216 if (size != 0)
3217 {
3218 unsigned char *where;
3219 char *sec_name;
3220 expressionS exp;
3221
3222 make_unw_section_name (SPECIAL_SECTION_UNWIND_INFO, text_name, sec_name);
3223 set_section (sec_name);
3224 bfd_set_section_flags (stdoutput, now_seg,
3225 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3226
3227 /* Make sure the section has 4 byte alignment for ILP32 and
3228 8 byte alignment for LP64. */
3229 frag_align (md.pointer_size_shift, 0, 0);
3230 record_alignment (now_seg, md.pointer_size_shift);
3231
3232 /* Set expression which points to start of unwind descriptor area. */
3233 unwind.info = expr_build_dot ();
3234
3235 where = (unsigned char *) frag_more (size);
3236
3237 /* Issue a label for this address, and keep track of it to put it
3238 in the unwind section. */
3239
3240 /* Copy the information from the unwind record into this section. The
3241 data is already in the correct byte order. */
3242 memcpy (where, unw_rec, size);
3243
3244 /* Add the personality address to the image. */
3245 if (unwind.personality_routine != 0)
3246 {
3247 exp.X_op = O_symbol;
3248 exp.X_add_symbol = unwind.personality_routine;
3249 exp.X_add_number = 0;
3250 fix_new_exp (frag_now, frag_now_fix () - 8, 8,
3251 &exp, 0, BFD_RELOC_IA64_LTOFF_FPTR64LSB);
3252 unwind.personality_routine = 0;
3253 }
3254 }
3255
3256 free_list_records (unwind.list);
3257 unwind.list = unwind.tail = unwind.current_entry = NULL;
3258
3259 return size;
3260 }
3261
3262 static void
3263 dot_handlerdata (dummy)
3264 int dummy ATTRIBUTE_UNUSED;
3265 {
3266 const char *text_name = segment_name (now_seg);
3267
3268 /* If the text section name is exactly ".text", use an empty suffix so
3269 the default unwind section names are chosen. */
3270 if (strcmp (text_name, ".text") == 0)
3271 text_name = "";
3272
3273 unwind.force_unwind_entry = 1;
3274
3275 /* Remember which segment we're in so we can switch back after .endp */
3276 unwind.saved_text_seg = now_seg;
3277 unwind.saved_text_subseg = now_subseg;
3278
3279 /* Generate unwind info into unwind-info section and then leave that
3280 section as the currently active one so dataXX directives go into
3281 the language specific data area of the unwind info block. */
3282 generate_unwind_image (text_name);
3283 demand_empty_rest_of_line ();
3284 }
3285
3286 static void
3287 dot_unwentry (dummy)
3288 int dummy ATTRIBUTE_UNUSED;
3289 {
3290 unwind.force_unwind_entry = 1;
3291 demand_empty_rest_of_line ();
3292 }
3293
3294 static void
3295 dot_altrp (dummy)
3296 int dummy ATTRIBUTE_UNUSED;
3297 {
3298 expressionS e;
3299 unsigned reg;
3300
3301 parse_operand (&e);
3302 reg = e.X_add_number - REG_BR;
3303 if (e.X_op == O_register && reg < 8)
3304 add_unwind_entry (output_rp_br (reg));
3305 else
3306 as_bad ("First operand not a valid branch register");
3307 }
3308
3309 static void
3310 dot_savemem (psprel)
3311 int psprel;
3312 {
3313 expressionS e1, e2;
3314 int sep;
3315 int reg1, val;
3316
3317 sep = parse_operand (&e1);
3318 if (sep != ',')
3319 as_bad ("No second operand to .save%ssp", psprel ? "p" : "");
3320 sep = parse_operand (&e2);
3321
3322 reg1 = e1.X_add_number;
3323 val = e2.X_add_number;
3324
3325 /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'. */
3326 if (e1.X_op == O_register)
3327 {
3328 if (e2.X_op == O_constant)
3329 {
3330 switch (reg1)
3331 {
3332 case REG_AR + AR_BSP:
3333 add_unwind_entry (output_bsp_when ());
3334 add_unwind_entry ((psprel
3335 ? output_bsp_psprel
3336 : output_bsp_sprel) (val));
3337 break;
3338 case REG_AR + AR_BSPSTORE:
3339 add_unwind_entry (output_bspstore_when ());
3340 add_unwind_entry ((psprel
3341 ? output_bspstore_psprel
3342 : output_bspstore_sprel) (val));
3343 break;
3344 case REG_AR + AR_RNAT:
3345 add_unwind_entry (output_rnat_when ());
3346 add_unwind_entry ((psprel
3347 ? output_rnat_psprel
3348 : output_rnat_sprel) (val));
3349 break;
3350 case REG_AR + AR_UNAT:
3351 add_unwind_entry (output_unat_when ());
3352 add_unwind_entry ((psprel
3353 ? output_unat_psprel
3354 : output_unat_sprel) (val));
3355 break;
3356 case REG_AR + AR_FPSR:
3357 add_unwind_entry (output_fpsr_when ());
3358 add_unwind_entry ((psprel
3359 ? output_fpsr_psprel
3360 : output_fpsr_sprel) (val));
3361 break;
3362 case REG_AR + AR_PFS:
3363 add_unwind_entry (output_pfs_when ());
3364 add_unwind_entry ((psprel
3365 ? output_pfs_psprel
3366 : output_pfs_sprel) (val));
3367 break;
3368 case REG_AR + AR_LC:
3369 add_unwind_entry (output_lc_when ());
3370 add_unwind_entry ((psprel
3371 ? output_lc_psprel
3372 : output_lc_sprel) (val));
3373 break;
3374 case REG_BR:
3375 add_unwind_entry (output_rp_when ());
3376 add_unwind_entry ((psprel
3377 ? output_rp_psprel
3378 : output_rp_sprel) (val));
3379 break;
3380 case REG_PR:
3381 add_unwind_entry (output_preds_when ());
3382 add_unwind_entry ((psprel
3383 ? output_preds_psprel
3384 : output_preds_sprel) (val));
3385 break;
3386 case REG_PRIUNAT:
3387 add_unwind_entry (output_priunat_when_mem ());
3388 add_unwind_entry ((psprel
3389 ? output_priunat_psprel
3390 : output_priunat_sprel) (val));
3391 break;
3392 default:
3393 as_bad ("First operand not a valid register");
3394 }
3395 }
3396 else
3397 as_bad (" Second operand not a valid constant");
3398 }
3399 else
3400 as_bad ("First operand not a register");
3401 }
3402
3403 static void
3404 dot_saveg (dummy)
3405 int dummy ATTRIBUTE_UNUSED;
3406 {
3407 expressionS e1, e2;
3408 int sep;
3409 sep = parse_operand (&e1);
3410 if (sep == ',')
3411 parse_operand (&e2);
3412
3413 if (e1.X_op != O_constant)
3414 as_bad ("First operand to .save.g must be a constant.");
3415 else
3416 {
3417 int grmask = e1.X_add_number;
3418 if (sep != ',')
3419 add_unwind_entry (output_gr_mem (grmask));
3420 else
3421 {
3422 int reg = e2.X_add_number - REG_GR;
3423 if (e2.X_op == O_register && reg >= 0 && reg < 128)
3424 add_unwind_entry (output_gr_gr (grmask, reg));
3425 else
3426 as_bad ("Second operand is an invalid register.");
3427 }
3428 }
3429 }
3430
3431 static void
3432 dot_savef (dummy)
3433 int dummy ATTRIBUTE_UNUSED;
3434 {
3435 expressionS e1;
3436 int sep;
3437 sep = parse_operand (&e1);
3438
3439 if (e1.X_op != O_constant)
3440 as_bad ("Operand to .save.f must be a constant.");
3441 else
3442 add_unwind_entry (output_fr_mem (e1.X_add_number));
3443 }
3444
3445 static void
3446 dot_saveb (dummy)
3447 int dummy ATTRIBUTE_UNUSED;
3448 {
3449 expressionS e1, e2;
3450 unsigned int reg;
3451 unsigned char sep;
3452 int brmask;
3453
3454 sep = parse_operand (&e1);
3455 if (e1.X_op != O_constant)
3456 {
3457 as_bad ("First operand to .save.b must be a constant.");
3458 return;
3459 }
3460 brmask = e1.X_add_number;
3461
3462 if (sep == ',')
3463 {
3464 sep = parse_operand (&e2);
3465 reg = e2.X_add_number - REG_GR;
3466 if (e2.X_op != O_register || reg > 127)
3467 {
3468 as_bad ("Second operand to .save.b must be a general register.");
3469 return;
3470 }
3471 add_unwind_entry (output_br_gr (brmask, e2.X_add_number));
3472 }
3473 else
3474 add_unwind_entry (output_br_mem (brmask));
3475
3476 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3477 ignore_rest_of_line ();
3478 }
3479
3480 static void
3481 dot_savegf (dummy)
3482 int dummy ATTRIBUTE_UNUSED;
3483 {
3484 expressionS e1, e2;
3485 int sep;
3486 sep = parse_operand (&e1);
3487 if (sep == ',')
3488 parse_operand (&e2);
3489
3490 if (e1.X_op != O_constant || sep != ',' || e2.X_op != O_constant)
3491 as_bad ("Both operands of .save.gf must be constants.");
3492 else
3493 {
3494 int grmask = e1.X_add_number;
3495 int frmask = e2.X_add_number;
3496 add_unwind_entry (output_frgr_mem (grmask, frmask));
3497 }
3498 }
3499
3500 static void
3501 dot_spill (dummy)
3502 int dummy ATTRIBUTE_UNUSED;
3503 {
3504 expressionS e;
3505 unsigned char sep;
3506
3507 sep = parse_operand (&e);
3508 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3509 ignore_rest_of_line ();
3510
3511 if (e.X_op != O_constant)
3512 as_bad ("Operand to .spill must be a constant");
3513 else
3514 add_unwind_entry (output_spill_base (e.X_add_number));
3515 }
3516
3517 static void
3518 dot_spillreg (dummy)
3519 int dummy ATTRIBUTE_UNUSED;
3520 {
3521 int sep, ab, xy, reg, treg;
3522 expressionS e1, e2;
3523
3524 sep = parse_operand (&e1);
3525 if (sep != ',')
3526 {
3527 as_bad ("No second operand to .spillreg");
3528 return;
3529 }
3530
3531 parse_operand (&e2);
3532
3533 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3534 {
3535 as_bad ("First operand to .spillreg must be a preserved register");
3536 return;
3537 }
3538
3539 if (!convert_expr_to_xy_reg (&e2, &xy, &treg))
3540 {
3541 as_bad ("Second operand to .spillreg must be a register");
3542 return;
3543 }
3544
3545 add_unwind_entry (output_spill_reg (ab, reg, treg, xy));
3546 }
3547
3548 static void
3549 dot_spillmem (psprel)
3550 int psprel;
3551 {
3552 expressionS e1, e2;
3553 int sep, ab, reg;
3554
3555 sep = parse_operand (&e1);
3556 if (sep != ',')
3557 {
3558 as_bad ("Second operand missing");
3559 return;
3560 }
3561
3562 parse_operand (&e2);
3563
3564 if (!convert_expr_to_ab_reg (&e1, &ab, &reg))
3565 {
3566 as_bad ("First operand to .spill%s must be a preserved register",
3567 psprel ? "psp" : "sp");
3568 return;
3569 }
3570
3571 if (e2.X_op != O_constant)
3572 {
3573 as_bad ("Second operand to .spill%s must be a constant",
3574 psprel ? "psp" : "sp");
3575 return;
3576 }
3577
3578 if (psprel)
3579 add_unwind_entry (output_spill_psprel (ab, reg, e2.X_add_number));
3580 else
3581 add_unwind_entry (output_spill_sprel (ab, reg, e2.X_add_number));
3582 }
3583
3584 static void
3585 dot_spillreg_p (dummy)
3586 int dummy ATTRIBUTE_UNUSED;
3587 {
3588 int sep, ab, xy, reg, treg;
3589 expressionS e1, e2, e3;
3590 unsigned int qp;
3591
3592 sep = parse_operand (&e1);
3593 if (sep != ',')
3594 {
3595 as_bad ("No second and third operand to .spillreg.p");
3596 return;
3597 }
3598
3599 sep = parse_operand (&e2);
3600 if (sep != ',')
3601 {
3602 as_bad ("No third operand to .spillreg.p");
3603 return;
3604 }
3605
3606 parse_operand (&e3);
3607
3608 qp = e1.X_add_number - REG_P;
3609
3610 if (e1.X_op != O_register || qp > 63)
3611 {
3612 as_bad ("First operand to .spillreg.p must be a predicate");
3613 return;
3614 }
3615
3616 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3617 {
3618 as_bad ("Second operand to .spillreg.p must be a preserved register");
3619 return;
3620 }
3621
3622 if (!convert_expr_to_xy_reg (&e3, &xy, &treg))
3623 {
3624 as_bad ("Third operand to .spillreg.p must be a register");
3625 return;
3626 }
3627
3628 add_unwind_entry (output_spill_reg_p (ab, reg, treg, xy, qp));
3629 }
3630
3631 static void
3632 dot_spillmem_p (psprel)
3633 int psprel;
3634 {
3635 expressionS e1, e2, e3;
3636 int sep, ab, reg;
3637 unsigned int qp;
3638
3639 sep = parse_operand (&e1);
3640 if (sep != ',')
3641 {
3642 as_bad ("Second operand missing");
3643 return;
3644 }
3645
3646 parse_operand (&e2);
3647 if (sep != ',')
3648 {
3649 as_bad ("Second operand missing");
3650 return;
3651 }
3652
3653 parse_operand (&e3);
3654
3655 qp = e1.X_add_number - REG_P;
3656 if (e1.X_op != O_register || qp > 63)
3657 {
3658 as_bad ("First operand to .spill%s_p must be a predicate",
3659 psprel ? "psp" : "sp");
3660 return;
3661 }
3662
3663 if (!convert_expr_to_ab_reg (&e2, &ab, &reg))
3664 {
3665 as_bad ("Second operand to .spill%s_p must be a preserved register",
3666 psprel ? "psp" : "sp");
3667 return;
3668 }
3669
3670 if (e3.X_op != O_constant)
3671 {
3672 as_bad ("Third operand to .spill%s_p must be a constant",
3673 psprel ? "psp" : "sp");
3674 return;
3675 }
3676
3677 if (psprel)
3678 add_unwind_entry (output_spill_psprel_p (ab, reg, e3.X_add_number, qp));
3679 else
3680 add_unwind_entry (output_spill_sprel_p (ab, reg, e3.X_add_number, qp));
3681 }
3682
3683 static void
3684 dot_label_state (dummy)
3685 int dummy ATTRIBUTE_UNUSED;
3686 {
3687 expressionS e;
3688
3689 parse_operand (&e);
3690 if (e.X_op != O_constant)
3691 {
3692 as_bad ("Operand to .label_state must be a constant");
3693 return;
3694 }
3695 add_unwind_entry (output_label_state (e.X_add_number));
3696 }
3697
3698 static void
3699 dot_copy_state (dummy)
3700 int dummy ATTRIBUTE_UNUSED;
3701 {
3702 expressionS e;
3703
3704 parse_operand (&e);
3705 if (e.X_op != O_constant)
3706 {
3707 as_bad ("Operand to .copy_state must be a constant");
3708 return;
3709 }
3710 add_unwind_entry (output_copy_state (e.X_add_number));
3711 }
3712
3713 static void
3714 dot_unwabi (dummy)
3715 int dummy ATTRIBUTE_UNUSED;
3716 {
3717 expressionS e1, e2;
3718 unsigned char sep;
3719
3720 sep = parse_operand (&e1);
3721 if (sep != ',')
3722 {
3723 as_bad ("Second operand to .unwabi missing");
3724 return;
3725 }
3726 sep = parse_operand (&e2);
3727 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3728 ignore_rest_of_line ();
3729
3730 if (e1.X_op != O_constant)
3731 {
3732 as_bad ("First operand to .unwabi must be a constant");
3733 return;
3734 }
3735
3736 if (e2.X_op != O_constant)
3737 {
3738 as_bad ("Second operand to .unwabi must be a constant");
3739 return;
3740 }
3741
3742 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number));
3743 }
3744
3745 static void
3746 dot_personality (dummy)
3747 int dummy ATTRIBUTE_UNUSED;
3748 {
3749 char *name, *p, c;
3750 SKIP_WHITESPACE ();
3751 name = input_line_pointer;
3752 c = get_symbol_end ();
3753 p = input_line_pointer;
3754 unwind.personality_routine = symbol_find_or_make (name);
3755 unwind.force_unwind_entry = 1;
3756 *p = c;
3757 SKIP_WHITESPACE ();
3758 demand_empty_rest_of_line ();
3759 }
3760
3761 static void
3762 dot_proc (dummy)
3763 int dummy ATTRIBUTE_UNUSED;
3764 {
3765 char *name, *p, c;
3766 symbolS *sym;
3767
3768 unwind.proc_start = expr_build_dot ();
3769 /* Parse names of main and alternate entry points and mark them as
3770 function symbols: */
3771 while (1)
3772 {
3773 SKIP_WHITESPACE ();
3774 name = input_line_pointer;
3775 c = get_symbol_end ();
3776 p = input_line_pointer;
3777 sym = symbol_find_or_make (name);
3778 if (unwind.proc_start == 0)
3779 {
3780 unwind.proc_start = sym;
3781 }
3782 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
3783 *p = c;
3784 SKIP_WHITESPACE ();
3785 if (*input_line_pointer != ',')
3786 break;
3787 ++input_line_pointer;
3788 }
3789 demand_empty_rest_of_line ();
3790 ia64_do_align (16);
3791
3792 unwind.prologue_count = 0;
3793 unwind.list = unwind.tail = unwind.current_entry = NULL;
3794 unwind.personality_routine = 0;
3795 }
3796
3797 static void
3798 dot_body (dummy)
3799 int dummy ATTRIBUTE_UNUSED;
3800 {
3801 unwind.prologue = 0;
3802 unwind.prologue_mask = 0;
3803
3804 add_unwind_entry (output_body ());
3805 demand_empty_rest_of_line ();
3806 }
3807
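/* Handle ".prologue", with or without operands. The two-operand form, e.g.
   ".prologue 12, 32" (operands are illustrative), supplies a mask saying
   which of rp (8), ar.pfs (4), psp (2) and pr (1) are saved in consecutive
   general registers starting at the given one, and is emitted as a
   prologue_gr region header; the bare form simply opens a new prologue
   region. */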
3808 static void
3809 dot_prologue (dummy)
3810 int dummy ATTRIBUTE_UNUSED;
3811 {
3812 unsigned char sep;
3813 int mask = 0, grsave = 0;
3814
3815 if (!is_it_end_of_statement ())
3816 {
3817 expressionS e1, e2;
3818 sep = parse_operand (&e1);
3819 if (sep != ',')
3820 as_bad ("No second operand to .prologue");
3821 sep = parse_operand (&e2);
3822 if (!is_end_of_line[sep] && !is_it_end_of_statement ())
3823 ignore_rest_of_line ();
3824
3825 if (e1.X_op == O_constant)
3826 {
3827 mask = e1.X_add_number;
3828
3829 if (e2.X_op == O_constant)
3830 grsave = e2.X_add_number;
3831 else if (e2.X_op == O_register
3832 && (grsave = e2.X_add_number - REG_GR) < 128)
3833 ;
3834 else
3835 as_bad ("Second operand not a constant or general register");
3836
3837 add_unwind_entry (output_prologue_gr (mask, grsave));
3838 }
3839 else
3840 as_bad ("First operand not a constant");
3841 }
3842 else
3843 add_unwind_entry (output_prologue ());
3844
3845 unwind.prologue = 1;
3846 unwind.prologue_mask = mask;
3847 ++unwind.prologue_count;
3848 }
3849
3850 static void
3851 dot_endp (dummy)
3852 int dummy ATTRIBUTE_UNUSED;
3853 {
3854 expressionS e;
3855 unsigned char *ptr;
3856 int bytes_per_address;
3857 long where;
3858 segT saved_seg;
3859 subsegT saved_subseg;
3860 const char *sec_name, *text_name;
3861 char *name, *p, c;
3862 symbolS *sym;
3863
3864 if (unwind.saved_text_seg)
3865 {
3866 saved_seg = unwind.saved_text_seg;
3867 saved_subseg = unwind.saved_text_subseg;
3868 unwind.saved_text_seg = NULL;
3869 }
3870 else
3871 {
3872 saved_seg = now_seg;
3873 saved_subseg = now_subseg;
3874 }
3875
3876 /*
3877 Use a slightly ugly scheme to derive the unwind section names from
3878 the text section name:
3879
3880 	text sect.  unwind table sect.
3881 	name:       name:                      comments:
3882 	----------  -----------------          --------------------------------
3883 	.text       .IA_64.unwind
3884 	.text.foo   .IA_64.unwind.text.foo
3885 	.foo        .IA_64.unwind.foo
3886 	.gnu.linkonce.t.foo
3887 	            .gnu.linkonce.ia64unw.foo
3888 	_info       .IA_64.unwind_info         gas issues error message (ditto)
3889 	_infoFOO    .IA_64.unwind_infoFOO      gas issues error message (ditto)
3890
3891 This mapping is done so that:
3892
3893 (a) An object file with unwind info only in .text will use
3894 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3895 This follows the letter of the ABI and also ensures backwards
3896 compatibility with older toolchains.
3897
3898 (b) An object file with unwind info in multiple text sections
3899 will use separate unwind sections for each text section.
3900 This allows us to properly set the "sh_info" and "sh_link"
3901 fields in SHT_IA_64_UNWIND as required by the ABI and also
3902 lets GNU ld support programs with multiple segments
3903 containing unwind info (as might be the case for certain
3904 embedded applications).
3905
3906 (c) An error is issued if there would be a name clash.
3907 */
3908 text_name = segment_name (saved_seg);
3909 if (strncmp (text_name, "_info", 5) == 0)
3910 {
3911 as_bad ("Illegal section name `%s' (causes unwind section name clash)",
3912 text_name);
3913 ignore_rest_of_line ();
3914 return;
3915 }
3916 if (strcmp (text_name, ".text") == 0)
3917 text_name = "";
3918
3919 insn_group_break (1, 0, 0);
3920
3921 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
3922 if (!unwind.info)
3923 generate_unwind_image (text_name);
3924
3925 if (unwind.info || unwind.force_unwind_entry)
3926 {
3927 subseg_set (md.last_text_seg, 0);
3928 unwind.proc_end = expr_build_dot ();
3929
3930 make_unw_section_name (SPECIAL_SECTION_UNWIND, text_name, sec_name);
3931 set_section ((char *) sec_name);
3932 bfd_set_section_flags (stdoutput, now_seg,
3933 SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3934
3935 /* Make sure that section has 4 byte alignment for ILP32 and
3936 8 byte alignment for LP64. */
3937 record_alignment (now_seg, md.pointer_size_shift);
3938
3939 /* Need space for 3 pointers for procedure start, procedure end,
3940 and unwind info. */
3941 ptr = frag_more (3 * md.pointer_size);
3942 where = frag_now_fix () - (3 * md.pointer_size);
3943 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
3944
3945 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
3946 e.X_op = O_pseudo_fixup;
3947 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3948 e.X_add_number = 0;
3949 e.X_add_symbol = unwind.proc_start;
3950 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e);
3951
3952 e.X_op = O_pseudo_fixup;
3953 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3954 e.X_add_number = 0;
3955 e.X_add_symbol = unwind.proc_end;
3956 ia64_cons_fix_new (frag_now, where + bytes_per_address,
3957 bytes_per_address, &e);
3958
3959 if (unwind.info)
3960 {
3961 e.X_op = O_pseudo_fixup;
3962 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
3963 e.X_add_number = 0;
3964 e.X_add_symbol = unwind.info;
3965 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
3966 bytes_per_address, &e);
3967 }
3968 else
3969 md_number_to_chars (ptr + (bytes_per_address * 2), 0,
3970 bytes_per_address);
3971
3972 }
3973 subseg_set (saved_seg, saved_subseg);
3974
3975 /* Parse names of main and alternate entry points and set symbol sizes. */
3976 while (1)
3977 {
3978 SKIP_WHITESPACE ();
3979 name = input_line_pointer;
3980 c = get_symbol_end ();
3981 p = input_line_pointer;
3982 sym = symbol_find (name);
3983 if (sym && unwind.proc_start
3984 && (symbol_get_bfdsym (sym)->flags & BSF_FUNCTION)
3985 && S_GET_SIZE (sym) == 0 && symbol_get_obj (sym)->size == NULL)
3986 {
3987 fragS *fr = symbol_get_frag (unwind.proc_start);
3988 fragS *frag = symbol_get_frag (sym);
3989
3990 /* Check whether the function label is at or beyond last
3991 .proc directive. */
3992 while (fr && fr != frag)
3993 fr = fr->fr_next;
3994 if (fr)
3995 {
3996 if (frag == frag_now && SEG_NORMAL (now_seg))
3997 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
3998 else
3999 {
4000 symbol_get_obj (sym)->size =
4001 (expressionS *) xmalloc (sizeof (expressionS));
4002 symbol_get_obj (sym)->size->X_op = O_subtract;
4003 symbol_get_obj (sym)->size->X_add_symbol
4004 = symbol_new (FAKE_LABEL_NAME, now_seg,
4005 frag_now_fix (), frag_now);
4006 symbol_get_obj (sym)->size->X_op_symbol = sym;
4007 symbol_get_obj (sym)->size->X_add_number = 0;
4008 }
4009 }
4010 }
4011 *p = c;
4012 SKIP_WHITESPACE ();
4013 if (*input_line_pointer != ',')
4014 break;
4015 ++input_line_pointer;
4016 }
4017 demand_empty_rest_of_line ();
4018 unwind.proc_start = unwind.proc_end = unwind.info = 0;
4019 }
4020
4021 static void
4022 dot_template (template)
4023 int template;
4024 {
4025 CURR_SLOT.user_template = template;
4026 }
4027
4028 static void
4029 dot_regstk (dummy)
4030 int dummy ATTRIBUTE_UNUSED;
4031 {
4032 int ins, locs, outs, rots;
4033
4034 if (is_it_end_of_statement ())
4035 ins = locs = outs = rots = 0;
4036 else
4037 {
4038 ins = get_absolute_expression ();
4039 if (*input_line_pointer++ != ',')
4040 goto err;
4041 locs = get_absolute_expression ();
4042 if (*input_line_pointer++ != ',')
4043 goto err;
4044 outs = get_absolute_expression ();
4045 if (*input_line_pointer++ != ',')
4046 goto err;
4047 rots = get_absolute_expression ();
4048 }
4049 set_regstack (ins, locs, outs, rots);
4050 return;
4051
4052 err:
4053 as_bad ("Comma expected");
4054 ignore_rest_of_line ();
4055 }
4056
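/* Handle the rotating-register directives (".rotr", ".rotf", ".rotp"), which
   give symbolic names to groups of rotating registers. Each operand has the
   form "name[count]"; a purely illustrative ".rotr in[4], out[4]" would name
   eight rotating GRs starting at r32. Existing names are first removed from
   md.dynreg_hash and the new sets are then entered into it. */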
4057 static void
4058 dot_rot (type)
4059 int type;
4060 {
4061 unsigned num_regs, num_alloced = 0;
4062 struct dynreg **drpp, *dr;
4063 int ch, base_reg = 0;
4064 char *name, *start;
4065 size_t len;
4066
4067 switch (type)
4068 {
4069 case DYNREG_GR: base_reg = REG_GR + 32; break;
4070 case DYNREG_FR: base_reg = REG_FR + 32; break;
4071 case DYNREG_PR: base_reg = REG_P + 16; break;
4072 default: break;
4073 }
4074
4075 /* First, remove existing names from hash table. */
4076 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4077 {
4078 hash_delete (md.dynreg_hash, dr->name);
4079 dr->num_regs = 0;
4080 }
4081
4082 drpp = &md.dynreg[type];
4083 while (1)
4084 {
4085 start = input_line_pointer;
4086 ch = get_symbol_end ();
4087 *input_line_pointer = ch;
4088 len = (input_line_pointer - start);
4089
4090 SKIP_WHITESPACE ();
4091 if (*input_line_pointer != '[')
4092 {
4093 as_bad ("Expected '['");
4094 goto err;
4095 }
4096 ++input_line_pointer; /* skip '[' */
4097
4098 num_regs = get_absolute_expression ();
4099
4100 if (*input_line_pointer++ != ']')
4101 {
4102 as_bad ("Expected ']'");
4103 goto err;
4104 }
4105 SKIP_WHITESPACE ();
4106
4107 num_alloced += num_regs;
4108 switch (type)
4109 {
4110 case DYNREG_GR:
4111 if (num_alloced > md.rot.num_regs)
4112 {
4113 as_bad ("Used more than the declared %d rotating registers",
4114 md.rot.num_regs);
4115 goto err;
4116 }
4117 break;
4118 case DYNREG_FR:
4119 if (num_alloced > 96)
4120 {
4121 as_bad ("Used more than the available 96 rotating registers");
4122 goto err;
4123 }
4124 break;
4125 case DYNREG_PR:
4126 if (num_alloced > 48)
4127 {
4128 as_bad ("Used more than the available 48 rotating registers");
4129 goto err;
4130 }
4131 break;
4132
4133 default:
4134 break;
4135 }
4136
4137 name = obstack_alloc (&notes, len + 1);
4138 memcpy (name, start, len);
4139 name[len] = '\0';
4140
4141 if (!*drpp)
4142 {
4143 *drpp = obstack_alloc (&notes, sizeof (*dr));
4144 memset (*drpp, 0, sizeof (*dr));
4145 }
4146
4147 dr = *drpp;
4148 dr->name = name;
4149 dr->num_regs = num_regs;
4150 dr->base = base_reg;
4151 drpp = &dr->next;
4152 base_reg += num_regs;
4153
4154 if (hash_insert (md.dynreg_hash, name, dr))
4155 {
4156 as_bad ("Attempt to redefine register set `%s'", name);
4157 goto err;
4158 }
4159
4160 if (*input_line_pointer != ',')
4161 break;
4162 ++input_line_pointer; /* skip comma */
4163 SKIP_WHITESPACE ();
4164 }
4165 demand_empty_rest_of_line ();
4166 return;
4167
4168 err:
4169 ignore_rest_of_line ();
4170 }
4171
4172 static void
4173 dot_byteorder (byteorder)
4174 int byteorder;
4175 {
4176 target_big_endian = byteorder;
4177 }
4178
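/* .psr option [, option ...]
   Illustrative usage, based on the option names recognized below
   (lsb/msb select byte order, abi32/abi64 select the data model):
   .psr abi64, lsb  */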
4179 static void
4180 dot_psr (dummy)
4181 int dummy ATTRIBUTE_UNUSED;
4182 {
4183 char *option;
4184 int ch;
4185
4186 while (1)
4187 {
4188 option = input_line_pointer;
4189 ch = get_symbol_end ();
4190 if (strcmp (option, "lsb") == 0)
4191 md.flags &= ~EF_IA_64_BE;
4192 else if (strcmp (option, "msb") == 0)
4193 md.flags |= EF_IA_64_BE;
4194 else if (strcmp (option, "abi32") == 0)
4195 md.flags &= ~EF_IA_64_ABI64;
4196 else if (strcmp (option, "abi64") == 0)
4197 md.flags |= EF_IA_64_ABI64;
4198 else
4199 as_bad ("Unknown psr option `%s'", option);
4200 *input_line_pointer = ch;
4201
4202 SKIP_WHITESPACE ();
4203 if (*input_line_pointer != ',')
4204 break;
4205
4206 ++input_line_pointer;
4207 SKIP_WHITESPACE ();
4208 }
4209 demand_empty_rest_of_line ();
4210 }
4211
4212 static void
4213 dot_alias (dummy)
4214 int dummy ATTRIBUTE_UNUSED;
4215 {
4216 as_bad (".alias not implemented yet");
4217 }
4218
4219 static void
4220 dot_ln (dummy)
4221 int dummy ATTRIBUTE_UNUSED;
4222 {
4223 new_logical_line (0, get_absolute_expression ());
4224 demand_empty_rest_of_line ();
4225 }
4226
4227 static char *
4228 parse_section_name ()
4229 {
4230 char *name;
4231 int len;
4232
4233 SKIP_WHITESPACE ();
4234 if (*input_line_pointer != '"')
4235 {
4236 as_bad ("Missing section name");
4237 ignore_rest_of_line ();
4238 return 0;
4239 }
4240 name = demand_copy_C_string (&len);
4241 if (!name)
4242 {
4243 ignore_rest_of_line ();
4244 return 0;
4245 }
4246 SKIP_WHITESPACE ();
4247 if (*input_line_pointer != ',')
4248 {
4249 as_bad ("Comma expected after section name");
4250 ignore_rest_of_line ();
4251 return 0;
4252 }
4253 ++input_line_pointer; /* skip comma */
4254 return name;
4255 }
4256
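/* .xdataN "section-name", expr [, expr ...]
   Illustrative usage, inferred from parse_section_name() and cons() below;
   the section name and values are invented for the example:
   .xdata4 ".my_extra_data", 0x12345678, some_sym  */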
4257 static void
4258 dot_xdata (size)
4259 int size;
4260 {
4261 char *name = parse_section_name ();
4262 if (!name)
4263 return;
4264
4265 md.keep_pending_output = 1;
4266 set_section (name);
4267 cons (size);
4268 obj_elf_previous (0);
4269 md.keep_pending_output = 0;
4270 }
4271
4272 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4273
4274 static void
4275 stmt_float_cons (kind)
4276 int kind;
4277 {
4278 size_t size;
4279
4280 switch (kind)
4281 {
4282 case 'd': size = 8; break;
4283 case 'x': size = 10; break;
4284
4285 case 'f':
4286 default:
4287 size = 4;
4288 break;
4289 }
4290 ia64_do_align (size);
4291 float_cons (kind);
4292 }
4293
4294 static void
4295 stmt_cons_ua (size)
4296 int size;
4297 {
4298 int saved_auto_align = md.auto_align;
4299
4300 md.auto_align = 0;
4301 cons (size);
4302 md.auto_align = saved_auto_align;
4303 }
4304
4305 static void
4306 dot_xfloat_cons (kind)
4307 int kind;
4308 {
4309 char *name = parse_section_name ();
4310 if (!name)
4311 return;
4312
4313 md.keep_pending_output = 1;
4314 set_section (name);
4315 stmt_float_cons (kind);
4316 obj_elf_previous (0);
4317 md.keep_pending_output = 0;
4318 }
4319
4320 static void
4321 dot_xstringer (zero)
4322 int zero;
4323 {
4324 char *name = parse_section_name ();
4325 if (!name)
4326 return;
4327
4328 md.keep_pending_output = 1;
4329 set_section (name);
4330 stringer (zero);
4331 obj_elf_previous (0);
4332 md.keep_pending_output = 0;
4333 }
4334
4335 static void
4336 dot_xdata_ua (size)
4337 int size;
4338 {
4339 int saved_auto_align = md.auto_align;
4340 char *name = parse_section_name ();
4341 if (!name)
4342 return;
4343
4344 md.keep_pending_output = 1;
4345 set_section (name);
4346 md.auto_align = 0;
4347 cons (size);
4348 md.auto_align = saved_auto_align;
4349 obj_elf_previous (0);
4350 md.keep_pending_output = 0;
4351 }
4352
4353 static void
4354 dot_xfloat_cons_ua (kind)
4355 int kind;
4356 {
4357 int saved_auto_align = md.auto_align;
4358 char *name = parse_section_name ();
4359 if (!name)
4360 return;
4361
4362 md.keep_pending_output = 1;
4363 set_section (name);
4364 md.auto_align = 0;
4365 stmt_float_cons (kind);
4366 md.auto_align = saved_auto_align;
4367 obj_elf_previous (0);
4368 md.keep_pending_output = 0;
4369 }
4370
4371 /* .reg.val <regname>,value */
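/* Illustrative usage (register and value invented for the example): tell the
   DV checker that r14 is known to hold 0x200 along the current path:
   .reg.val r14, 0x200  */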
4372
4373 static void
4374 dot_reg_val (dummy)
4375 int dummy ATTRIBUTE_UNUSED;
4376 {
4377 expressionS reg;
4378
4379 expression (&reg);
4380 if (reg.X_op != O_register)
4381 {
4382 as_bad (_("Register name expected"));
4383 ignore_rest_of_line ();
4384 }
4385 else if (*input_line_pointer++ != ',')
4386 {
4387 as_bad (_("Comma expected"));
4388 ignore_rest_of_line ();
4389 }
4390 else
4391 {
4392 valueT value = get_absolute_expression ();
4393 int regno = reg.X_add_number;
4394 if (regno < REG_GR || regno > REG_GR + 127)
4395 as_warn (_("Register value annotation ignored"));
4396 else
4397 {
4398 gr_values[regno - REG_GR].known = 1;
4399 gr_values[regno - REG_GR].value = value;
4400 gr_values[regno - REG_GR].path = md.path;
4401 }
4402 }
4403 demand_empty_rest_of_line ();
4404 }
4405
4406 /* select dv checking mode
4407 .auto
4408 .explicit
4409 .default
4410
4411 A stop is inserted when changing modes
4412 */
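/* Illustrative fragment (hand-written, not from the sources): switch a region
   of hand-scheduled code to explicit checking and back to the default mode:
   .explicit
   // ... code with explicit ;; stop bits ...
   .default  */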
4413
4414 static void
4415 dot_dv_mode (type)
4416 int type;
4417 {
4418 if (md.manual_bundling)
4419 as_warn (_("Directive invalid within a bundle"));
4420
4421 if (type == 'E' || type == 'A')
4422 md.mode_explicitly_set = 0;
4423 else
4424 md.mode_explicitly_set = 1;
4425
4426 md.detect_dv = 1;
4427 switch (type)
4428 {
4429 case 'A':
4430 case 'a':
4431 if (md.explicit_mode)
4432 insn_group_break (1, 0, 0);
4433 md.explicit_mode = 0;
4434 break;
4435 case 'E':
4436 case 'e':
4437 if (!md.explicit_mode)
4438 insn_group_break (1, 0, 0);
4439 md.explicit_mode = 1;
4440 break;
4441 default:
4442 case 'd':
4443 if (md.explicit_mode != md.default_explicit_mode)
4444 insn_group_break (1, 0, 0);
4445 md.explicit_mode = md.default_explicit_mode;
4446 md.mode_explicitly_set = 0;
4447 break;
4448 }
4449 }
4450
4451 static void
4452 print_prmask (mask)
4453 valueT mask;
4454 {
4455 int regno;
4456 char *comma = "";
4457 for (regno = 0; regno < 64; regno++)
4458 {
4459 if (mask & ((valueT) 1 << regno))
4460 {
4461 fprintf (stderr, "%s p%d", comma, regno);
4462 comma = ",";
4463 }
4464 }
4465 }
4466
4467 /*
4468 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear")
4469 .pred.rel.imply p1, p2 (also .pred.rel "imply")
4470 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex")
4471 .pred.safe_across_calls p1 [, p2 [,...]]
4472 */
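/* Illustrative usage (predicate and register numbers invented): after a
   compare that sets p6/p7 to complementary values, assert the mutex so the
   DV checker does not flag the two predicated writes that follow:
   cmp.eq p6, p7 = r8, r9
   .pred.rel "mutex", p6, p7  */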
4473
4474 static void
4475 dot_pred_rel (type)
4476 int type;
4477 {
4478 valueT mask = 0;
4479 int count = 0;
4480 int p1 = -1, p2 = -1;
4481
4482 if (type == 0)
4483 {
4484 if (*input_line_pointer != '"')
4485 {
4486 as_bad (_("Missing predicate relation type"));
4487 ignore_rest_of_line ();
4488 return;
4489 }
4490 else
4491 {
4492 int len;
4493 char *form = demand_copy_C_string (&len);
4494 if (strcmp (form, "mutex") == 0)
4495 type = 'm';
4496 else if (strcmp (form, "clear") == 0)
4497 type = 'c';
4498 else if (strcmp (form, "imply") == 0)
4499 type = 'i';
4500 else
4501 {
4502 as_bad (_("Unrecognized predicate relation type"));
4503 ignore_rest_of_line ();
4504 return;
4505 }
4506 }
4507 if (*input_line_pointer == ',')
4508 ++input_line_pointer;
4509 SKIP_WHITESPACE ();
4510 }
4511
4512 SKIP_WHITESPACE ();
4513 while (1)
4514 {
4515 valueT bit = 1;
4516 int regno;
4517
4518 if (TOUPPER (*input_line_pointer) != 'P'
4519 || (regno = atoi (++input_line_pointer)) < 0
4520 || regno > 63)
4521 {
4522 as_bad (_("Predicate register expected"));
4523 ignore_rest_of_line ();
4524 return;
4525 }
4526 while (ISDIGIT (*input_line_pointer))
4527 ++input_line_pointer;
4528 if (p1 == -1)
4529 p1 = regno;
4530 else if (p2 == -1)
4531 p2 = regno;
4532 bit <<= regno;
4533 if (mask & bit)
4534 as_warn (_("Duplicate predicate register ignored"));
4535 mask |= bit;
4536 count++;
4537 /* See if it's a range. */
4538 if (*input_line_pointer == '-')
4539 {
4540 valueT stop = 1;
4541 ++input_line_pointer;
4542
4543 if (TOUPPER (*input_line_pointer) != 'P'
4544 || (regno = atoi (++input_line_pointer)) < 0
4545 || regno > 63)
4546 {
4547 as_bad (_("Predicate register expected"));
4548 ignore_rest_of_line ();
4549 return;
4550 }
4551 while (ISDIGIT (*input_line_pointer))
4552 ++input_line_pointer;
4553 stop <<= regno;
4554 if (bit >= stop)
4555 {
4556 as_bad (_("Bad register range"));
4557 ignore_rest_of_line ();
4558 return;
4559 }
4560 while (bit < stop)
4561 {
4562 bit <<= 1;
4563 mask |= bit;
4564 count++;
4565 }
4566 SKIP_WHITESPACE ();
4567 }
4568 if (*input_line_pointer != ',')
4569 break;
4570 ++input_line_pointer;
4571 SKIP_WHITESPACE ();
4572 }
4573
4574 switch (type)
4575 {
4576 case 'c':
4577 if (count == 0)
4578 mask = ~(valueT) 0;
4579 clear_qp_mutex (mask);
4580 clear_qp_implies (mask, (valueT) 0);
4581 break;
4582 case 'i':
4583 if (count != 2 || p1 == -1 || p2 == -1)
4584 as_bad (_("Predicate source and target required"));
4585 else if (p1 == 0 || p2 == 0)
4586 as_bad (_("Use of p0 is not valid in this context"));
4587 else
4588 add_qp_imply (p1, p2);
4589 break;
4590 case 'm':
4591 if (count < 2)
4592 {
4593 as_bad (_("At least two PR arguments expected"));
4594 break;
4595 }
4596 else if (mask & 1)
4597 {
4598 as_bad (_("Use of p0 is not valid in this context"));
4599 break;
4600 }
4601 add_qp_mutex (mask);
4602 break;
4603 case 's':
4604 /* note that we don't override any existing relations */
4605 if (count == 0)
4606 {
4607 as_bad (_("At least one PR argument expected"));
4608 break;
4609 }
4610 if (md.debug_dv)
4611 {
4612 fprintf (stderr, "Safe across calls: ");
4613 print_prmask (mask);
4614 fprintf (stderr, "\n");
4615 }
4616 qp_safe_across_calls = mask;
4617 break;
4618 }
4619 demand_empty_rest_of_line ();
4620 }
4621
4622 /* .entry label [, label [, ...]]
4623 Hint to DV code that the given labels are to be considered entry points.
4624 Otherwise, only global labels are considered entry points. */
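/* Illustrative usage (label name invented) -- mark a local label as a
   possible entry point for the DV checker:
   .entry alt_entry
   alt_entry:  */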
4625
4626 static void
4627 dot_entry (dummy)
4628 int dummy ATTRIBUTE_UNUSED;
4629 {
4630 const char *err;
4631 char *name;
4632 int c;
4633 symbolS *symbolP;
4634
4635 do
4636 {
4637 name = input_line_pointer;
4638 c = get_symbol_end ();
4639 symbolP = symbol_find_or_make (name);
4640
4641 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (PTR) symbolP);
4642 if (err)
4643 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
4644 name, err);
4645
4646 *input_line_pointer = c;
4647 SKIP_WHITESPACE ();
4648 c = *input_line_pointer;
4649 if (c == ',')
4650 {
4651 input_line_pointer++;
4652 SKIP_WHITESPACE ();
4653 if (*input_line_pointer == '\n')
4654 c = '\n';
4655 }
4656 }
4657 while (c == ',');
4658
4659 demand_empty_rest_of_line ();
4660 }
4661
4662 /* .mem.offset offset, base
4663 "base" is used to distinguish between offsets from a different base. */
4664
4665 static void
4666 dot_mem_offset (dummy)
4667 int dummy ATTRIBUTE_UNUSED;
4668 {
4669 md.mem_offset.hint = 1;
4670 md.mem_offset.offset = get_absolute_expression ();
4671 if (*input_line_pointer != ',')
4672 {
4673 as_bad (_("Comma expected"));
4674 ignore_rest_of_line ();
4675 return;
4676 }
4677 ++input_line_pointer;
4678 md.mem_offset.base = get_absolute_expression ();
4679 demand_empty_rest_of_line ();
4680 }
4681
4682 /* ia64-specific pseudo-ops: */
4683 const pseudo_typeS md_pseudo_table[] =
4684 {
4685 { "radix", dot_radix, 0 },
4686 { "lcomm", s_lcomm_bytes, 1 },
4687 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
4688 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
4689 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
4690 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
4691 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
4692 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
4693 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
4694 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
4695 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
4696 { "proc", dot_proc, 0 },
4697 { "body", dot_body, 0 },
4698 { "prologue", dot_prologue, 0 },
4699 { "endp", dot_endp, 0 },
4700 { "file", dwarf2_directive_file, 0 },
4701 { "loc", dwarf2_directive_loc, 0 },
4702
4703 { "fframe", dot_fframe, 0 },
4704 { "vframe", dot_vframe, 0 },
4705 { "vframesp", dot_vframesp, 0 },
4706 { "vframepsp", dot_vframepsp, 0 },
4707 { "save", dot_save, 0 },
4708 { "restore", dot_restore, 0 },
4709 { "restorereg", dot_restorereg, 0 },
4710 { "restorereg.p", dot_restorereg_p, 0 },
4711 { "handlerdata", dot_handlerdata, 0 },
4712 { "unwentry", dot_unwentry, 0 },
4713 { "altrp", dot_altrp, 0 },
4714 { "savesp", dot_savemem, 0 },
4715 { "savepsp", dot_savemem, 1 },
4716 { "save.g", dot_saveg, 0 },
4717 { "save.f", dot_savef, 0 },
4718 { "save.b", dot_saveb, 0 },
4719 { "save.gf", dot_savegf, 0 },
4720 { "spill", dot_spill, 0 },
4721 { "spillreg", dot_spillreg, 0 },
4722 { "spillsp", dot_spillmem, 0 },
4723 { "spillpsp", dot_spillmem, 1 },
4724 { "spillreg.p", dot_spillreg_p, 0 },
4725 { "spillsp.p", dot_spillmem_p, 0 },
4726 { "spillpsp.p", dot_spillmem_p, 1 },
4727 { "label_state", dot_label_state, 0 },
4728 { "copy_state", dot_copy_state, 0 },
4729 { "unwabi", dot_unwabi, 0 },
4730 { "personality", dot_personality, 0 },
4731 #if 0
4732 { "estate", dot_estate, 0 },
4733 #endif
4734 { "mii", dot_template, 0x0 },
4735 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
4736 { "mlx", dot_template, 0x2 },
4737 { "mmi", dot_template, 0x4 },
4738 { "mfi", dot_template, 0x6 },
4739 { "mmf", dot_template, 0x7 },
4740 { "mib", dot_template, 0x8 },
4741 { "mbb", dot_template, 0x9 },
4742 { "bbb", dot_template, 0xb },
4743 { "mmb", dot_template, 0xc },
4744 { "mfb", dot_template, 0xe },
4745 #if 0
4746 { "lb", dot_scope, 0 },
4747 { "le", dot_scope, 1 },
4748 #endif
4749 { "align", s_align_bytes, 0 },
4750 { "regstk", dot_regstk, 0 },
4751 { "rotr", dot_rot, DYNREG_GR },
4752 { "rotf", dot_rot, DYNREG_FR },
4753 { "rotp", dot_rot, DYNREG_PR },
4754 { "lsb", dot_byteorder, 0 },
4755 { "msb", dot_byteorder, 1 },
4756 { "psr", dot_psr, 0 },
4757 { "alias", dot_alias, 0 },
4758 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
4759
4760 { "xdata1", dot_xdata, 1 },
4761 { "xdata2", dot_xdata, 2 },
4762 { "xdata4", dot_xdata, 4 },
4763 { "xdata8", dot_xdata, 8 },
4764 { "xreal4", dot_xfloat_cons, 'f' },
4765 { "xreal8", dot_xfloat_cons, 'd' },
4766 { "xreal10", dot_xfloat_cons, 'x' },
4767 { "xstring", dot_xstringer, 0 },
4768 { "xstringz", dot_xstringer, 1 },
4769
4770 /* unaligned versions: */
4771 { "xdata2.ua", dot_xdata_ua, 2 },
4772 { "xdata4.ua", dot_xdata_ua, 4 },
4773 { "xdata8.ua", dot_xdata_ua, 8 },
4774 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
4775 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
4776 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
4777
4778 /* annotations/DV checking support */
4779 { "entry", dot_entry, 0 },
4780 { "mem.offset", dot_mem_offset, 0 },
4781 { "pred.rel", dot_pred_rel, 0 },
4782 { "pred.rel.clear", dot_pred_rel, 'c' },
4783 { "pred.rel.imply", dot_pred_rel, 'i' },
4784 { "pred.rel.mutex", dot_pred_rel, 'm' },
4785 { "pred.safe_across_calls", dot_pred_rel, 's' },
4786 { "reg.val", dot_reg_val, 0 },
4787 { "auto", dot_dv_mode, 'a' },
4788 { "explicit", dot_dv_mode, 'e' },
4789 { "default", dot_dv_mode, 'd' },
4790
4791 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
4792 IA-64 aligns data allocation pseudo-ops by default, so we have to
4793 tell it that these ones are supposed to be unaligned. Long term,
4794 should rewrite so that only IA-64 specific data allocation pseudo-ops
4795 are aligned by default. */
4796 {"2byte", stmt_cons_ua, 2},
4797 {"4byte", stmt_cons_ua, 4},
4798 {"8byte", stmt_cons_ua, 8},
4799
4800 { NULL, 0, 0 }
4801 };
4802
4803 static const struct pseudo_opcode
4804 {
4805 const char *name;
4806 void (*handler) (int);
4807 int arg;
4808 }
4809 pseudo_opcode[] =
4810 {
4811 /* these are more like pseudo-ops, but don't start with a dot */
4812 { "data1", cons, 1 },
4813 { "data2", cons, 2 },
4814 { "data4", cons, 4 },
4815 { "data8", cons, 8 },
4816 { "data16", cons, 16 },
4817 { "real4", stmt_float_cons, 'f' },
4818 { "real8", stmt_float_cons, 'd' },
4819 { "real10", stmt_float_cons, 'x' },
4820 { "string", stringer, 0 },
4821 { "stringz", stringer, 1 },
4822
4823 /* unaligned versions: */
4824 { "data2.ua", stmt_cons_ua, 2 },
4825 { "data4.ua", stmt_cons_ua, 4 },
4826 { "data8.ua", stmt_cons_ua, 8 },
4827 { "data16.ua", stmt_cons_ua, 16 },
4828 { "real4.ua", float_cons, 'f' },
4829 { "real8.ua", float_cons, 'd' },
4830 { "real10.ua", float_cons, 'x' },
4831 };
4832
4833 /* Declare a register by creating a symbol for it and entering it in
4834 the symbol table. */
4835
4836 static symbolS *
4837 declare_register (name, regnum)
4838 const char *name;
4839 int regnum;
4840 {
4841 const char *err;
4842 symbolS *sym;
4843
4844 sym = symbol_new (name, reg_section, regnum, &zero_address_frag);
4845
4846 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (PTR) sym);
4847 if (err)
4848 as_fatal ("Inserting \"%s\" into register table failed: %s",
4849 name, err);
4850
4851 return sym;
4852 }
4853
4854 static void
4855 declare_register_set (prefix, num_regs, base_regnum)
4856 const char *prefix;
4857 int num_regs;
4858 int base_regnum;
4859 {
4860 char name[8];
4861 int i;
4862
4863 for (i = 0; i < num_regs; ++i)
4864 {
4865 sprintf (name, "%s%u", prefix, i);
4866 declare_register (name, base_regnum + i);
4867 }
4868 }
4869
4870 static unsigned int
4871 operand_width (opnd)
4872 enum ia64_opnd opnd;
4873 {
4874 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
4875 unsigned int bits = 0;
4876 int i;
4877
4878 bits = 0;
4879 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
4880 bits += odesc->field[i].bits;
4881
4882 return bits;
4883 }
4884
4885 static enum operand_match_result
4886 operand_match (idesc, index, e)
4887 const struct ia64_opcode *idesc;
4888 int index;
4889 expressionS *e;
4890 {
4891 enum ia64_opnd opnd = idesc->operands[index];
4892 int bits, relocatable = 0;
4893 struct insn_fix *fix;
4894 bfd_signed_vma val;
4895
4896 switch (opnd)
4897 {
4898 /* constants: */
4899
4900 case IA64_OPND_AR_CCV:
4901 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
4902 return OPERAND_MATCH;
4903 break;
4904
4905 case IA64_OPND_AR_PFS:
4906 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
4907 return OPERAND_MATCH;
4908 break;
4909
4910 case IA64_OPND_GR0:
4911 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
4912 return OPERAND_MATCH;
4913 break;
4914
4915 case IA64_OPND_IP:
4916 if (e->X_op == O_register && e->X_add_number == REG_IP)
4917 return OPERAND_MATCH;
4918 break;
4919
4920 case IA64_OPND_PR:
4921 if (e->X_op == O_register && e->X_add_number == REG_PR)
4922 return OPERAND_MATCH;
4923 break;
4924
4925 case IA64_OPND_PR_ROT:
4926 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
4927 return OPERAND_MATCH;
4928 break;
4929
4930 case IA64_OPND_PSR:
4931 if (e->X_op == O_register && e->X_add_number == REG_PSR)
4932 return OPERAND_MATCH;
4933 break;
4934
4935 case IA64_OPND_PSR_L:
4936 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
4937 return OPERAND_MATCH;
4938 break;
4939
4940 case IA64_OPND_PSR_UM:
4941 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
4942 return OPERAND_MATCH;
4943 break;
4944
4945 case IA64_OPND_C1:
4946 if (e->X_op == O_constant)
4947 {
4948 if (e->X_add_number == 1)
4949 return OPERAND_MATCH;
4950 else
4951 return OPERAND_OUT_OF_RANGE;
4952 }
4953 break;
4954
4955 case IA64_OPND_C8:
4956 if (e->X_op == O_constant)
4957 {
4958 if (e->X_add_number == 8)
4959 return OPERAND_MATCH;
4960 else
4961 return OPERAND_OUT_OF_RANGE;
4962 }
4963 break;
4964
4965 case IA64_OPND_C16:
4966 if (e->X_op == O_constant)
4967 {
4968 if (e->X_add_number == 16)
4969 return OPERAND_MATCH;
4970 else
4971 return OPERAND_OUT_OF_RANGE;
4972 }
4973 break;
4974
4975 /* register operands: */
4976
4977 case IA64_OPND_AR3:
4978 if (e->X_op == O_register && e->X_add_number >= REG_AR
4979 && e->X_add_number < REG_AR + 128)
4980 return OPERAND_MATCH;
4981 break;
4982
4983 case IA64_OPND_B1:
4984 case IA64_OPND_B2:
4985 if (e->X_op == O_register && e->X_add_number >= REG_BR
4986 && e->X_add_number < REG_BR + 8)
4987 return OPERAND_MATCH;
4988 break;
4989
4990 case IA64_OPND_CR3:
4991 if (e->X_op == O_register && e->X_add_number >= REG_CR
4992 && e->X_add_number < REG_CR + 128)
4993 return OPERAND_MATCH;
4994 break;
4995
4996 case IA64_OPND_F1:
4997 case IA64_OPND_F2:
4998 case IA64_OPND_F3:
4999 case IA64_OPND_F4:
5000 if (e->X_op == O_register && e->X_add_number >= REG_FR
5001 && e->X_add_number < REG_FR + 128)
5002 return OPERAND_MATCH;
5003 break;
5004
5005 case IA64_OPND_P1:
5006 case IA64_OPND_P2:
5007 if (e->X_op == O_register && e->X_add_number >= REG_P
5008 && e->X_add_number < REG_P + 64)
5009 return OPERAND_MATCH;
5010 break;
5011
5012 case IA64_OPND_R1:
5013 case IA64_OPND_R2:
5014 case IA64_OPND_R3:
5015 if (e->X_op == O_register && e->X_add_number >= REG_GR
5016 && e->X_add_number < REG_GR + 128)
5017 return OPERAND_MATCH;
5018 break;
5019
5020 case IA64_OPND_R3_2:
5021 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5022 {
5023 if (e->X_add_number < REG_GR + 4)
5024 return OPERAND_MATCH;
5025 else if (e->X_add_number < REG_GR + 128)
5026 return OPERAND_OUT_OF_RANGE;
5027 }
5028 break;
5029
5030 /* indirect operands: */
5031 case IA64_OPND_CPUID_R3:
5032 case IA64_OPND_DBR_R3:
5033 case IA64_OPND_DTR_R3:
5034 case IA64_OPND_ITR_R3:
5035 case IA64_OPND_IBR_R3:
5036 case IA64_OPND_MSR_R3:
5037 case IA64_OPND_PKR_R3:
5038 case IA64_OPND_PMC_R3:
5039 case IA64_OPND_PMD_R3:
5040 case IA64_OPND_RR_R3:
5041 if (e->X_op == O_index && e->X_op_symbol
5042 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5043 == opnd - IA64_OPND_CPUID_R3))
5044 return OPERAND_MATCH;
5045 break;
5046
5047 case IA64_OPND_MR3:
5048 if (e->X_op == O_index && !e->X_op_symbol)
5049 return OPERAND_MATCH;
5050 break;
5051
5052 /* immediate operands: */
5053 case IA64_OPND_CNT2a:
5054 case IA64_OPND_LEN4:
5055 case IA64_OPND_LEN6:
5056 bits = operand_width (idesc->operands[index]);
5057 if (e->X_op == O_constant)
5058 {
5059 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5060 return OPERAND_MATCH;
5061 else
5062 return OPERAND_OUT_OF_RANGE;
5063 }
5064 break;
5065
5066 case IA64_OPND_CNT2b:
5067 if (e->X_op == O_constant)
5068 {
5069 if ((bfd_vma) (e->X_add_number - 1) < 3)
5070 return OPERAND_MATCH;
5071 else
5072 return OPERAND_OUT_OF_RANGE;
5073 }
5074 break;
5075
5076 case IA64_OPND_CNT2c:
5077 val = e->X_add_number;
5078 if (e->X_op == O_constant)
5079 {
5080 if ((val == 0 || val == 7 || val == 15 || val == 16))
5081 return OPERAND_MATCH;
5082 else
5083 return OPERAND_OUT_OF_RANGE;
5084 }
5085 break;
5086
5087 case IA64_OPND_SOR:
5088 /* SOR must be an integer multiple of 8 */
5089 if (e->X_op == O_constant && e->X_add_number & 0x7)
5090 return OPERAND_OUT_OF_RANGE;
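/* Fall through -- SOR shares the 0..96 range check with SOF and SOL. */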
5091 case IA64_OPND_SOF:
5092 case IA64_OPND_SOL:
5093 if (e->X_op == O_constant)
5094 {
5095 if ((bfd_vma) e->X_add_number <= 96)
5096 return OPERAND_MATCH;
5097 else
5098 return OPERAND_OUT_OF_RANGE;
5099 }
5100 break;
5101
5102 case IA64_OPND_IMMU62:
5103 if (e->X_op == O_constant)
5104 {
5105 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5106 return OPERAND_MATCH;
5107 else
5108 return OPERAND_OUT_OF_RANGE;
5109 }
5110 else
5111 {
5112 /* FIXME -- need 62-bit relocation type */
5113 as_bad (_("62-bit relocation not yet implemented"));
5114 }
5115 break;
5116
5117 case IA64_OPND_IMMU64:
5118 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5119 || e->X_op == O_subtract)
5120 {
5121 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5122 fix->code = BFD_RELOC_IA64_IMM64;
5123 if (e->X_op != O_subtract)
5124 {
5125 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5126 if (e->X_op == O_pseudo_fixup)
5127 e->X_op = O_symbol;
5128 }
5129
5130 fix->opnd = idesc->operands[index];
5131 fix->expr = *e;
5132 fix->is_pcrel = 0;
5133 ++CURR_SLOT.num_fixups;
5134 return OPERAND_MATCH;
5135 }
5136 else if (e->X_op == O_constant)
5137 return OPERAND_MATCH;
5138 break;
5139
5140 case IA64_OPND_CCNT5:
5141 case IA64_OPND_CNT5:
5142 case IA64_OPND_CNT6:
5143 case IA64_OPND_CPOS6a:
5144 case IA64_OPND_CPOS6b:
5145 case IA64_OPND_CPOS6c:
5146 case IA64_OPND_IMMU2:
5147 case IA64_OPND_IMMU7a:
5148 case IA64_OPND_IMMU7b:
5149 case IA64_OPND_IMMU21:
5150 case IA64_OPND_IMMU24:
5151 case IA64_OPND_MBTYPE4:
5152 case IA64_OPND_MHTYPE8:
5153 case IA64_OPND_POS6:
5154 bits = operand_width (idesc->operands[index]);
5155 if (e->X_op == O_constant)
5156 {
5157 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5158 return OPERAND_MATCH;
5159 else
5160 return OPERAND_OUT_OF_RANGE;
5161 }
5162 break;
5163
5164 case IA64_OPND_IMMU9:
5165 bits = operand_width (idesc->operands[index]);
5166 if (e->X_op == O_constant)
5167 {
5168 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5169 {
5170 int lobits = e->X_add_number & 0x3;
5171 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5172 e->X_add_number |= (bfd_vma) 0x3;
5173 return OPERAND_MATCH;
5174 }
5175 else
5176 return OPERAND_OUT_OF_RANGE;
5177 }
5178 break;
5179
5180 case IA64_OPND_IMM44:
5181 /* The least significant 16 bits must be zero. */
5182 if ((e->X_add_number & 0xffff) != 0)
5183 /* XXX technically, this is wrong: we should not be issuing warning
5184 messages until we're sure this instruction pattern is going to
5185 be used! */
5186 as_warn (_("lower 16 bits of mask ignored"));
5187
5188 if (e->X_op == O_constant)
5189 {
5190 if (((e->X_add_number >= 0
5191 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5192 || (e->X_add_number < 0
5193 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5194 {
5195 /* sign-extend */
5196 if (e->X_add_number >= 0
5197 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5198 {
5199 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5200 }
5201 return OPERAND_MATCH;
5202 }
5203 else
5204 return OPERAND_OUT_OF_RANGE;
5205 }
5206 break;
5207
5208 case IA64_OPND_IMM17:
5209 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5210 if (e->X_op == O_constant)
5211 {
5212 if (((e->X_add_number >= 0
5213 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5214 || (e->X_add_number < 0
5215 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5216 {
5217 /* sign-extend */
5218 if (e->X_add_number >= 0
5219 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5220 {
5221 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5222 }
5223 return OPERAND_MATCH;
5224 }
5225 else
5226 return OPERAND_OUT_OF_RANGE;
5227 }
5228 break;
5229
5230 case IA64_OPND_IMM14:
5231 case IA64_OPND_IMM22:
5232 relocatable = 1;
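/* Fall through -- the relocatable immediates share the handling below. */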
5233 case IA64_OPND_IMM1:
5234 case IA64_OPND_IMM8:
5235 case IA64_OPND_IMM8U4:
5236 case IA64_OPND_IMM8M1:
5237 case IA64_OPND_IMM8M1U4:
5238 case IA64_OPND_IMM8M1U8:
5239 case IA64_OPND_IMM9a:
5240 case IA64_OPND_IMM9b:
5241 bits = operand_width (idesc->operands[index]);
5242 if (relocatable && (e->X_op == O_symbol
5243 || e->X_op == O_subtract
5244 || e->X_op == O_pseudo_fixup))
5245 {
5246 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5247
5248 if (idesc->operands[index] == IA64_OPND_IMM14)
5249 fix->code = BFD_RELOC_IA64_IMM14;
5250 else
5251 fix->code = BFD_RELOC_IA64_IMM22;
5252
5253 if (e->X_op != O_subtract)
5254 {
5255 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5256 if (e->X_op == O_pseudo_fixup)
5257 e->X_op = O_symbol;
5258 }
5259
5260 fix->opnd = idesc->operands[index];
5261 fix->expr = *e;
5262 fix->is_pcrel = 0;
5263 ++CURR_SLOT.num_fixups;
5264 return OPERAND_MATCH;
5265 }
5266 else if (e->X_op != O_constant
5267 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5268 return OPERAND_MISMATCH;
5269
5270 if (opnd == IA64_OPND_IMM8M1U4)
5271 {
5272 /* Zero is not valid for unsigned compares that take an adjusted
5273 constant immediate range. */
5274 if (e->X_add_number == 0)
5275 return OPERAND_OUT_OF_RANGE;
5276
5277 /* Sign-extend 32-bit unsigned numbers, so that the following range
5278 checks will work. */
5279 val = e->X_add_number;
5280 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5281 && ((val & ((bfd_vma) 1 << 31)) != 0))
5282 val = ((val << 32) >> 32);
5283
5284 /* Check for 0x100000000. This is valid because
5285 0x100000000-1 is the same as ((uint32_t) -1). */
5286 if (val == ((bfd_signed_vma) 1 << 32))
5287 return OPERAND_MATCH;
5288
5289 val = val - 1;
5290 }
5291 else if (opnd == IA64_OPND_IMM8M1U8)
5292 {
5293 /* Zero is not valid for unsigned compares that take an adjusted
5294 constant immediate range. */
5295 if (e->X_add_number == 0)
5296 return OPERAND_OUT_OF_RANGE;
5297
5298 /* Check for 0x10000000000000000. */
5299 if (e->X_op == O_big)
5300 {
5301 if (generic_bignum[0] == 0
5302 && generic_bignum[1] == 0
5303 && generic_bignum[2] == 0
5304 && generic_bignum[3] == 0
5305 && generic_bignum[4] == 1)
5306 return OPERAND_MATCH;
5307 else
5308 return OPERAND_OUT_OF_RANGE;
5309 }
5310 else
5311 val = e->X_add_number - 1;
5312 }
5313 else if (opnd == IA64_OPND_IMM8M1)
5314 val = e->X_add_number - 1;
5315 else if (opnd == IA64_OPND_IMM8U4)
5316 {
5317 /* Sign-extend 32-bit unsigned numbers, so that the following range
5318 checks will work. */
5319 val = e->X_add_number;
5320 if (((val & (~(bfd_vma) 0 << 32)) == 0)
5321 && ((val & ((bfd_vma) 1 << 31)) != 0))
5322 val = ((val << 32) >> 32);
5323 }
5324 else
5325 val = e->X_add_number;
5326
5327 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5328 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5329 return OPERAND_MATCH;
5330 else
5331 return OPERAND_OUT_OF_RANGE;
5332
5333 case IA64_OPND_INC3:
5334 /* +/- 1, 4, 8, 16 */
5335 val = e->X_add_number;
5336 if (val < 0)
5337 val = -val;
5338 if (e->X_op == O_constant)
5339 {
5340 if ((val == 1 || val == 4 || val == 8 || val == 16))
5341 return OPERAND_MATCH;
5342 else
5343 return OPERAND_OUT_OF_RANGE;
5344 }
5345 break;
5346
5347 case IA64_OPND_TGT25:
5348 case IA64_OPND_TGT25b:
5349 case IA64_OPND_TGT25c:
5350 case IA64_OPND_TGT64:
5351 if (e->X_op == O_symbol)
5352 {
5353 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5354 if (opnd == IA64_OPND_TGT25)
5355 fix->code = BFD_RELOC_IA64_PCREL21F;
5356 else if (opnd == IA64_OPND_TGT25b)
5357 fix->code = BFD_RELOC_IA64_PCREL21M;
5358 else if (opnd == IA64_OPND_TGT25c)
5359 fix->code = BFD_RELOC_IA64_PCREL21B;
5360 else if (opnd == IA64_OPND_TGT64)
5361 fix->code = BFD_RELOC_IA64_PCREL60B;
5362 else
5363 abort ();
5364
5365 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5366 fix->opnd = idesc->operands[index];
5367 fix->expr = *e;
5368 fix->is_pcrel = 1;
5369 ++CURR_SLOT.num_fixups;
5370 return OPERAND_MATCH;
5371 }
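/* Fall through -- a constant branch target is accepted via the TAG13 handling below. */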
5372 case IA64_OPND_TAG13:
5373 case IA64_OPND_TAG13b:
5374 switch (e->X_op)
5375 {
5376 case O_constant:
5377 return OPERAND_MATCH;
5378
5379 case O_symbol:
5380 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5381 /* There are no external relocs for TAG13/TAG13b fields, so we
5382 create a dummy reloc. This will not live past md_apply_fix3. */
5383 fix->code = BFD_RELOC_UNUSED;
5384 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5385 fix->opnd = idesc->operands[index];
5386 fix->expr = *e;
5387 fix->is_pcrel = 1;
5388 ++CURR_SLOT.num_fixups;
5389 return OPERAND_MATCH;
5390
5391 default:
5392 break;
5393 }
5394 break;
5395
5396 default:
5397 break;
5398 }
5399 return OPERAND_MISMATCH;
5400 }
5401
5402 static int
5403 parse_operand (e)
5404 expressionS *e;
5405 {
5406 int sep = '\0';
5407
5408 memset (e, 0, sizeof (*e));
5409 e->X_op = O_absent;
5410 SKIP_WHITESPACE ();
5411 if (*input_line_pointer != '}')
5412 expression (e);
5413 sep = *input_line_pointer++;
5414
5415 if (sep == '}')
5416 {
5417 if (!md.manual_bundling)
5418 as_warn ("Found '}' when manual bundling is off");
5419 else
5420 CURR_SLOT.manual_bundling_off = 1;
5421 md.manual_bundling = 0;
5422 sep = '\0';
5423 }
5424 return sep;
5425 }
5426
5427 /* Returns the next entry in the opcode table that matches the one in
5428 IDESC, and frees the entry in IDESC. If no matching entry is
5429 found, NULL is returned instead. */
5430
5431 static struct ia64_opcode *
5432 get_next_opcode (struct ia64_opcode *idesc)
5433 {
5434 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
5435 ia64_free_opcode (idesc);
5436 return next;
5437 }
5438
5439 /* Parse the operands for the opcode and find the opcode variant that
5440 matches the specified operands, or NULL if no match is possible. */
5441
5442 static struct ia64_opcode *
5443 parse_operands (idesc)
5444 struct ia64_opcode *idesc;
5445 {
5446 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
5447 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
5448 enum ia64_opnd expected_operand = IA64_OPND_NIL;
5449 enum operand_match_result result;
5450 char mnemonic[129];
5451 char *first_arg = 0, *end, *saved_input_pointer;
5452 unsigned int sof;
5453
5454 assert (strlen (idesc->name) <= 128);
5455
5456 strcpy (mnemonic, idesc->name);
5457 if (idesc->operands[2] == IA64_OPND_SOF)
5458 {
5459 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
5460 can't parse the first operand until we have parsed the
5461 remaining operands of the "alloc" instruction. */
5462 SKIP_WHITESPACE ();
5463 first_arg = input_line_pointer;
5464 end = strchr (input_line_pointer, '=');
5465 if (!end)
5466 {
5467 as_bad ("Expected separator `='");
5468 return 0;
5469 }
5470 input_line_pointer = end + 1;
5471 ++i;
5472 ++num_outputs;
5473 }
5474
5475 for (; i < NELEMS (CURR_SLOT.opnd); ++i)
5476 {
5477 sep = parse_operand (CURR_SLOT.opnd + i);
5478 if (CURR_SLOT.opnd[i].X_op == O_absent)
5479 break;
5480
5481 ++num_operands;
5482
5483 if (sep != '=' && sep != ',')
5484 break;
5485
5486 if (sep == '=')
5487 {
5488 if (num_outputs > 0)
5489 as_bad ("Duplicate equal sign (=) in instruction");
5490 else
5491 num_outputs = i + 1;
5492 }
5493 }
5494 if (sep != '\0')
5495 {
5496 as_bad ("Illegal operand separator `%c'", sep);
5497 return 0;
5498 }
5499
5500 if (idesc->operands[2] == IA64_OPND_SOF)
5501 {
5502 /* map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r */
5503 know (strcmp (idesc->name, "alloc") == 0);
5504 if (num_operands == 5 /* first_arg not included in this count! */
5505 && CURR_SLOT.opnd[2].X_op == O_constant
5506 && CURR_SLOT.opnd[3].X_op == O_constant
5507 && CURR_SLOT.opnd[4].X_op == O_constant
5508 && CURR_SLOT.opnd[5].X_op == O_constant)
5509 {
5510 sof = set_regstack (CURR_SLOT.opnd[2].X_add_number,
5511 CURR_SLOT.opnd[3].X_add_number,
5512 CURR_SLOT.opnd[4].X_add_number,
5513 CURR_SLOT.opnd[5].X_add_number);
5514
5515 /* now we can parse the first arg: */
5516 saved_input_pointer = input_line_pointer;
5517 input_line_pointer = first_arg;
5518 sep = parse_operand (CURR_SLOT.opnd + 0);
5519 if (sep != '=')
5520 --num_outputs; /* force error */
5521 input_line_pointer = saved_input_pointer;
5522
5523 CURR_SLOT.opnd[2].X_add_number = sof;
5524 CURR_SLOT.opnd[3].X_add_number
5525 = sof - CURR_SLOT.opnd[4].X_add_number;
5526 CURR_SLOT.opnd[4] = CURR_SLOT.opnd[5];
5527 }
5528 }
5529
5530 highest_unmatched_operand = 0;
5531 curr_out_of_range_pos = -1;
5532 error_pos = 0;
5533 expected_operand = idesc->operands[0];
5534 for (; idesc; idesc = get_next_opcode (idesc))
5535 {
5536 if (num_outputs != idesc->num_outputs)
5537 continue; /* mismatch in # of outputs */
5538
5539 CURR_SLOT.num_fixups = 0;
5540
5541 /* Try to match all operands. If we see an out-of-range operand,
5542 then continue trying to match the rest of the operands, since if
5543 the rest match, then this idesc will give the best error message. */
5544
5545 out_of_range_pos = -1;
5546 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
5547 {
5548 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
5549 if (result != OPERAND_MATCH)
5550 {
5551 if (result != OPERAND_OUT_OF_RANGE)
5552 break;
5553 if (out_of_range_pos < 0)
5554 /* remember position of the first out-of-range operand: */
5555 out_of_range_pos = i;
5556 }
5557 }
5558
5559 /* If we did not match all operands, or if at least one operand was
5560 out-of-range, then this idesc does not match. Keep track of which
5561 idesc matched the most operands before failing. If we have two
5562 idescs that failed at the same position, and one had an out-of-range
5563 operand, then prefer the out-of-range operand. Thus if we have
5564 "add r0=0x1000000,r1" we get an error saying the constant is out
5565 of range instead of an error saying that the constant should have been
5566 a register. */
5567
5568 if (i != num_operands || out_of_range_pos >= 0)
5569 {
5570 if (i > highest_unmatched_operand
5571 || (i == highest_unmatched_operand
5572 && out_of_range_pos > curr_out_of_range_pos))
5573 {
5574 highest_unmatched_operand = i;
5575 if (out_of_range_pos >= 0)
5576 {
5577 expected_operand = idesc->operands[out_of_range_pos];
5578 error_pos = out_of_range_pos;
5579 }
5580 else
5581 {
5582 expected_operand = idesc->operands[i];
5583 error_pos = i;
5584 }
5585 curr_out_of_range_pos = out_of_range_pos;
5586 }
5587 continue;
5588 }
5589
5590 if (num_operands < NELEMS (idesc->operands)
5591 && idesc->operands[num_operands])
5592 continue; /* mismatch in number of arguments */
5593
5594 break;
5595 }
5596 if (!idesc)
5597 {
5598 if (expected_operand)
5599 as_bad ("Operand %u of `%s' should be %s",
5600 error_pos + 1, mnemonic,
5601 elf64_ia64_operands[expected_operand].desc);
5602 else
5603 as_bad ("Operand mismatch");
5604 return 0;
5605 }
5606 return idesc;
5607 }
5608
5609 /* Keep track of state necessary to determine whether a NOP is necessary
5610 to avoid an erratum in A and B step Itanium chips, and return 1 if we
5611 detect a case where additional NOPs may be necessary. */
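/* Sketch of the sequence being looked for, reconstructed from the checks
   below (register and predicate numbers are invented): an F-unit compare
   writes a predicate, an M-unit instruction conditionally writes a GR under
   that predicate, and a later instruction uses that GR as an address:
   fcmp.eq p6, p7 = f2, f3 ;;
   (p6) ld8 r8 = [r9] ;;
   ld8 r10 = [r8]  */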
5612 static int
5613 errata_nop_necessary_p (slot, insn_unit)
5614 struct slot *slot;
5615 enum ia64_unit insn_unit;
5616 {
5617 int i;
5618 struct group *this_group = md.last_groups + md.group_idx;
5619 struct group *prev_group = md.last_groups + (md.group_idx + 2) % 3;
5620 struct ia64_opcode *idesc = slot->idesc;
5621
5622 /* Test whether this could be the first insn in a problematic sequence. */
5623 if (insn_unit == IA64_UNIT_F)
5624 {
5625 for (i = 0; i < idesc->num_outputs; i++)
5626 if (idesc->operands[i] == IA64_OPND_P1
5627 || idesc->operands[i] == IA64_OPND_P2)
5628 {
5629 int regno = slot->opnd[i].X_add_number - REG_P;
5630 /* Ignore invalid operands; they generate errors elsewhere. */
5631 if (regno >= 64)
5632 return 0;
5633 this_group->p_reg_set[regno] = 1;
5634 }
5635 }
5636
5637 /* Test whether this could be the second insn in a problematic sequence. */
5638 if (insn_unit == IA64_UNIT_M && slot->qp_regno > 0
5639 && prev_group->p_reg_set[slot->qp_regno])
5640 {
5641 for (i = 0; i < idesc->num_outputs; i++)
5642 if (idesc->operands[i] == IA64_OPND_R1
5643 || idesc->operands[i] == IA64_OPND_R2
5644 || idesc->operands[i] == IA64_OPND_R3)
5645 {
5646 int regno = slot->opnd[i].X_add_number - REG_GR;
5647 /* Ignore invalid operands; they generate errors elsewhere. */
5648 if (regno >= 128)
5649 return 0;
5650 if (strncmp (idesc->name, "add", 3) != 0
5651 && strncmp (idesc->name, "sub", 3) != 0
5652 && strncmp (idesc->name, "shladd", 6) != 0
5653 && (idesc->flags & IA64_OPCODE_POSTINC) == 0)
5654 this_group->g_reg_set_conditionally[regno] = 1;
5655 }
5656 }
5657
5658 /* Test whether this could be the third insn in a problematic sequence. */
5659 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; i++)
5660 {
5661 if (/* For fc, ptc, ptr, tak, thash, tpa, ttag, probe. */
5662 idesc->operands[i] == IA64_OPND_R3
5663 /* For mov indirect. */
5664 || idesc->operands[i] == IA64_OPND_RR_R3
5665 || idesc->operands[i] == IA64_OPND_DBR_R3
5666 || idesc->operands[i] == IA64_OPND_IBR_R3
5667 || idesc->operands[i] == IA64_OPND_PKR_R3
5668 || idesc->operands[i] == IA64_OPND_PMC_R3
5669 || idesc->operands[i] == IA64_OPND_PMD_R3
5670 || idesc->operands[i] == IA64_OPND_MSR_R3
5671 || idesc->operands[i] == IA64_OPND_CPUID_R3
5672 /* For itr. */
5673 || idesc->operands[i] == IA64_OPND_ITR_R3
5674 || idesc->operands[i] == IA64_OPND_DTR_R3
5675 /* Normal memory addresses (load, store, xchg, cmpxchg, etc.). */
5676 || idesc->operands[i] == IA64_OPND_MR3)
5677 {
5678 int regno = slot->opnd[i].X_add_number - REG_GR;
5679 /* Ignore invalid operands; they generate errors elsewhere. */
5680 if (regno >= 128)
5681 return 0;
5682 if (idesc->operands[i] == IA64_OPND_R3)
5683 {
5684 if (strcmp (idesc->name, "fc") != 0
5685 && strcmp (idesc->name, "tak") != 0
5686 && strcmp (idesc->name, "thash") != 0
5687 && strcmp (idesc->name, "tpa") != 0
5688 && strcmp (idesc->name, "ttag") != 0
5689 && strncmp (idesc->name, "ptr", 3) != 0
5690 && strncmp (idesc->name, "ptc", 3) != 0
5691 && strncmp (idesc->name, "probe", 5) != 0)
5692 return 0;
5693 }
5694 if (prev_group->g_reg_set_conditionally[regno])
5695 return 1;
5696 }
5697 }
5698 return 0;
5699 }
5700
5701 static void
5702 build_insn (slot, insnp)
5703 struct slot *slot;
5704 bfd_vma *insnp;
5705 {
5706 const struct ia64_operand *odesc, *o2desc;
5707 struct ia64_opcode *idesc = slot->idesc;
5708 bfd_signed_vma insn, val;
5709 const char *err;
5710 int i;
5711
5712 insn = idesc->opcode | slot->qp_regno;
5713
5714 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
5715 {
5716 if (slot->opnd[i].X_op == O_register
5717 || slot->opnd[i].X_op == O_constant
5718 || slot->opnd[i].X_op == O_index)
5719 val = slot->opnd[i].X_add_number;
5720 else if (slot->opnd[i].X_op == O_big)
5721 {
5722 /* This must be the value 0x10000000000000000. */
5723 assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
5724 val = 0;
5725 }
5726 else
5727 val = 0;
5728
5729 switch (idesc->operands[i])
5730 {
5731 case IA64_OPND_IMMU64:
5732 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
5733 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
5734 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
5735 | (((val >> 63) & 0x1) << 36));
5736 continue;
5737
5738 case IA64_OPND_IMMU62:
5739 val &= 0x3fffffffffffffffULL;
5740 if (val != slot->opnd[i].X_add_number)
5741 as_warn (_("Value truncated to 62 bits"));
5742 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
5743 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
5744 continue;
5745
5746 case IA64_OPND_TGT64:
5747 val >>= 4;
5748 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
5749 insn |= ((((val >> 59) & 0x1) << 36)
5750 | (((val >> 0) & 0xfffff) << 13));
5751 continue;
5752
5753 case IA64_OPND_AR3:
5754 val -= REG_AR;
5755 break;
5756
5757 case IA64_OPND_B1:
5758 case IA64_OPND_B2:
5759 val -= REG_BR;
5760 break;
5761
5762 case IA64_OPND_CR3:
5763 val -= REG_CR;
5764 break;
5765
5766 case IA64_OPND_F1:
5767 case IA64_OPND_F2:
5768 case IA64_OPND_F3:
5769 case IA64_OPND_F4:
5770 val -= REG_FR;
5771 break;
5772
5773 case IA64_OPND_P1:
5774 case IA64_OPND_P2:
5775 val -= REG_P;
5776 break;
5777
5778 case IA64_OPND_R1:
5779 case IA64_OPND_R2:
5780 case IA64_OPND_R3:
5781 case IA64_OPND_R3_2:
5782 case IA64_OPND_CPUID_R3:
5783 case IA64_OPND_DBR_R3:
5784 case IA64_OPND_DTR_R3:
5785 case IA64_OPND_ITR_R3:
5786 case IA64_OPND_IBR_R3:
5787 case IA64_OPND_MR3:
5788 case IA64_OPND_MSR_R3:
5789 case IA64_OPND_PKR_R3:
5790 case IA64_OPND_PMC_R3:
5791 case IA64_OPND_PMD_R3:
5792 case IA64_OPND_RR_R3:
5793 val -= REG_GR;
5794 break;
5795
5796 default:
5797 break;
5798 }
5799
5800 odesc = elf64_ia64_operands + idesc->operands[i];
5801 err = (*odesc->insert) (odesc, val, &insn);
5802 if (err)
5803 as_bad_where (slot->src_file, slot->src_line,
5804 "Bad operand value: %s", err);
5805 if (idesc->flags & IA64_OPCODE_PSEUDO)
5806 {
5807 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
5808 && odesc == elf64_ia64_operands + IA64_OPND_F3)
5809 {
5810 o2desc = elf64_ia64_operands + IA64_OPND_F2;
5811 (*o2desc->insert) (o2desc, val, &insn);
5812 }
5813 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
5814 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
5815 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
5816 {
5817 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
5818 (*o2desc->insert) (o2desc, 64 - val, &insn);
5819 }
5820 }
5821 }
5822 *insnp = insn;
5823 }
5824
5825 static void
5826 emit_one_bundle ()
5827 {
5828 unsigned int manual_bundling_on = 0, manual_bundling_off = 0;
5829 unsigned int manual_bundling = 0;
5830 enum ia64_unit required_unit, insn_unit = 0;
5831 enum ia64_insn_type type[3], insn_type;
5832 unsigned int template, orig_template;
5833 bfd_vma insn[3] = { -1, -1, -1 };
5834 struct ia64_opcode *idesc;
5835 int end_of_insn_group = 0, user_template = -1;
5836 int n, i, j, first, curr;
5837 unw_rec_list *ptr;
5838 bfd_vma t0 = 0, t1 = 0;
5839 struct label_fix *lfix;
5840 struct insn_fix *ifix;
5841 char mnemonic[16];
5842 fixS *fix;
5843 char *f;
5844
5845 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
5846 know (first >= 0 && first < NUM_SLOTS);
5847 n = MIN (3, md.num_slots_in_use);
5848
5849 /* Determine template: use user_template if specified, best match
5850 otherwise: */
5851
5852 if (md.slot[first].user_template >= 0)
5853 user_template = template = md.slot[first].user_template;
5854 else
5855 {
5856 /* Auto select appropriate template. */
5857 memset (type, 0, sizeof (type));
5858 curr = first;
5859 for (i = 0; i < n; ++i)
5860 {
5861 if (md.slot[curr].label_fixups && i != 0)
5862 break;
5863 type[i] = md.slot[curr].idesc->type;
5864 curr = (curr + 1) % NUM_SLOTS;
5865 }
5866 template = best_template[type[0]][type[1]][type[2]];
5867 }
5868
5869 /* initialize instructions with appropriate nops: */
5870 for (i = 0; i < 3; ++i)
5871 insn[i] = nop[ia64_templ_desc[template].exec_unit[i]];
5872
5873 f = frag_more (16);
5874
5875 /* now fill in slots with as many insns as possible: */
5876 curr = first;
5877 idesc = md.slot[curr].idesc;
5878 end_of_insn_group = 0;
5879 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
5880 {
5881 /* Set the slot number for prologue/body records now as those
5882 refer to the current point, not the point after the
5883 instruction has been issued: */
5884 /* Don't try to delete prologue/body records here, as that will cause
5885 them to also be deleted from the master list of unwind records. */
5886 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
5887 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
5888 || ptr->r.type == body)
5889 {
5890 ptr->slot_number = (unsigned long) f + i;
5891 ptr->slot_frag = frag_now;
5892 }
5893
5894 if (idesc->flags & IA64_OPCODE_SLOT2)
5895 {
5896 if (manual_bundling && i != 2)
5897 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5898 "`%s' must be last in bundle", idesc->name);
5899 else
5900 i = 2;
5901 }
5902 if (idesc->flags & IA64_OPCODE_LAST)
5903 {
5904 int required_slot;
5905 unsigned int required_template;
5906
5907 /* If we need a stop bit after an M slot, our only choice is
5908 template 5 (M;;MI). If we need a stop bit after a B
5909 slot, our only choice is to place it at the end of the
5910 bundle, because the only available templates are MIB,
5911 MBB, BBB, MMB, and MFB. We don't handle anything other
5912 than M and B slots because these are the only kind of
5913 instructions that can have the IA64_OPCODE_LAST bit set. */
5914 required_template = template;
5915 switch (idesc->type)
5916 {
5917 case IA64_TYPE_M:
5918 required_slot = 0;
5919 required_template = 5;
5920 break;
5921
5922 case IA64_TYPE_B:
5923 required_slot = 2;
5924 break;
5925
5926 default:
5927 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5928 "Internal error: don't know how to force %s to end"
5929 "of instruction group", idesc->name);
5930 required_slot = i;
5931 break;
5932 }
5933 if (manual_bundling && i != required_slot)
5934 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5935 "`%s' must be last in instruction group",
5936 idesc->name);
5937 if (required_slot < i)
5938 /* Can't fit this instruction. */
5939 break;
5940
5941 i = required_slot;
5942 if (required_template != template)
5943 {
5944 /* If we switch the template, we need to reset the NOPs
5945 after slot i. The slot-types of the instructions ahead
5946 of i never change, so we don't need to worry about
5947 changing NOPs in front of this slot. */
5948 for (j = i; j < 3; ++j)
5949 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
5950 }
5951 template = required_template;
5952 }
5953 if (curr != first && md.slot[curr].label_fixups)
5954 {
5955 if (manual_bundling_on)
5956 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
5957 "Label must be first in a bundle");
5958 /* This insn must go into the first slot of a bundle. */
5959 break;
5960 }
5961
5962 manual_bundling_on = md.slot[curr].manual_bundling_on;
5963 manual_bundling_off = md.slot[curr].manual_bundling_off;
5964
5965 if (manual_bundling_on)
5966 {
5967 if (curr == first)
5968 manual_bundling = 1;
5969 else
5970 break; /* need to start a new bundle */
5971 }
5972
5973 if (end_of_insn_group && md.num_slots_in_use >= 1)
5974 {
5975 /* We need an instruction group boundary in the middle of a
5976 bundle. See if we can switch to another template with
5977 an appropriate boundary. */
5978
5979 orig_template = template;
5980 if (i == 1 && (user_template == 4
5981 || (user_template < 0
5982 && (ia64_templ_desc[template].exec_unit[0]
5983 == IA64_UNIT_M))))
5984 {
5985 template = 5;
5986 end_of_insn_group = 0;
5987 }
5988 else if (i == 2 && (user_template == 0
5989 || (user_template < 0
5990 && (ia64_templ_desc[template].exec_unit[1]
5991 == IA64_UNIT_I)))
5992 /* This test makes sure we don't switch the template if
5993 the next instruction is one that needs to be first in
5994 an instruction group. Since all those instructions are
5995 in the M group, there is no way such an instruction can
5996 fit in this bundle even if we switch the template. The
5997 reason we have to check for this is that otherwise we
5998 may end up generating "MI;;I M.." which has the deadly
5999 effect that the second M instruction is no longer the
6000 first in the bundle! --davidm 99/12/16 */
6001 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6002 {
6003 template = 1;
6004 end_of_insn_group = 0;
6005 }
6006 else if (curr != first)
6007 /* can't fit this insn */
6008 break;
6009
6010 if (template != orig_template)
6011 /* if we switch the template, we need to reset the NOPs
6012 after slot i. The slot-types of the instructions ahead
6013 of i never change, so we don't need to worry about
6014 changing NOPs in front of this slot. */
6015 for (j = i; j < 3; ++j)
6016 insn[j] = nop[ia64_templ_desc[template].exec_unit[j]];
6017 }
6018 required_unit = ia64_templ_desc[template].exec_unit[i];
6019
6020 /* resolve dynamic opcodes such as "break" and "nop": */
6021 if (idesc->type == IA64_TYPE_DYN)
6022 {
6023 if ((strcmp (idesc->name, "nop") == 0)
6024 || (strcmp (idesc->name, "break") == 0))
6025 insn_unit = required_unit;
6026 else if (strcmp (idesc->name, "chk.s") == 0)
6027 {
6028 insn_unit = IA64_UNIT_M;
6029 if (required_unit == IA64_UNIT_I)
6030 insn_unit = IA64_UNIT_I;
6031 }
6032 else
6033 as_fatal ("emit_one_bundle: unexpected dynamic op");
6034
6035 sprintf (mnemonic, "%s.%c", idesc->name, "?imbf??"[insn_unit]);
6036 ia64_free_opcode (idesc);
6037 md.slot[curr].idesc = idesc = ia64_find_opcode (mnemonic);
6038 #if 0
6039 know (!idesc->next); /* no resolved dynamic ops have collisions */
6040 #endif
6041 }
6042 else
6043 {
6044 insn_type = idesc->type;
6045 insn_unit = IA64_UNIT_NIL;
6046 switch (insn_type)
6047 {
6048 case IA64_TYPE_A:
6049 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6050 insn_unit = required_unit;
6051 break;
6052 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6053 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6054 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6055 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6056 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6057 default: break;
6058 }
6059 }
6060
6061 if (insn_unit != required_unit)
6062 {
6063 if (required_unit == IA64_UNIT_L
6064 && insn_unit == IA64_UNIT_I
6065 && !(idesc->flags & IA64_OPCODE_X_IN_MLX))
6066 {
6067 /* we got ourselves an MLX template but the current
6068 instruction isn't an X-unit, or an I-unit instruction
6069 that can go into the X slot of an MLX template. Duh. */
6070 if (md.num_slots_in_use >= NUM_SLOTS)
6071 {
6072 as_bad_where (md.slot[curr].src_file,
6073 md.slot[curr].src_line,
6074 "`%s' can't go in X slot of "
6075 "MLX template", idesc->name);
6076 /* drop this insn so we don't livelock: */
6077 --md.num_slots_in_use;
6078 }
6079 break;
6080 }
6081 continue; /* try next slot */
6082 }
6083
6084 {
6085 bfd_vma addr;
6086
6087 addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6088 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6089 }
6090
6091 if (errata_nop_necessary_p (md.slot + curr, insn_unit))
6092 as_warn (_("Additional NOP may be necessary to work around Itanium processor A/B step errata"));
6093
6094 build_insn (md.slot + curr, insn + i);
6095
6096 /* Set slot counts for non prologue/body unwind records. */
6097 for (ptr = md.slot[curr].unwind_record; ptr; ptr = ptr->next)
6098 if (ptr->r.type != prologue && ptr->r.type != prologue_gr
6099 && ptr->r.type != body)
6100 {
6101 ptr->slot_number = (unsigned long) f + i;
6102 ptr->slot_frag = frag_now;
6103 }
6104 md.slot[curr].unwind_record = NULL;
6105
6106 if (required_unit == IA64_UNIT_L)
6107 {
6108 know (i == 1);
6109 /* skip one slot for long/X-unit instructions */
6110 ++i;
6111 }
6112 --md.num_slots_in_use;
6113
6114 /* now is a good time to fix up the labels for this insn: */
6115 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6116 {
6117 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6118 symbol_set_frag (lfix->sym, frag_now);
6119 }
6120 /* and fix up the tags also. */
6121 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6122 {
6123 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6124 symbol_set_frag (lfix->sym, frag_now);
6125 }
6126
6127 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6128 {
6129 ifix = md.slot[curr].fixup + j;
6130 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6131 &ifix->expr, ifix->is_pcrel, ifix->code);
6132 fix->tc_fix_data.opnd = ifix->opnd;
6133 fix->fx_plt = (fix->fx_r_type == BFD_RELOC_IA64_PLTOFF22);
6134 fix->fx_file = md.slot[curr].src_file;
6135 fix->fx_line = md.slot[curr].src_line;
6136 }
6137
6138 end_of_insn_group = md.slot[curr].end_of_insn_group;
6139
6140 if (end_of_insn_group)
6141 {
6142 md.group_idx = (md.group_idx + 1) % 3;
6143 memset (md.last_groups + md.group_idx, 0, sizeof md.last_groups[0]);
6144 }
6145
6146 /* clear slot: */
6147 ia64_free_opcode (md.slot[curr].idesc);
6148 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6149 md.slot[curr].user_template = -1;
6150
6151 if (manual_bundling_off)
6152 {
6153 manual_bundling = 0;
6154 break;
6155 }
6156 curr = (curr + 1) % NUM_SLOTS;
6157 idesc = md.slot[curr].idesc;
6158 }
6159 if (manual_bundling)
6160 {
6161 if (md.num_slots_in_use > 0)
6162 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6163 "`%s' does not fit into %s template",
6164 idesc->name, ia64_templ_desc[template].name);
6165 else
6166 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6167 "Missing '}' at end of file");
6168 }
6169 know (md.num_slots_in_use < NUM_SLOTS);
6170
6171 t0 = end_of_insn_group | (template << 1) | (insn[0] << 5) | (insn[1] << 46);
6172 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6173
6174 number_to_chars_littleendian (f + 0, t0, 8);
6175 number_to_chars_littleendian (f + 8, t1, 8);
6176
6177 unwind.next_slot_number = (unsigned long) f + 16;
6178 unwind.next_slot_frag = frag_now;
6179 }
6180
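/* Example invocations exercising the options handled below (editorial
   illustration; the input file names are invented):
   as -mlp64 -mle foo.s
   as -xexplicit -xdebug bar.s  */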
6181 int
6182 md_parse_option (c, arg)
6183 int c;
6184 char *arg;
6185 {
6186
6187 switch (c)
6188 {
6189 /* Switches from the Intel assembler. */
6190 case 'm':
6191 if (strcmp (arg, "ilp64") == 0
6192 || strcmp (arg, "lp64") == 0
6193 || strcmp (arg, "p64") == 0)
6194 {
6195 md.flags |= EF_IA_64_ABI64;
6196 }
6197 else if (strcmp (arg, "ilp32") == 0)
6198 {
6199 md.flags &= ~EF_IA_64_ABI64;
6200 }
6201 else if (strcmp (arg, "le") == 0)
6202 {
6203 md.flags &= ~EF_IA_64_BE;
6204 }
6205 else if (strcmp (arg, "be") == 0)
6206 {
6207 md.flags |= EF_IA_64_BE;
6208 }
6209 else
6210 return 0;
6211 break;
6212
6213 case 'N':
6214 if (strcmp (arg, "so") == 0)
6215 {
6216 /* Suppress signon message. */
6217 }
6218 else if (strcmp (arg, "pi") == 0)
6219 {
6220 /* Reject privileged instructions. FIXME */
6221 }
6222 else if (strcmp (arg, "us") == 0)
6223 {
6224 /* Allow union of signed and unsigned range. FIXME */
6225 }
6226 else if (strcmp (arg, "close_fcalls") == 0)
6227 {
6228 /* Do not resolve global function calls. */
6229 }
6230 else
6231 return 0;
6232 break;
6233
6234 case 'C':
6235 /* temp[="prefix"] Insert temporary labels into the object file
6236 symbol table prefixed by "prefix".
6237 Default prefix is ":temp:".
6238 */
6239 break;
6240
6241 case 'a':
6242 /* indirect=<tgt> Assume unannotated indirect branches behavior
6243 according to <tgt> --
6244 exit: branch out from the current context (default)
6245 labels: all labels in context may be branch targets
6246 */
6247 if (strncmp (arg, "indirect=", 9) != 0)
6248 return 0;
6249 break;
6250
6251 case 'x':
6252 /* -X conflicts with an ignored option, use -x instead */
6253 md.detect_dv = 1;
6254 if (!arg || strcmp (arg, "explicit") == 0)
6255 {
6256 /* set default mode to explicit */
6257 md.default_explicit_mode = 1;
6258 break;
6259 }
6260 else if (strcmp (arg, "auto") == 0)
6261 {
6262 md.default_explicit_mode = 0;
6263 }
6264 else if (strcmp (arg, "debug") == 0)
6265 {
6266 md.debug_dv = 1;
6267 }
6268 else if (strcmp (arg, "debugx") == 0)
6269 {
6270 md.default_explicit_mode = 1;
6271 md.debug_dv = 1;
6272 }
6273 else
6274 {
6275 as_bad (_("Unrecognized option '-x%s'"), arg);
6276 }
6277 break;
6278
6279 case 'S':
6280 /* nops Print nops statistics. */
6281 break;
6282
6283 /* GNU specific switches for gcc. */
6284 case OPTION_MCONSTANT_GP:
6285 md.flags |= EF_IA_64_CONS_GP;
6286 break;
6287
6288 case OPTION_MAUTO_PIC:
6289 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
6290 break;
6291
6292 default:
6293 return 0;
6294 }
6295
6296 return 1;
6297 }
6298
6299 void
6300 md_show_usage (stream)
6301 FILE *stream;
6302 {
6303 fputs (_("\
6304 IA-64 options:\n\
6305 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
6306 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
6307 -x | -xexplicit turn on dependency violation checking (default)\n\
6308 -xauto automagically remove dependency violations\n\
6309 -xdebug debug dependency violation checker\n"),
6310 stream);
6311 }
6312
6313 /* Return true if TYPE fits in TEMPL at SLOT. */
6314
6315 static int
6316 match (int templ, int type, int slot)
6317 {
6318 enum ia64_unit unit;
6319 int result;
6320
6321 unit = ia64_templ_desc[templ].exec_unit[slot];
6322 switch (type)
6323 {
6324 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
6325 case IA64_TYPE_A:
6326 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
6327 break;
6328 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
6329 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
6330 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
6331 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
6332 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
6333 default: result = 0; break;
6334 }
6335 return result;
6336 }
6337
6338 /* Add a bit of extra goodness if a nop of type F or B would fit
6339 in TEMPL at SLOT. */
6340
6341 static inline int
6342 extra_goodness (int templ, int slot)
6343 {
6344 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
6345 return 2;
6346 if (slot == 2 && match (templ, IA64_TYPE_B, slot))
6347 return 1;
6348 return 0;
6349 }
6350
6351 /* This function is called once, at assembler startup time. It sets
6352 up all the tables, etc. that the MD part of the assembler will need
6353 that can be determined before arguments are parsed. */
6354 void
6355 md_begin ()
6356 {
6357 int i, j, k, t, total, ar_base, cr_base, goodness, best, regnum, ok;
6358 const char *err;
6359 char name[8];
6360
6361 md.auto_align = 1;
6362 md.explicit_mode = md.default_explicit_mode;
6363
6364 bfd_set_section_alignment (stdoutput, text_section, 4);
6365
6366 target_big_endian = TARGET_BYTES_BIG_ENDIAN;
6367 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
6368 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
6369 &zero_address_frag);
6370
6371 pseudo_func[FUNC_GP_RELATIVE].u.sym =
6372 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
6373 &zero_address_frag);
6374
6375 pseudo_func[FUNC_LT_RELATIVE].u.sym =
6376 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
6377 &zero_address_frag);
6378
6379 pseudo_func[FUNC_PC_RELATIVE].u.sym =
6380 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
6381 &zero_address_frag);
6382
6383 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
6384 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
6385 &zero_address_frag);
6386
6387 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
6388 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
6389 &zero_address_frag);
6390
6391 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
6392 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
6393 &zero_address_frag);
6394
6395 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
6396 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
6397 &zero_address_frag);
6398
6399 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
6400 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
6401 &zero_address_frag);
6402
6403 pseudo_func[FUNC_IPLT_RELOC].u.sym =
6404 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
6405 &zero_address_frag);
6406
6407 /* Compute the table of best templates. We compute goodness as a
6408 base 4 value, in which each match counts for 3, each F counts
6409 for 2, each B counts for 1. This should maximize the number of
6410 F and B nops in the chosen bundles, which is good because these
6411 pipelines are least likely to be overcommitted. */
6412 for (i = 0; i < IA64_NUM_TYPES; ++i)
6413 for (j = 0; j < IA64_NUM_TYPES; ++j)
6414 for (k = 0; k < IA64_NUM_TYPES; ++k)
6415 {
6416 best = 0;
6417 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
6418 {
6419 goodness = 0;
6420 if (match (t, i, 0))
6421 {
6422 if (match (t, j, 1))
6423 {
6424 if (match (t, k, 2))
6425 goodness = 3 + 3 + 3;
6426 else
6427 goodness = 3 + 3 + extra_goodness (t, 2);
6428 }
6429 else if (match (t, j, 2))
6430 goodness = 3 + 3 + extra_goodness (t, 1);
6431 else
6432 {
6433 goodness = 3;
6434 goodness += extra_goodness (t, 1);
6435 goodness += extra_goodness (t, 2);
6436 }
6437 }
6438 else if (match (t, i, 1))
6439 {
6440 if (match (t, j, 2))
6441 goodness = 3 + 3;
6442 else
6443 goodness = 3 + extra_goodness (t, 2);
6444 }
6445 else if (match (t, i, 2))
6446 goodness = 3 + extra_goodness (t, 1);
6447
6448 if (goodness > best)
6449 {
6450 best = goodness;
6451 best_template[i][j][k] = t;
6452 }
6453 }
6454 }
6455
6456 for (i = 0; i < NUM_SLOTS; ++i)
6457 md.slot[i].user_template = -1;
6458
6459 md.pseudo_hash = hash_new ();
6460 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
6461 {
6462 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
6463 (void *) (pseudo_opcode + i));
6464 if (err)
6465 as_fatal ("ia64.md_begin: can't hash `%s': %s",
6466 pseudo_opcode[i].name, err);
6467 }
6468
6469 md.reg_hash = hash_new ();
6470 md.dynreg_hash = hash_new ();
6471 md.const_hash = hash_new ();
6472 md.entry_hash = hash_new ();
6473
6474 /* general registers: */
6475
6476 total = 128;
6477 for (i = 0; i < total; ++i)
6478 {
6479 sprintf (name, "r%d", i - REG_GR);
6480 md.regsym[i] = declare_register (name, i);
6481 }
6482
6483 /* floating point registers: */
6484 total += 128;
6485 for (; i < total; ++i)
6486 {
6487 sprintf (name, "f%d", i - REG_FR);
6488 md.regsym[i] = declare_register (name, i);
6489 }
6490
6491 /* application registers: */
6492 total += 128;
6493 ar_base = i;
6494 for (; i < total; ++i)
6495 {
6496 sprintf (name, "ar%d", i - REG_AR);
6497 md.regsym[i] = declare_register (name, i);
6498 }
6499
6500 /* control registers: */
6501 total += 128;
6502 cr_base = i;
6503 for (; i < total; ++i)
6504 {
6505 sprintf (name, "cr%d", i - REG_CR);
6506 md.regsym[i] = declare_register (name, i);
6507 }
6508
6509 /* predicate registers: */
6510 total += 64;
6511 for (; i < total; ++i)
6512 {
6513 sprintf (name, "p%d", i - REG_P);
6514 md.regsym[i] = declare_register (name, i);
6515 }
6516
6517 /* branch registers: */
6518 total += 8;
6519 for (; i < total; ++i)
6520 {
6521 sprintf (name, "b%d", i - REG_BR);
6522 md.regsym[i] = declare_register (name, i);
6523 }
6524
6525 md.regsym[REG_IP] = declare_register ("ip", REG_IP);
6526 md.regsym[REG_CFM] = declare_register ("cfm", REG_CFM);
6527 md.regsym[REG_PR] = declare_register ("pr", REG_PR);
6528 md.regsym[REG_PR_ROT] = declare_register ("pr.rot", REG_PR_ROT);
6529 md.regsym[REG_PSR] = declare_register ("psr", REG_PSR);
6530 md.regsym[REG_PSR_L] = declare_register ("psr.l", REG_PSR_L);
6531 md.regsym[REG_PSR_UM] = declare_register ("psr.um", REG_PSR_UM);
6532
6533 for (i = 0; i < NELEMS (indirect_reg); ++i)
6534 {
6535 regnum = indirect_reg[i].regnum;
6536 md.regsym[regnum] = declare_register (indirect_reg[i].name, regnum);
6537 }
6538
6539 /* define synonyms for application registers: */
6540 for (i = REG_AR; i < REG_AR + NELEMS (ar); ++i)
6541 md.regsym[i] = declare_register (ar[i - REG_AR].name,
6542 REG_AR + ar[i - REG_AR].regnum);
6543
6544 /* define synonyms for control registers: */
6545 for (i = REG_CR; i < REG_CR + NELEMS (cr); ++i)
6546 md.regsym[i] = declare_register (cr[i - REG_CR].name,
6547 REG_CR + cr[i - REG_CR].regnum);
6548
6549 declare_register ("gp", REG_GR + 1);
6550 declare_register ("sp", REG_GR + 12);
6551 declare_register ("rp", REG_BR + 0);
6552
6553 /* pseudo-registers used to specify unwind info: */
6554 declare_register ("psp", REG_PSP);
6555
6556 declare_register_set ("ret", 4, REG_GR + 8);
6557 declare_register_set ("farg", 8, REG_FR + 8);
6558 declare_register_set ("fret", 8, REG_FR + 8);
6559
6560 for (i = 0; i < NELEMS (const_bits); ++i)
6561 {
6562 err = hash_insert (md.const_hash, const_bits[i].name,
6563 (PTR) (const_bits + i));
6564 if (err)
6565 as_fatal ("Inserting \"%s\" into constant hash table failed: %s",
6566 name, err);
6567 }
6568
6569 /* Set the architecture and machine depending on defaults and command line
6570 options. */
6571 if (md.flags & EF_IA_64_ABI64)
6572 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
6573 else
6574 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
6575
6576 if (! ok)
6577 as_warn (_("Could not set architecture and machine"));
6578
6579 /* Set the pointer size and pointer shift size depending on md.flags */
6580
6581 if (md.flags & EF_IA_64_ABI64)
6582 {
6583 md.pointer_size = 8; /* pointers are 8 bytes */
6584 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^2 */
6585 }
6586 else
6587 {
6588 md.pointer_size = 4; /* pointers are 4 bytes */
6589 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
6590 }
6591
6592 md.mem_offset.hint = 0;
6593 md.path = 0;
6594 md.maxpaths = 0;
6595 md.entry_labels = NULL;
6596 }
6597
6598 /* Set the elf type to 64 bit ABI by default. Cannot do this in md_begin
6599 because that is called after md_parse_option which is where we do the
6600 dynamic changing of md.flags based on -mlp64 or -milp32. Also, set the
6601 default endianness. */
6602
6603 void
6604 ia64_init (argc, argv)
6605 int argc ATTRIBUTE_UNUSED;
6606 char **argv ATTRIBUTE_UNUSED;
6607 {
6608 md.flags = EF_IA_64_ABI64;
6609 if (TARGET_BYTES_BIG_ENDIAN)
6610 md.flags |= EF_IA_64_BE;
6611 }
6612
6613 /* Return a string for the target object file format. */
6614
6615 const char *
6616 ia64_target_format ()
6617 {
6618 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
6619 {
6620 if (md.flags & EF_IA_64_BE)
6621 {
6622 if (md.flags & EF_IA_64_ABI64)
6623 #ifdef TE_AIX50
6624 return "elf64-ia64-aix-big";
6625 #else
6626 return "elf64-ia64-big";
6627 #endif
6628 else
6629 #ifdef TE_AIX50
6630 return "elf32-ia64-aix-big";
6631 #else
6632 return "elf32-ia64-big";
6633 #endif
6634 }
6635 else
6636 {
6637 if (md.flags & EF_IA_64_ABI64)
6638 #ifdef TE_AIX50
6639 return "elf64-ia64-aix-little";
6640 #else
6641 return "elf64-ia64-little";
6642 #endif
6643 else
6644 #ifdef TE_AIX50
6645 return "elf32-ia64-aix-little";
6646 #else
6647 return "elf32-ia64-little";
6648 #endif
6649 }
6650 }
6651 else
6652 return "unknown-format";
6653 }
6654
6655 void
6656 ia64_end_of_source ()
6657 {
6658 /* terminate insn group upon reaching end of file: */
6659 insn_group_break (1, 0, 0);
6660
6661 /* emits slots we haven't written yet: */
6662 ia64_flush_insns ();
6663
6664 bfd_set_private_flags (stdoutput, md.flags);
6665
6666 md.mem_offset.hint = 0;
6667 }
6668
6669 void
6670 ia64_start_line ()
6671 {
6672 if (md.qp.X_op == O_register)
6673 as_bad ("qualifying predicate not followed by instruction");
6674 md.qp.X_op = O_absent;
6675
6676 if (ignore_input ())
6677 return;
6678
6679 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
6680 {
6681 if (md.detect_dv && !md.explicit_mode)
6682 as_warn (_("Explicit stops are ignored in auto mode"));
6683 else
6684 insn_group_break (1, 0, 0);
6685 }
6686 }
6687
6688 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
6689 labels. */
6690 static int defining_tag = 0;
6691
6692 int
6693 ia64_unrecognized_line (ch)
6694 int ch;
6695 {
6696 switch (ch)
6697 {
6698 case '(':
6699 expression (&md.qp);
6700 if (*input_line_pointer++ != ')')
6701 {
6702 as_bad ("Expected ')'");
6703 return 0;
6704 }
6705 if (md.qp.X_op != O_register)
6706 {
6707 as_bad ("Qualifying predicate expected");
6708 return 0;
6709 }
6710 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
6711 {
6712 as_bad ("Predicate register expected");
6713 return 0;
6714 }
6715 return 1;
6716
6717 case '{':
6718 if (md.manual_bundling)
6719 as_warn ("Found '{' when manual bundling is already turned on");
6720 else
6721 CURR_SLOT.manual_bundling_on = 1;
6722 md.manual_bundling = 1;
6723
6724 /* Bundling is only acceptable in explicit mode
6725 or when in default automatic mode. */
6726 if (md.detect_dv && !md.explicit_mode)
6727 {
6728 if (!md.mode_explicitly_set
6729 && !md.default_explicit_mode)
6730 dot_dv_mode ('E');
6731 else
6732 as_warn (_("Found '{' after explicit switch to automatic mode"));
6733 }
6734 return 1;
6735
6736 case '}':
6737 if (!md.manual_bundling)
6738 as_warn ("Found '}' when manual bundling is off");
6739 else
6740 PREV_SLOT.manual_bundling_off = 1;
6741 md.manual_bundling = 0;
6742
6743 /* switch back to automatic mode, if applicable */
6744 if (md.detect_dv
6745 && md.explicit_mode
6746 && !md.mode_explicitly_set
6747 && !md.default_explicit_mode)
6748 dot_dv_mode ('A');
6749
6750 /* Allow '{' to follow on the same line. We also allow ";;", but that
6751 happens automatically because ';' is an end of line marker. */
6752 SKIP_WHITESPACE ();
6753 if (input_line_pointer[0] == '{')
6754 {
6755 input_line_pointer++;
6756 return ia64_unrecognized_line ('{');
6757 }
6758
6759 demand_empty_rest_of_line ();
6760 return 1;
6761
6762 case '[':
6763 {
6764 char *s;
6765 char c;
6766 symbolS *tag;
6767 int temp;
6768
6769 if (md.qp.X_op == O_register)
6770 {
6771 as_bad ("Tag must come before qualifying predicate.");
6772 return 0;
6773 }
6774
6775 /* This implements just enough of read_a_source_file in read.c to
6776 recognize labels. */
6777 if (is_name_beginner (*input_line_pointer))
6778 {
6779 s = input_line_pointer;
6780 c = get_symbol_end ();
6781 }
6782 else if (LOCAL_LABELS_FB
6783 && ISDIGIT (*input_line_pointer))
6784 {
6785 temp = 0;
6786 while (ISDIGIT (*input_line_pointer))
6787 temp = (temp * 10) + *input_line_pointer++ - '0';
6788 fb_label_instance_inc (temp);
6789 s = fb_label_name (temp, 0);
6790 c = *input_line_pointer;
6791 }
6792 else
6793 {
6794 s = NULL;
6795 c = '\0';
6796 }
6797 if (c != ':')
6798 {
6799 /* Put ':' back for error messages' sake. */
6800 *input_line_pointer++ = ':';
6801 as_bad ("Expected ':'");
6802 return 0;
6803 }
6804
6805 defining_tag = 1;
6806 tag = colon (s);
6807 defining_tag = 0;
6808 /* Put ':' back for error messages' sake. */
6809 *input_line_pointer++ = ':';
6810 if (*input_line_pointer++ != ']')
6811 {
6812 as_bad ("Expected ']'");
6813 return 0;
6814 }
6815 if (! tag)
6816 {
6817 as_bad ("Tag name expected");
6818 return 0;
6819 }
6820 return 1;
6821 }
6822
6823 default:
6824 break;
6825 }
6826
6827 /* Not a valid line. */
6828 return 0;
6829 }
6830
6831 void
6832 ia64_frob_label (sym)
6833 struct symbol *sym;
6834 {
6835 struct label_fix *fix;
6836
6837 /* Tags need special handling since they are not bundle breaks like
6838 labels. */
6839 if (defining_tag)
6840 {
6841 fix = obstack_alloc (&notes, sizeof (*fix));
6842 fix->sym = sym;
6843 fix->next = CURR_SLOT.tag_fixups;
6844 CURR_SLOT.tag_fixups = fix;
6845
6846 return;
6847 }
6848
6849 if (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6850 {
6851 md.last_text_seg = now_seg;
6852 fix = obstack_alloc (&notes, sizeof (*fix));
6853 fix->sym = sym;
6854 fix->next = CURR_SLOT.label_fixups;
6855 CURR_SLOT.label_fixups = fix;
6856
6857 /* Keep track of how many code entry points we've seen. */
6858 if (md.path == md.maxpaths)
6859 {
6860 md.maxpaths += 20;
6861 md.entry_labels = (const char **)
6862 xrealloc ((void *) md.entry_labels,
6863 md.maxpaths * sizeof (char *));
6864 }
6865 md.entry_labels[md.path++] = S_GET_NAME (sym);
6866 }
6867 }
6868
6869 void
6870 ia64_flush_pending_output ()
6871 {
6872 if (!md.keep_pending_output
6873 && bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE)
6874 {
6875 /* ??? This causes many unnecessary stop bits to be emitted.
6876 Unfortunately, it isn't clear if it is safe to remove this. */
6877 insn_group_break (1, 0, 0);
6878 ia64_flush_insns ();
6879 }
6880 }
6881
6882 /* Do ia64-specific expression optimization. All that's done here is
6883 to transform index expressions that are either due to the indexing
6884 of rotating registers or due to the indexing of indirect register
6885 sets. */
6886 int
6887 ia64_optimize_expr (l, op, r)
6888 expressionS *l;
6889 operatorT op;
6890 expressionS *r;
6891 {
6892 unsigned num_regs;
6893
6894 if (op == O_index)
6895 {
6896 if (l->X_op == O_register && r->X_op == O_constant)
6897 {
6898 num_regs = (l->X_add_number >> 16);
6899 if ((unsigned) r->X_add_number >= num_regs)
6900 {
6901 if (!num_regs)
6902 as_bad ("No current frame");
6903 else
6904 as_bad ("Index out of range 0..%u", num_regs - 1);
6905 r->X_add_number = 0;
6906 }
6907 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
6908 return 1;
6909 }
6910 else if (l->X_op == O_register && r->X_op == O_register)
6911 {
6912 if (l->X_add_number < IND_CPUID || l->X_add_number > IND_RR
6913 || l->X_add_number == IND_MEM)
6914 {
6915 as_bad ("Indirect register set name expected");
6916 l->X_add_number = IND_CPUID;
6917 }
6918 l->X_op = O_index;
6919 l->X_op_symbol = md.regsym[l->X_add_number];
6920 l->X_add_number = r->X_add_number;
6921 return 1;
6922 }
6923 }
6924 return 0;
6925 }
6926
6927 int
6928 ia64_parse_name (name, e)
6929 char *name;
6930 expressionS *e;
6931 {
6932 struct const_desc *cdesc;
6933 struct dynreg *dr = 0;
6934 unsigned int regnum;
6935 struct symbol *sym;
6936 char *end;
6937
6938 /* first see if NAME is a known register name: */
6939 sym = hash_find (md.reg_hash, name);
6940 if (sym)
6941 {
6942 e->X_op = O_register;
6943 e->X_add_number = S_GET_VALUE (sym);
6944 return 1;
6945 }
6946
6947 cdesc = hash_find (md.const_hash, name);
6948 if (cdesc)
6949 {
6950 e->X_op = O_constant;
6951 e->X_add_number = cdesc->value;
6952 return 1;
6953 }
6954
6955 /* check for inN, locN, or outN: */
6956 switch (name[0])
6957 {
6958 case 'i':
6959 if (name[1] == 'n' && ISDIGIT (name[2]))
6960 {
6961 dr = &md.in;
6962 name += 2;
6963 }
6964 break;
6965
6966 case 'l':
6967 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
6968 {
6969 dr = &md.loc;
6970 name += 3;
6971 }
6972 break;
6973
6974 case 'o':
6975 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
6976 {
6977 dr = &md.out;
6978 name += 3;
6979 }
6980 break;
6981
6982 default:
6983 break;
6984 }
6985
6986 if (dr)
6987 {
6988 /* The name is inN, locN, or outN; parse the register number. */
6989 regnum = strtoul (name, &end, 10);
6990 if (end > name && *end == '\0')
6991 {
6992 if ((unsigned) regnum >= dr->num_regs)
6993 {
6994 if (!dr->num_regs)
6995 as_bad ("No current frame");
6996 else
6997 as_bad ("Register number out of range 0..%u",
6998 dr->num_regs - 1);
6999 regnum = 0;
7000 }
7001 e->X_op = O_register;
7002 e->X_add_number = dr->base + regnum;
7003 return 1;
7004 }
7005 }
7006
7007 if ((dr = hash_find (md.dynreg_hash, name)))
7008 {
7009 /* We've got ourselves the name of a rotating register set.
7010 Store the base register number in the low 16 bits of
7011 X_add_number and the size of the register set in the top 16
7012 bits. */
7013 e->X_op = O_register;
7014 e->X_add_number = dr->base | (dr->num_regs << 16);
7015 return 1;
7016 }
7017 return 0;
7018 }
7019
7020 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
7021
7022 char *
7023 ia64_canonicalize_symbol_name (name)
7024 char *name;
7025 {
7026 size_t len = strlen (name);
7027 if (len > 1 && name[len - 1] == '#')
7028 name[len - 1] = '\0';
7029 return name;
7030 }
7031
7032 /* Return true if idesc is a conditional branch instruction. This excludes
7033 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
7034 because they always read/write resources regardless of the value of the
7035 qualifying predicate. br.ia must always use p0, and hence is always
7036 taken. Thus this function returns true for branches which can fall
7037 through, and which use no resources if they do fall through. */
7038
7039 static int
7040 is_conditional_branch (idesc)
7041 struct ia64_opcode *idesc;
7042 {
7043 /* br is a conditional branch. Everything that starts with br. except
7044 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
7045 Everything that starts with brl is a conditional branch. */
7046 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
7047 && (idesc->name[2] == '\0'
7048 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
7049 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
7050 || idesc->name[2] == 'l'
7051 /* br.cond, br.call, br.clr */
7052 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
7053 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
7054 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
7055 }
7056
7057 /* Return whether the given opcode is a taken branch. If there's any doubt,
7058 returns zero. */
7059
7060 static int
7061 is_taken_branch (idesc)
7062 struct ia64_opcode *idesc;
7063 {
7064 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
7065 || strncmp (idesc->name, "br.ia", 5) == 0);
7066 }
7067
7068 /* Return whether the given opcode is an interruption or rfi. If there's any
7069 doubt, returns zero. */
7070
7071 static int
7072 is_interruption_or_rfi (idesc)
7073 struct ia64_opcode *idesc;
7074 {
7075 if (strcmp (idesc->name, "rfi") == 0)
7076 return 1;
7077 return 0;
7078 }
7079
7080 /* Returns the index of the given dependency in the opcode's list of chks, or
7081 -1 if there is no dependency. */
7082
7083 static int
7084 depends_on (depind, idesc)
7085 int depind;
7086 struct ia64_opcode *idesc;
7087 {
7088 int i;
7089 const struct ia64_opcode_dependency *dep = idesc->dependencies;
7090 for (i = 0; i < dep->nchks; i++)
7091 {
7092 if (depind == DEP (dep->chks[i]))
7093 return i;
7094 }
7095 return -1;
7096 }
7097
7098 /* Determine a set of specific resources used for a particular resource
7099 class. Returns the number of specific resources identified For those
7100 cases which are not determinable statically, the resource returned is
7101 marked nonspecific.
7102
7103 Meanings of value in 'NOTE':
7104 1) only read/write when the register number is explicitly encoded in the
7105 insn.
7106 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
7107 accesses CFM when qualifying predicate is in the rotating region.
7108 3) general register value is used to specify an indirect register; not
7109 determinable statically.
7110 4) only read the given resource when bits 7:0 of the indirect index
7111 register value does not match the register number of the resource; not
7112 determinable statically.
7113 5) all rules are implementation specific.
7114 6) only when both the index specified by the reader and the index specified
7115 by the writer have the same value in bits 63:61; not determinable
7116 statically.
7117 7) only access the specified resource when the corresponding mask bit is
7118 set
7119 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
7120 only read when these insns reference FR2-31
7121 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
7122 written when these insns write FR32-127
7123 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
7124 instruction
7125 11) The target predicates are written independently of PR[qp], but source
7126 registers are only read if PR[qp] is true. Since the state of PR[qp]
7127 cannot statically be determined, all source registers are marked used.
7128 12) This insn only reads the specified predicate register when that
7129 register is the PR[qp].
7130 13) This reference to ld-c only applies to teh GR whose value is loaded
7131 with data returned from memory, not the post-incremented address register.
7132 14) The RSE resource includes the implementation-specific RSE internal
7133 state resources. At least one (and possibly more) of these resources are
7134 read by each instruction listed in IC:rse-readers. At least one (and
7135 possibly more) of these resources are written by each insn listed in
7136 IC:rse-writers.
7137 15+16) Represents reserved instructions, which the assembler does not
7138 generate.
7139
7140 Memory resources (i.e. locations in memory) are *not* marked or tracked by
7141 this code; there are no dependency violations based on memory access.
7142 */
7143
7144 #define MAX_SPECS 256
7145 #define DV_CHK 1
7146 #define DV_REG 0
7147
7148 static int
7149 specify_resource (dep, idesc, type, specs, note, path)
7150 const struct ia64_dependency *dep;
7151 struct ia64_opcode *idesc;
7152 int type; /* is this a DV chk or a DV reg? */
7153 struct rsrc specs[MAX_SPECS]; /* returned specific resources */
7154 int note; /* resource note for this insn's usage */
7155 int path; /* which execution path to examine */
7156 {
7157 int count = 0;
7158 int i;
7159 int rsrc_write = 0;
7160 struct rsrc tmpl;
7161
7162 if (dep->mode == IA64_DV_WAW
7163 || (dep->mode == IA64_DV_RAW && type == DV_REG)
7164 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
7165 rsrc_write = 1;
7166
7167 /* template for any resources we identify */
7168 tmpl.dependency = dep;
7169 tmpl.note = note;
7170 tmpl.insn_srlz = tmpl.data_srlz = 0;
7171 tmpl.qp_regno = CURR_SLOT.qp_regno;
7172 tmpl.link_to_qp_branch = 1;
7173 tmpl.mem_offset.hint = 0;
7174 tmpl.specific = 1;
7175 tmpl.index = 0;
7176 tmpl.cmp_type = CMP_NONE;
7177
7178 #define UNHANDLED \
7179 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
7180 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
7181 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
7182
7183 /* we don't need to track these */
7184 if (dep->semantics == IA64_DVS_NONE)
7185 return 0;
7186
7187 switch (dep->specifier)
7188 {
7189 case IA64_RS_AR_K:
7190 if (note == 1)
7191 {
7192 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7193 {
7194 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7195 if (regno >= 0 && regno <= 7)
7196 {
7197 specs[count] = tmpl;
7198 specs[count++].index = regno;
7199 }
7200 }
7201 }
7202 else if (note == 0)
7203 {
7204 for (i = 0; i < 8; i++)
7205 {
7206 specs[count] = tmpl;
7207 specs[count++].index = i;
7208 }
7209 }
7210 else
7211 {
7212 UNHANDLED;
7213 }
7214 break;
7215
7216 case IA64_RS_AR_UNAT:
7217 /* This is a mov =AR or mov AR= instruction. */
7218 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7219 {
7220 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7221 if (regno == AR_UNAT)
7222 {
7223 specs[count++] = tmpl;
7224 }
7225 }
7226 else
7227 {
7228 /* This is a spill/fill, or other instruction that modifies the
7229 unat register. */
7230
7231 /* Unless we can determine the specific bits used, mark the whole
7232 thing; bits 8:3 of the memory address indicate the bit used in
7233 UNAT. The .mem.offset hint may be used to eliminate a small
7234 subset of conflicts. */
7235 specs[count] = tmpl;
7236 if (md.mem_offset.hint)
7237 {
7238 if (md.debug_dv)
7239 fprintf (stderr, " Using hint for spill/fill\n");
7240 /* The index isn't actually used, just set it to something
7241 approximating the bit index. */
7242 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
7243 specs[count].mem_offset.hint = 1;
7244 specs[count].mem_offset.offset = md.mem_offset.offset;
7245 specs[count++].mem_offset.base = md.mem_offset.base;
7246 }
7247 else
7248 {
7249 specs[count++].specific = 0;
7250 }
7251 }
7252 break;
7253
7254 case IA64_RS_AR:
7255 if (note == 1)
7256 {
7257 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7258 {
7259 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7260 if ((regno >= 8 && regno <= 15)
7261 || (regno >= 20 && regno <= 23)
7262 || (regno >= 31 && regno <= 39)
7263 || (regno >= 41 && regno <= 47)
7264 || (regno >= 67 && regno <= 111))
7265 {
7266 specs[count] = tmpl;
7267 specs[count++].index = regno;
7268 }
7269 }
7270 }
7271 else
7272 {
7273 UNHANDLED;
7274 }
7275 break;
7276
7277 case IA64_RS_ARb:
7278 if (note == 1)
7279 {
7280 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
7281 {
7282 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
7283 if ((regno >= 48 && regno <= 63)
7284 || (regno >= 112 && regno <= 127))
7285 {
7286 specs[count] = tmpl;
7287 specs[count++].index = regno;
7288 }
7289 }
7290 }
7291 else if (note == 0)
7292 {
7293 for (i = 48; i < 64; i++)
7294 {
7295 specs[count] = tmpl;
7296 specs[count++].index = i;
7297 }
7298 for (i = 112; i < 128; i++)
7299 {
7300 specs[count] = tmpl;
7301 specs[count++].index = i;
7302 }
7303 }
7304 else
7305 {
7306 UNHANDLED;
7307 }
7308 break;
7309
7310 case IA64_RS_BR:
7311 if (note != 1)
7312 {
7313 UNHANDLED;
7314 }
7315 else
7316 {
7317 if (rsrc_write)
7318 {
7319 for (i = 0; i < idesc->num_outputs; i++)
7320 if (idesc->operands[i] == IA64_OPND_B1
7321 || idesc->operands[i] == IA64_OPND_B2)
7322 {
7323 specs[count] = tmpl;
7324 specs[count++].index =
7325 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7326 }
7327 }
7328 else
7329 {
7330 for (i = idesc->num_outputs;i < NELEMS (idesc->operands); i++)
7331 if (idesc->operands[i] == IA64_OPND_B1
7332 || idesc->operands[i] == IA64_OPND_B2)
7333 {
7334 specs[count] = tmpl;
7335 specs[count++].index =
7336 CURR_SLOT.opnd[i].X_add_number - REG_BR;
7337 }
7338 }
7339 }
7340 break;
7341
7342 case IA64_RS_CPUID: /* four or more registers */
7343 if (note == 3)
7344 {
7345 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
7346 {
7347 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7348 if (regno >= 0 && regno < NELEMS (gr_values)
7349 && KNOWN (regno))
7350 {
7351 specs[count] = tmpl;
7352 specs[count++].index = gr_values[regno].value & 0xFF;
7353 }
7354 else
7355 {
7356 specs[count] = tmpl;
7357 specs[count++].specific = 0;
7358 }
7359 }
7360 }
7361 else
7362 {
7363 UNHANDLED;
7364 }
7365 break;
7366
7367 case IA64_RS_DBR: /* four or more registers */
7368 if (note == 3)
7369 {
7370 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
7371 {
7372 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7373 if (regno >= 0 && regno < NELEMS (gr_values)
7374 && KNOWN (regno))
7375 {
7376 specs[count] = tmpl;
7377 specs[count++].index = gr_values[regno].value & 0xFF;
7378 }
7379 else
7380 {
7381 specs[count] = tmpl;
7382 specs[count++].specific = 0;
7383 }
7384 }
7385 }
7386 else if (note == 0 && !rsrc_write)
7387 {
7388 specs[count] = tmpl;
7389 specs[count++].specific = 0;
7390 }
7391 else
7392 {
7393 UNHANDLED;
7394 }
7395 break;
7396
7397 case IA64_RS_IBR: /* four or more registers */
7398 if (note == 3)
7399 {
7400 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
7401 {
7402 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7403 if (regno >= 0 && regno < NELEMS (gr_values)
7404 && KNOWN (regno))
7405 {
7406 specs[count] = tmpl;
7407 specs[count++].index = gr_values[regno].value & 0xFF;
7408 }
7409 else
7410 {
7411 specs[count] = tmpl;
7412 specs[count++].specific = 0;
7413 }
7414 }
7415 }
7416 else
7417 {
7418 UNHANDLED;
7419 }
7420 break;
7421
7422 case IA64_RS_MSR:
7423 if (note == 5)
7424 {
7425 /* These are implementation specific. Force all references to
7426 conflict with all other references. */
7427 specs[count] = tmpl;
7428 specs[count++].specific = 0;
7429 }
7430 else
7431 {
7432 UNHANDLED;
7433 }
7434 break;
7435
7436 case IA64_RS_PKR: /* 16 or more registers */
7437 if (note == 3 || note == 4)
7438 {
7439 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
7440 {
7441 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7442 if (regno >= 0 && regno < NELEMS (gr_values)
7443 && KNOWN (regno))
7444 {
7445 if (note == 3)
7446 {
7447 specs[count] = tmpl;
7448 specs[count++].index = gr_values[regno].value & 0xFF;
7449 }
7450 else
7451 for (i = 0; i < NELEMS (gr_values); i++)
7452 {
7453 /* Uses all registers *except* the one in R3. */
7454 if ((unsigned)i != (gr_values[regno].value & 0xFF))
7455 {
7456 specs[count] = tmpl;
7457 specs[count++].index = i;
7458 }
7459 }
7460 }
7461 else
7462 {
7463 specs[count] = tmpl;
7464 specs[count++].specific = 0;
7465 }
7466 }
7467 }
7468 else if (note == 0)
7469 {
7470 /* probe et al. */
7471 specs[count] = tmpl;
7472 specs[count++].specific = 0;
7473 }
7474 break;
7475
7476 case IA64_RS_PMC: /* four or more registers */
7477 if (note == 3)
7478 {
7479 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
7480 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
7481
7482 {
7483 int index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
7484 ? 1 : !rsrc_write);
7485 int regno = CURR_SLOT.opnd[index].X_add_number - REG_GR;
7486 if (regno >= 0 && regno < NELEMS (gr_values)
7487 && KNOWN (regno))
7488 {
7489 specs[count] = tmpl;
7490 specs[count++].index = gr_values[regno].value & 0xFF;
7491 }
7492 else
7493 {
7494 specs[count] = tmpl;
7495 specs[count++].specific = 0;
7496 }
7497 }
7498 }
7499 else
7500 {
7501 UNHANDLED;
7502 }
7503 break;
7504
7505 case IA64_RS_PMD: /* four or more registers */
7506 if (note == 3)
7507 {
7508 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
7509 {
7510 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7511 if (regno >= 0 && regno < NELEMS (gr_values)
7512 && KNOWN (regno))
7513 {
7514 specs[count] = tmpl;
7515 specs[count++].index = gr_values[regno].value & 0xFF;
7516 }
7517 else
7518 {
7519 specs[count] = tmpl;
7520 specs[count++].specific = 0;
7521 }
7522 }
7523 }
7524 else
7525 {
7526 UNHANDLED;
7527 }
7528 break;
7529
7530 case IA64_RS_RR: /* eight registers */
7531 if (note == 6)
7532 {
7533 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
7534 {
7535 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
7536 if (regno >= 0 && regno < NELEMS (gr_values)
7537 && KNOWN (regno))
7538 {
7539 specs[count] = tmpl;
7540 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
7541 }
7542 else
7543 {
7544 specs[count] = tmpl;
7545 specs[count++].specific = 0;
7546 }
7547 }
7548 }
7549 else if (note == 0 && !rsrc_write)
7550 {
7551 specs[count] = tmpl;
7552 specs[count++].specific = 0;
7553 }
7554 else
7555 {
7556 UNHANDLED;
7557 }
7558 break;
7559
7560 case IA64_RS_CR_IRR:
7561 if (note == 0)
7562 {
7563 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
7564 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
7565 if (rsrc_write
7566 && idesc->operands[1] == IA64_OPND_CR3
7567 && regno == CR_IVR)
7568 {
7569 for (i = 0; i < 4; i++)
7570 {
7571 specs[count] = tmpl;
7572 specs[count++].index = CR_IRR0 + i;
7573 }
7574 }
7575 }
7576 else if (note == 1)
7577 {
7578 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7579 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7580 && regno >= CR_IRR0
7581 && regno <= CR_IRR3)
7582 {
7583 specs[count] = tmpl;
7584 specs[count++].index = regno;
7585 }
7586 }
7587 else
7588 {
7589 UNHANDLED;
7590 }
7591 break;
7592
7593 case IA64_RS_CR_LRR:
7594 if (note != 1)
7595 {
7596 UNHANDLED;
7597 }
7598 else
7599 {
7600 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7601 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
7602 && (regno == CR_LRR0 || regno == CR_LRR1))
7603 {
7604 specs[count] = tmpl;
7605 specs[count++].index = regno;
7606 }
7607 }
7608 break;
7609
7610 case IA64_RS_CR:
7611 if (note == 1)
7612 {
7613 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
7614 {
7615 specs[count] = tmpl;
7616 specs[count++].index =
7617 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
7618 }
7619 }
7620 else
7621 {
7622 UNHANDLED;
7623 }
7624 break;
7625
7626 case IA64_RS_FR:
7627 case IA64_RS_FRb:
7628 if (note != 1)
7629 {
7630 UNHANDLED;
7631 }
7632 else if (rsrc_write)
7633 {
7634 if (dep->specifier == IA64_RS_FRb
7635 && idesc->operands[0] == IA64_OPND_F1)
7636 {
7637 specs[count] = tmpl;
7638 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
7639 }
7640 }
7641 else
7642 {
7643 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
7644 {
7645 if (idesc->operands[i] == IA64_OPND_F2
7646 || idesc->operands[i] == IA64_OPND_F3
7647 || idesc->operands[i] == IA64_OPND_F4)
7648 {
7649 specs[count] = tmpl;
7650 specs[count++].index =
7651 CURR_SLOT.opnd[i].X_add_number - REG_FR;
7652 }
7653 }
7654 }
7655 break;
7656
7657 case IA64_RS_GR:
7658 if (note == 13)
7659 {
7660 /* This reference applies only to the GR whose value is loaded with
7661 data returned from memory. */
7662 specs[count] = tmpl;
7663 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
7664 }
7665 else if (note == 1)
7666 {
7667 if (rsrc_write)
7668 {
7669 for (i = 0; i < idesc->num_outputs; i++)
7670 if (idesc->operands[i] == IA64_OPND_R1
7671 || idesc->operands[i] == IA64_OPND_R2
7672 || idesc->operands[i] == IA64_OPND_R3)
7673 {
7674 specs[count] = tmpl;
7675 specs[count++].index =
7676 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7677 }
7678 if (idesc->flags & IA64_OPCODE_POSTINC)
7679 for (i = 0; i < NELEMS (idesc->operands); i++)
7680 if (idesc->operands[i] == IA64_OPND_MR3)
7681 {
7682 specs[count] = tmpl;
7683 specs[count++].index =
7684 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7685 }
7686 }
7687 else
7688 {
7689 /* Look for anything that reads a GR. */
7690 for (i = 0; i < NELEMS (idesc->operands); i++)
7691 {
7692 if (idesc->operands[i] == IA64_OPND_MR3
7693 || idesc->operands[i] == IA64_OPND_CPUID_R3
7694 || idesc->operands[i] == IA64_OPND_DBR_R3
7695 || idesc->operands[i] == IA64_OPND_IBR_R3
7696 || idesc->operands[i] == IA64_OPND_MSR_R3
7697 || idesc->operands[i] == IA64_OPND_PKR_R3
7698 || idesc->operands[i] == IA64_OPND_PMC_R3
7699 || idesc->operands[i] == IA64_OPND_PMD_R3
7700 || idesc->operands[i] == IA64_OPND_RR_R3
7701 || ((i >= idesc->num_outputs)
7702 && (idesc->operands[i] == IA64_OPND_R1
7703 || idesc->operands[i] == IA64_OPND_R2
7704 || idesc->operands[i] == IA64_OPND_R3
7705 /* addl source register. */
7706 || idesc->operands[i] == IA64_OPND_R3_2)))
7707 {
7708 specs[count] = tmpl;
7709 specs[count++].index =
7710 CURR_SLOT.opnd[i].X_add_number - REG_GR;
7711 }
7712 }
7713 }
7714 }
7715 else
7716 {
7717 UNHANDLED;
7718 }
7719 break;
7720
7721 /* This is the same as IA64_RS_PRr, except that the register range is
7722 from 1 - 15, and there are no rotating register reads/writes here. */
7723 case IA64_RS_PR:
7724 if (note == 0)
7725 {
7726 for (i = 1; i < 16; i++)
7727 {
7728 specs[count] = tmpl;
7729 specs[count++].index = i;
7730 }
7731 }
7732 else if (note == 7)
7733 {
7734 valueT mask = 0;
7735 /* Mark only those registers indicated by the mask. */
7736 if (rsrc_write)
7737 {
7738 mask = CURR_SLOT.opnd[2].X_add_number;
7739 for (i = 1; i < 16; i++)
7740 if (mask & ((valueT) 1 << i))
7741 {
7742 specs[count] = tmpl;
7743 specs[count++].index = i;
7744 }
7745 }
7746 else
7747 {
7748 UNHANDLED;
7749 }
7750 }
7751 else if (note == 11) /* note 11 implies note 1 as well */
7752 {
7753 if (rsrc_write)
7754 {
7755 for (i = 0; i < idesc->num_outputs; i++)
7756 {
7757 if (idesc->operands[i] == IA64_OPND_P1
7758 || idesc->operands[i] == IA64_OPND_P2)
7759 {
7760 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7761 if (regno >= 1 && regno < 16)
7762 {
7763 specs[count] = tmpl;
7764 specs[count++].index = regno;
7765 }
7766 }
7767 }
7768 }
7769 else
7770 {
7771 UNHANDLED;
7772 }
7773 }
7774 else if (note == 12)
7775 {
7776 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7777 {
7778 specs[count] = tmpl;
7779 specs[count++].index = CURR_SLOT.qp_regno;
7780 }
7781 }
7782 else if (note == 1)
7783 {
7784 if (rsrc_write)
7785 {
7786 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7787 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7788 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
7789 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
7790
7791 if ((idesc->operands[0] == IA64_OPND_P1
7792 || idesc->operands[0] == IA64_OPND_P2)
7793 && p1 >= 1 && p1 < 16)
7794 {
7795 specs[count] = tmpl;
7796 specs[count].cmp_type =
7797 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7798 specs[count++].index = p1;
7799 }
7800 if ((idesc->operands[1] == IA64_OPND_P1
7801 || idesc->operands[1] == IA64_OPND_P2)
7802 && p2 >= 1 && p2 < 16)
7803 {
7804 specs[count] = tmpl;
7805 specs[count].cmp_type =
7806 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7807 specs[count++].index = p2;
7808 }
7809 }
7810 else
7811 {
7812 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
7813 {
7814 specs[count] = tmpl;
7815 specs[count++].index = CURR_SLOT.qp_regno;
7816 }
7817 if (idesc->operands[1] == IA64_OPND_PR)
7818 {
7819 for (i = 1; i < 16; i++)
7820 {
7821 specs[count] = tmpl;
7822 specs[count++].index = i;
7823 }
7824 }
7825 }
7826 }
7827 else
7828 {
7829 UNHANDLED;
7830 }
7831 break;
7832
7833 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
7834 simplified cases of this. */
7835 case IA64_RS_PRr:
7836 if (note == 0)
7837 {
7838 for (i = 16; i < 63; i++)
7839 {
7840 specs[count] = tmpl;
7841 specs[count++].index = i;
7842 }
7843 }
7844 else if (note == 7)
7845 {
7846 valueT mask = 0;
7847 /* Mark only those registers indicated by the mask. */
7848 if (rsrc_write
7849 && idesc->operands[0] == IA64_OPND_PR)
7850 {
7851 mask = CURR_SLOT.opnd[2].X_add_number;
7852 if (mask & ((valueT) 1<<16))
7853 for (i = 16; i < 63; i++)
7854 {
7855 specs[count] = tmpl;
7856 specs[count++].index = i;
7857 }
7858 }
7859 else if (rsrc_write
7860 && idesc->operands[0] == IA64_OPND_PR_ROT)
7861 {
7862 for (i = 16; i < 63; i++)
7863 {
7864 specs[count] = tmpl;
7865 specs[count++].index = i;
7866 }
7867 }
7868 else
7869 {
7870 UNHANDLED;
7871 }
7872 }
7873 else if (note == 11) /* note 11 implies note 1 as well */
7874 {
7875 if (rsrc_write)
7876 {
7877 for (i = 0; i < idesc->num_outputs; i++)
7878 {
7879 if (idesc->operands[i] == IA64_OPND_P1
7880 || idesc->operands[i] == IA64_OPND_P2)
7881 {
7882 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
7883 if (regno >= 16 && regno < 63)
7884 {
7885 specs[count] = tmpl;
7886 specs[count++].index = regno;
7887 }
7888 }
7889 }
7890 }
7891 else
7892 {
7893 UNHANDLED;
7894 }
7895 }
7896 else if (note == 12)
7897 {
7898 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7899 {
7900 specs[count] = tmpl;
7901 specs[count++].index = CURR_SLOT.qp_regno;
7902 }
7903 }
7904 else if (note == 1)
7905 {
7906 if (rsrc_write)
7907 {
7908 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
7909 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
7910 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
7911 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
7912
7913 if ((idesc->operands[0] == IA64_OPND_P1
7914 || idesc->operands[0] == IA64_OPND_P2)
7915 && p1 >= 16 && p1 < 63)
7916 {
7917 specs[count] = tmpl;
7918 specs[count].cmp_type =
7919 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
7920 specs[count++].index = p1;
7921 }
7922 if ((idesc->operands[1] == IA64_OPND_P1
7923 || idesc->operands[1] == IA64_OPND_P2)
7924 && p2 >= 16 && p2 < 63)
7925 {
7926 specs[count] = tmpl;
7927 specs[count].cmp_type =
7928 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
7929 specs[count++].index = p2;
7930 }
7931 }
7932 else
7933 {
7934 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
7935 {
7936 specs[count] = tmpl;
7937 specs[count++].index = CURR_SLOT.qp_regno;
7938 }
7939 if (idesc->operands[1] == IA64_OPND_PR)
7940 {
7941 for (i = 16; i < 63; i++)
7942 {
7943 specs[count] = tmpl;
7944 specs[count++].index = i;
7945 }
7946 }
7947 }
7948 }
7949 else
7950 {
7951 UNHANDLED;
7952 }
7953 break;
7954
7955 case IA64_RS_PSR:
7956 /* Verify that the instruction is using the PSR bit indicated in
7957 dep->regindex. */
7958 if (note == 0)
7959 {
7960 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
7961 {
7962 if (dep->regindex < 6)
7963 {
7964 specs[count++] = tmpl;
7965 }
7966 }
7967 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
7968 {
7969 if (dep->regindex < 32
7970 || dep->regindex == 35
7971 || dep->regindex == 36
7972 || (!rsrc_write && dep->regindex == PSR_CPL))
7973 {
7974 specs[count++] = tmpl;
7975 }
7976 }
7977 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
7978 {
7979 if (dep->regindex < 32
7980 || dep->regindex == 35
7981 || dep->regindex == 36
7982 || (rsrc_write && dep->regindex == PSR_CPL))
7983 {
7984 specs[count++] = tmpl;
7985 }
7986 }
7987 else
7988 {
7989 /* Several PSR bits have very specific dependencies. */
7990 switch (dep->regindex)
7991 {
7992 default:
7993 specs[count++] = tmpl;
7994 break;
7995 case PSR_IC:
7996 if (rsrc_write)
7997 {
7998 specs[count++] = tmpl;
7999 }
8000 else
8001 {
8002 /* Only certain CR accesses use PSR.ic */
8003 if (idesc->operands[0] == IA64_OPND_CR3
8004 || idesc->operands[1] == IA64_OPND_CR3)
8005 {
8006 int index =
8007 ((idesc->operands[0] == IA64_OPND_CR3)
8008 ? 0 : 1);
8009 int regno =
8010 CURR_SLOT.opnd[index].X_add_number - REG_CR;
8011
8012 switch (regno)
8013 {
8014 default:
8015 break;
8016 case CR_ITIR:
8017 case CR_IFS:
8018 case CR_IIM:
8019 case CR_IIP:
8020 case CR_IPSR:
8021 case CR_ISR:
8022 case CR_IFA:
8023 case CR_IHA:
8024 case CR_IIPA:
8025 specs[count++] = tmpl;
8026 break;
8027 }
8028 }
8029 }
8030 break;
8031 case PSR_CPL:
8032 if (rsrc_write)
8033 {
8034 specs[count++] = tmpl;
8035 }
8036 else
8037 {
8038 /* Only some AR accesses use cpl */
8039 if (idesc->operands[0] == IA64_OPND_AR3
8040 || idesc->operands[1] == IA64_OPND_AR3)
8041 {
8042 int index =
8043 ((idesc->operands[0] == IA64_OPND_AR3)
8044 ? 0 : 1);
8045 int regno =
8046 CURR_SLOT.opnd[index].X_add_number - REG_AR;
8047
8048 if (regno == AR_ITC
8049 || (index == 0
8050 && (regno == AR_ITC
8051 || regno == AR_RSC
8052 || (regno >= AR_K0
8053 && regno <= AR_K7))))
8054 {
8055 specs[count++] = tmpl;
8056 }
8057 }
8058 else
8059 {
8060 specs[count++] = tmpl;
8061 }
8062 break;
8063 }
8064 }
8065 }
8066 }
8067 else if (note == 7)
8068 {
8069 valueT mask = 0;
8070 if (idesc->operands[0] == IA64_OPND_IMMU24)
8071 {
8072 mask = CURR_SLOT.opnd[0].X_add_number;
8073 }
8074 else
8075 {
8076 UNHANDLED;
8077 }
8078 if (mask & ((valueT) 1 << dep->regindex))
8079 {
8080 specs[count++] = tmpl;
8081 }
8082 }
8083 else if (note == 8)
8084 {
8085 int min = dep->regindex == PSR_DFL ? 2 : 32;
8086 int max = dep->regindex == PSR_DFL ? 31 : 127;
8087 /* dfh is read on FR32-127; dfl is read on FR2-31 */
8088 for (i = 0; i < NELEMS (idesc->operands); i++)
8089 {
8090 if (idesc->operands[i] == IA64_OPND_F1
8091 || idesc->operands[i] == IA64_OPND_F2
8092 || idesc->operands[i] == IA64_OPND_F3
8093 || idesc->operands[i] == IA64_OPND_F4)
8094 {
8095 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8096 if (reg >= min && reg <= max)
8097 {
8098 specs[count++] = tmpl;
8099 }
8100 }
8101 }
8102 }
8103 else if (note == 9)
8104 {
8105 int min = dep->regindex == PSR_MFL ? 2 : 32;
8106 int max = dep->regindex == PSR_MFL ? 31 : 127;
8107 /* mfh is read on writes to FR32-127; mfl is read on writes to
8108 FR2-31 */
8109 for (i = 0; i < idesc->num_outputs; i++)
8110 {
8111 if (idesc->operands[i] == IA64_OPND_F1)
8112 {
8113 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8114 if (reg >= min && reg <= max)
8115 {
8116 specs[count++] = tmpl;
8117 }
8118 }
8119 }
8120 }
8121 else if (note == 10)
8122 {
8123 for (i = 0; i < NELEMS (idesc->operands); i++)
8124 {
8125 if (idesc->operands[i] == IA64_OPND_R1
8126 || idesc->operands[i] == IA64_OPND_R2
8127 || idesc->operands[i] == IA64_OPND_R3)
8128 {
8129 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8130 if (regno >= 16 && regno <= 31)
8131 {
8132 specs[count++] = tmpl;
8133 }
8134 }
8135 }
8136 }
8137 else
8138 {
8139 UNHANDLED;
8140 }
8141 break;
8142
8143 case IA64_RS_AR_FPSR:
8144 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8145 {
8146 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8147 if (regno == AR_FPSR)
8148 {
8149 specs[count++] = tmpl;
8150 }
8151 }
8152 else
8153 {
8154 specs[count++] = tmpl;
8155 }
8156 break;
8157
8158 case IA64_RS_ARX:
8159 /* Handle all AR[REG] resources */
8160 if (note == 0 || note == 1)
8161 {
8162 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8163 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
8164 && regno == dep->regindex)
8165 {
8166 specs[count++] = tmpl;
8167 }
8168 /* other AR[REG] resources may be affected by AR accesses */
8169 else if (idesc->operands[0] == IA64_OPND_AR3)
8170 {
8171 /* AR[] writes */
8172 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
8173 switch (dep->regindex)
8174 {
8175 default:
8176 break;
8177 case AR_BSP:
8178 case AR_RNAT:
8179 if (regno == AR_BSPSTORE)
8180 {
8181 specs[count++] = tmpl;
8182 }
8183 case AR_RSC:
8184 if (!rsrc_write &&
8185 (regno == AR_BSPSTORE
8186 || regno == AR_RNAT))
8187 {
8188 specs[count++] = tmpl;
8189 }
8190 break;
8191 }
8192 }
8193 else if (idesc->operands[1] == IA64_OPND_AR3)
8194 {
8195 /* AR[] reads */
8196 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
8197 switch (dep->regindex)
8198 {
8199 default:
8200 break;
8201 case AR_RSC:
8202 if (regno == AR_BSPSTORE || regno == AR_RNAT)
8203 {
8204 specs[count++] = tmpl;
8205 }
8206 break;
8207 }
8208 }
8209 else
8210 {
8211 specs[count++] = tmpl;
8212 }
8213 }
8214 else
8215 {
8216 UNHANDLED;
8217 }
8218 break;
8219
8220 case IA64_RS_CRX:
8221 /* Handle all CR[REG] resources */
8222 if (note == 0 || note == 1)
8223 {
8224 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8225 {
8226 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8227 if (regno == dep->regindex)
8228 {
8229 specs[count++] = tmpl;
8230 }
8231 else if (!rsrc_write)
8232 {
8233 /* Reads from CR[IVR] affect other resources. */
8234 if (regno == CR_IVR)
8235 {
8236 if ((dep->regindex >= CR_IRR0
8237 && dep->regindex <= CR_IRR3)
8238 || dep->regindex == CR_TPR)
8239 {
8240 specs[count++] = tmpl;
8241 }
8242 }
8243 }
8244 }
8245 else
8246 {
8247 specs[count++] = tmpl;
8248 }
8249 }
8250 else
8251 {
8252 UNHANDLED;
8253 }
8254 break;
8255
8256 case IA64_RS_INSERVICE:
8257 /* look for write of EOI (67) or read of IVR (65) */
8258 if ((idesc->operands[0] == IA64_OPND_CR3
8259 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
8260 || (idesc->operands[1] == IA64_OPND_CR3
8261 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
8262 {
8263 specs[count++] = tmpl;
8264 }
8265 break;
8266
8267 case IA64_RS_GR0:
8268 if (note == 1)
8269 {
8270 specs[count++] = tmpl;
8271 }
8272 else
8273 {
8274 UNHANDLED;
8275 }
8276 break;
8277
8278 case IA64_RS_CFM:
8279 if (note != 2)
8280 {
8281 specs[count++] = tmpl;
8282 }
8283 else
8284 {
8285 /* Check if any of the registers accessed are in the rotating region.
8286 mov to/from pr accesses CFM only when qp_regno is in the rotating
8287 region */
8288 for (i = 0; i < NELEMS (idesc->operands); i++)
8289 {
8290 if (idesc->operands[i] == IA64_OPND_R1
8291 || idesc->operands[i] == IA64_OPND_R2
8292 || idesc->operands[i] == IA64_OPND_R3)
8293 {
8294 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8295 /* Assumes that md.rot.num_regs is always valid */
8296 if (md.rot.num_regs > 0
8297 && num > 31
8298 && num < 31 + md.rot.num_regs)
8299 {
8300 specs[count] = tmpl;
8301 specs[count++].specific = 0;
8302 }
8303 }
8304 else if (idesc->operands[i] == IA64_OPND_F1
8305 || idesc->operands[i] == IA64_OPND_F2
8306 || idesc->operands[i] == IA64_OPND_F3
8307 || idesc->operands[i] == IA64_OPND_F4)
8308 {
8309 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
8310 if (num > 31)
8311 {
8312 specs[count] = tmpl;
8313 specs[count++].specific = 0;
8314 }
8315 }
8316 else if (idesc->operands[i] == IA64_OPND_P1
8317 || idesc->operands[i] == IA64_OPND_P2)
8318 {
8319 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
8320 if (num > 15)
8321 {
8322 specs[count] = tmpl;
8323 specs[count++].specific = 0;
8324 }
8325 }
8326 }
8327 if (CURR_SLOT.qp_regno > 15)
8328 {
8329 specs[count] = tmpl;
8330 specs[count++].specific = 0;
8331 }
8332 }
8333 break;
8334
8335 /* This is the same as IA64_RS_PRr, except simplified to account for
8336 the fact that there is only one register. */
8337 case IA64_RS_PR63:
8338 if (note == 0)
8339 {
8340 specs[count++] = tmpl;
8341 }
8342 else if (note == 7)
8343 {
8344 valueT mask = 0;
8345 if (idesc->operands[2] == IA64_OPND_IMM17)
8346 mask = CURR_SLOT.opnd[2].X_add_number;
8347 if (mask & ((valueT) 1 << 63))
8348 specs[count++] = tmpl;
8349 }
8350 else if (note == 11)
8351 {
8352 if ((idesc->operands[0] == IA64_OPND_P1
8353 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
8354 || (idesc->operands[1] == IA64_OPND_P2
8355 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
8356 {
8357 specs[count++] = tmpl;
8358 }
8359 }
8360 else if (note == 12)
8361 {
8362 if (CURR_SLOT.qp_regno == 63)
8363 {
8364 specs[count++] = tmpl;
8365 }
8366 }
8367 else if (note == 1)
8368 {
8369 if (rsrc_write)
8370 {
8371 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8372 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8373 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8374 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8375
8376 if (p1 == 63
8377 && (idesc->operands[0] == IA64_OPND_P1
8378 || idesc->operands[0] == IA64_OPND_P2))
8379 {
8380 specs[count] = tmpl;
8381 specs[count++].cmp_type =
8382 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8383 }
8384 if (p2 == 63
8385 && (idesc->operands[1] == IA64_OPND_P1
8386 || idesc->operands[1] == IA64_OPND_P2))
8387 {
8388 specs[count] = tmpl;
8389 specs[count++].cmp_type =
8390 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8391 }
8392 }
8393 else
8394 {
8395 if (CURR_SLOT.qp_regno == 63)
8396 {
8397 specs[count++] = tmpl;
8398 }
8399 }
8400 }
8401 else
8402 {
8403 UNHANDLED;
8404 }
8405 break;
8406
8407 case IA64_RS_RSE:
8408 /* FIXME we can identify some individual RSE written resources, but RSE
8409 read resources have not yet been completely identified, so for now
8410 treat RSE as a single resource */
8411 if (strncmp (idesc->name, "mov", 3) == 0)
8412 {
8413 if (rsrc_write)
8414 {
8415 if (idesc->operands[0] == IA64_OPND_AR3
8416 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
8417 {
8418 specs[count] = tmpl;
8419 specs[count++].index = 0; /* IA64_RSE_BSPLOAD/RNATBITINDEX */
8420 }
8421 }
8422 else
8423 {
8424 if (idesc->operands[0] == IA64_OPND_AR3)
8425 {
8426 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
8427 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
8428 {
8429 specs[count++] = tmpl;
8430 }
8431 }
8432 else if (idesc->operands[1] == IA64_OPND_AR3)
8433 {
8434 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
8435 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
8436 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
8437 {
8438 specs[count++] = tmpl;
8439 }
8440 }
8441 }
8442 }
8443 else
8444 {
8445 specs[count++] = tmpl;
8446 }
8447 break;
8448
8449 case IA64_RS_ANY:
8450 /* FIXME -- do any of these need to be non-specific? */
8451 specs[count++] = tmpl;
8452 break;
8453
8454 default:
8455 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
8456 break;
8457 }
8458
8459 return count;
8460 }
8461
8462 /* Clear branch flags on marked resources. This breaks the link between the
8463 QP of the marking instruction and a subsequent branch on the same QP. */
8464
8465 static void
8466 clear_qp_branch_flag (mask)
8467 valueT mask;
8468 {
8469 int i;
8470 for (i = 0; i < regdepslen; i++)
8471 {
8472 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
8473 if ((bit & mask) != 0)
8474 {
8475 regdeps[i].link_to_qp_branch = 0;
8476 }
8477 }
8478 }
8479
8480 /* Remove any mutexes which contain any of the PRs indicated in the mask.
8481
8482    Any change to a PR clears the mutex relations which include that PR.  */
8483
8484 static void
8485 clear_qp_mutex (mask)
8486 valueT mask;
8487 {
8488 int i;
8489
8490 i = 0;
8491 while (i < qp_mutexeslen)
8492 {
8493 if ((qp_mutexes[i].prmask & mask) != 0)
8494 {
8495 if (md.debug_dv)
8496 {
8497 fprintf (stderr, " Clearing mutex relation");
8498 print_prmask (qp_mutexes[i].prmask);
8499 fprintf (stderr, "\n");
8500 }
8501 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
8502 }
8503 else
8504 ++i;
8505 }
8506 }
8507
8508 /* Clear implies relations which contain PRs in the given masks.
8509 P1_MASK indicates the source of the implies relation, while P2_MASK
8510 indicates the implied PR. */
8511
8512 static void
8513 clear_qp_implies (p1_mask, p2_mask)
8514 valueT p1_mask;
8515 valueT p2_mask;
8516 {
8517 int i;
8518
8519 i = 0;
8520 while (i < qp_implieslen)
8521 {
8522 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
8523 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
8524 {
8525 if (md.debug_dv)
8526 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
8527 qp_implies[i].p1, qp_implies[i].p2);
8528 qp_implies[i] = qp_implies[--qp_implieslen];
8529 }
8530 else
8531 ++i;
8532 }
8533 }
8534
8535 /* Record in the list of implied relations that predicate P1 implies P2.  */
8536
8537 static void
8538 add_qp_imply (p1, p2)
8539 int p1, p2;
8540 {
8541 valueT mask;
8542 valueT bit;
8543 int i;
8544
8545 /* p0 is not meaningful here. */
8546 if (p1 == 0 || p2 == 0)
8547 abort ();
8548
8549 if (p1 == p2)
8550 return;
8551
8552 /* If it exists already, ignore it. */
8553 for (i = 0; i < qp_implieslen; i++)
8554 {
8555 if (qp_implies[i].p1 == p1
8556 && qp_implies[i].p2 == p2
8557 && qp_implies[i].path == md.path
8558 && !qp_implies[i].p2_branched)
8559 return;
8560 }
8561
8562 if (qp_implieslen == qp_impliestotlen)
8563 {
8564 qp_impliestotlen += 20;
8565 qp_implies = (struct qp_imply *)
8566 xrealloc ((void *) qp_implies,
8567 qp_impliestotlen * sizeof (struct qp_imply));
8568 }
8569 if (md.debug_dv)
8570 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
8571 qp_implies[qp_implieslen].p1 = p1;
8572 qp_implies[qp_implieslen].p2 = p2;
8573 qp_implies[qp_implieslen].path = md.path;
8574 qp_implies[qp_implieslen++].p2_branched = 0;
8575
8576 /* Add in the implied transitive relations; for everything that p2 implies,
8577 make p1 imply that, too; for everything that implies p1, make it imply p2
8578 as well. */
8579 for (i = 0; i < qp_implieslen; i++)
8580 {
8581 if (qp_implies[i].p1 == p2)
8582 add_qp_imply (p1, qp_implies[i].p2);
8583 if (qp_implies[i].p2 == p1)
8584 add_qp_imply (qp_implies[i].p1, p2);
8585 }
8586 /* Add in mutex relations implied by this implies relation; for each mutex
8587 relation containing p2, duplicate it and replace p2 with p1. */
8588 bit = (valueT) 1 << p1;
8589 mask = (valueT) 1 << p2;
8590 for (i = 0; i < qp_mutexeslen; i++)
8591 {
8592 if (qp_mutexes[i].prmask & mask)
8593 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
8594 }
8595 }
8596
8597 /* Add the PRs specified in the mask to the mutex list; this means that only
8598 one of the PRs can be true at any time. PR0 should never be included in
8599 the mask. */
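/* For instance, an unpredicated compare such as

	cmp.eq p6, p7 = r8, r9

   sets exactly one of p6/p7, so note_register_values () records p6 and p7
   as mutex through this routine.  */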
8600
8601 static void
8602 add_qp_mutex (mask)
8603 valueT mask;
8604 {
8605 if (mask & 0x1)
8606 abort ();
8607
8608 if (qp_mutexeslen == qp_mutexestotlen)
8609 {
8610 qp_mutexestotlen += 20;
8611 qp_mutexes = (struct qpmutex *)
8612 xrealloc ((void *) qp_mutexes,
8613 qp_mutexestotlen * sizeof (struct qpmutex));
8614 }
8615 if (md.debug_dv)
8616 {
8617 fprintf (stderr, " Registering mutex on");
8618 print_prmask (mask);
8619 fprintf (stderr, "\n");
8620 }
8621 qp_mutexes[qp_mutexeslen].path = md.path;
8622 qp_mutexes[qp_mutexeslen++].prmask = mask;
8623 }
8624
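/* Invalidate all tracked general register values (r0 is left alone, since
   it is always zero).  */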
8625 static void
8626 clear_register_values ()
8627 {
8628 int i;
8629 if (md.debug_dv)
8630 fprintf (stderr, " Clearing register values\n");
8631 for (i = 1; i < NELEMS (gr_values); i++)
8632 gr_values[i].known = 0;
8633 }
8634
8635 /* Keep track of register values/changes which affect DV tracking.
8636
8637 optimization note: should add a flag to classes of insns where otherwise we
8638 have to examine a group of strings to identify them. */
8639
8640 static void
8641 note_register_values (idesc)
8642 struct ia64_opcode *idesc;
8643 {
8644 valueT qp_changemask = 0;
8645 int i;
8646
8647 /* Invalidate values for registers being written to. */
8648 for (i = 0; i < idesc->num_outputs; i++)
8649 {
8650 if (idesc->operands[i] == IA64_OPND_R1
8651 || idesc->operands[i] == IA64_OPND_R2
8652 || idesc->operands[i] == IA64_OPND_R3)
8653 {
8654 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8655 if (regno > 0 && regno < NELEMS (gr_values))
8656 gr_values[regno].known = 0;
8657 }
8658 else if (idesc->operands[i] == IA64_OPND_R3_2)
8659 {
8660 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
8661 if (regno > 0 && regno < 4)
8662 gr_values[regno].known = 0;
8663 }
8664 else if (idesc->operands[i] == IA64_OPND_P1
8665 || idesc->operands[i] == IA64_OPND_P2)
8666 {
8667 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8668 qp_changemask |= (valueT) 1 << regno;
8669 }
8670       else if (idesc->operands[i] == IA64_OPND_PR)
8671 	{
8672 	  if (CURR_SLOT.opnd[2].X_add_number & (valueT) 0x10000)
8673 	    qp_changemask = ~(valueT) 0x1FFFF | CURR_SLOT.opnd[2].X_add_number;
8674 	  else
8675 	    qp_changemask = CURR_SLOT.opnd[2].X_add_number;
8676 	  break;
8677 	}
8678       else if (idesc->operands[i] == IA64_OPND_PR_ROT)
8679 	{
8680 	  if (CURR_SLOT.opnd[1].X_add_number & ((valueT) 1 << 43))
8681 	    qp_changemask = ~(valueT) 0xFFFFFFFFFFF | CURR_SLOT.opnd[1].X_add_number;
8682 	  else
8683 	    qp_changemask = CURR_SLOT.opnd[1].X_add_number;
8684 	  qp_changemask &= ~(valueT) 0xFFFF;
8685 	  break;
8686 	}
8687 }
8688
8689 /* Always clear qp branch flags on any PR change. */
8690 /* FIXME there may be exceptions for certain compares. */
8691 clear_qp_branch_flag (qp_changemask);
8692
8693 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
8694 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
8695 {
8696 qp_changemask |= ~(valueT) 0xFFFF;
8697 if (strcmp (idesc->name, "clrrrb.pr") != 0)
8698 {
8699 for (i = 32; i < 32 + md.rot.num_regs; i++)
8700 gr_values[i].known = 0;
8701 }
8702 clear_qp_mutex (qp_changemask);
8703 clear_qp_implies (qp_changemask, qp_changemask);
8704 }
8705 /* After a call, all register values are undefined, except those marked
8706 as "safe". */
8707   else if (strncmp (idesc->name, "br.call", 7) == 0
8708 	   || strncmp (idesc->name, "brl.call", 8) == 0)
8709 {
8710 /* FIXME keep GR values which are marked as "safe_across_calls" */
8711 clear_register_values ();
8712 clear_qp_mutex (~qp_safe_across_calls);
8713 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
8714 clear_qp_branch_flag (~qp_safe_across_calls);
8715 }
8716 else if (is_interruption_or_rfi (idesc)
8717 || is_taken_branch (idesc))
8718 {
8719 clear_register_values ();
8720 clear_qp_mutex (~(valueT) 0);
8721 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
8722 }
8723 /* Look for mutex and implies relations. */
8724 else if ((idesc->operands[0] == IA64_OPND_P1
8725 || idesc->operands[0] == IA64_OPND_P2)
8726 && (idesc->operands[1] == IA64_OPND_P1
8727 || idesc->operands[1] == IA64_OPND_P2))
8728 {
8729 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8730 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8731 valueT p1mask = (valueT) 1 << p1;
8732 valueT p2mask = (valueT) 1 << p2;
8733
8734 /* If one of the PRs is PR0, we can't really do anything. */
8735 if (p1 == 0 || p2 == 0)
8736 {
8737 if (md.debug_dv)
8738 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
8739 }
8740 /* In general, clear mutexes and implies which include P1 or P2,
8741 with the following exceptions. */
8742 else if (strstr (idesc->name, ".or.andcm") != NULL)
8743 {
8744 add_qp_mutex (p1mask | p2mask);
8745 clear_qp_implies (p2mask, p1mask);
8746 }
8747 else if (strstr (idesc->name, ".and.orcm") != NULL)
8748 {
8749 add_qp_mutex (p1mask | p2mask);
8750 clear_qp_implies (p1mask, p2mask);
8751 }
8752 else if (strstr (idesc->name, ".and") != NULL)
8753 {
8754 clear_qp_implies (0, p1mask | p2mask);
8755 }
8756 else if (strstr (idesc->name, ".or") != NULL)
8757 {
8758 clear_qp_mutex (p1mask | p2mask);
8759 clear_qp_implies (p1mask | p2mask, 0);
8760 }
8761 else
8762 {
8763 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
8764 if (strstr (idesc->name, ".unc") != NULL)
8765 {
8766 add_qp_mutex (p1mask | p2mask);
8767 if (CURR_SLOT.qp_regno != 0)
8768 {
8769 add_qp_imply (CURR_SLOT.opnd[0].X_add_number - REG_P,
8770 CURR_SLOT.qp_regno);
8771 add_qp_imply (CURR_SLOT.opnd[1].X_add_number - REG_P,
8772 CURR_SLOT.qp_regno);
8773 }
8774 }
8775 else if (CURR_SLOT.qp_regno == 0)
8776 {
8777 add_qp_mutex (p1mask | p2mask);
8778 }
8779 else
8780 {
8781 clear_qp_mutex (p1mask | p2mask);
8782 }
8783 }
8784 }
8785 /* Look for mov imm insns into GRs. */
8786 else if (idesc->operands[0] == IA64_OPND_R1
8787 && (idesc->operands[1] == IA64_OPND_IMM22
8788 || idesc->operands[1] == IA64_OPND_IMMU64)
8789 && (strcmp (idesc->name, "mov") == 0
8790 || strcmp (idesc->name, "movl") == 0))
8791 {
8792 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8793 if (regno > 0 && regno < NELEMS (gr_values))
8794 {
8795 gr_values[regno].known = 1;
8796 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
8797 gr_values[regno].path = md.path;
8798 if (md.debug_dv)
8799 {
8800 fprintf (stderr, " Know gr%d = ", regno);
8801 fprintf_vma (stderr, gr_values[regno].value);
8802 fputs ("\n", stderr);
8803 }
8804 }
8805 }
8806 else
8807 {
8808 clear_qp_mutex (qp_changemask);
8809 clear_qp_implies (qp_changemask, qp_changemask);
8810 }
8811 }
8812
8813 /* Return whether the given predicate registers are currently mutex. */
8814
8815 static int
8816 qp_mutex (p1, p2, path)
8817 int p1;
8818 int p2;
8819 int path;
8820 {
8821 int i;
8822 valueT mask;
8823
8824 if (p1 != p2)
8825 {
8826 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
8827 for (i = 0; i < qp_mutexeslen; i++)
8828 {
8829 if (qp_mutexes[i].path >= path
8830 && (qp_mutexes[i].prmask & mask) == mask)
8831 return 1;
8832 }
8833 }
8834 return 0;
8835 }
8836
8837 /* Return whether the given resource is in the given insn's list of chks.
8838    Return 1 if the conflict is absolutely determined, 2 if it's a potential
8839    conflict.  */
8840
8841 static int
8842 resources_match (rs, idesc, note, qp_regno, path)
8843 struct rsrc *rs;
8844 struct ia64_opcode *idesc;
8845 int note;
8846 int qp_regno;
8847 int path;
8848 {
8849 struct rsrc specs[MAX_SPECS];
8850 int count;
8851
8852 /* If the marked resource's qp_regno and the given qp_regno are mutex,
8853 we don't need to check. One exception is note 11, which indicates that
8854 target predicates are written regardless of PR[qp]. */
8855 if (qp_mutex (rs->qp_regno, qp_regno, path)
8856 && note != 11)
8857 return 0;
8858
8859 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
8860 while (count-- > 0)
8861 {
8862       /* UNAT checking is a bit more specific than other resources.  */
8863 if (rs->dependency->specifier == IA64_RS_AR_UNAT
8864 && specs[count].mem_offset.hint
8865 && rs->mem_offset.hint)
8866 {
8867 if (rs->mem_offset.base == specs[count].mem_offset.base)
8868 {
8869 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
8870 ((specs[count].mem_offset.offset >> 3) & 0x3F))
8871 return 1;
8872 else
8873 continue;
8874 }
8875 }
8876
8877 /* Skip apparent PR write conflicts where both writes are an AND or both
8878 writes are an OR. */
8879 if (rs->dependency->specifier == IA64_RS_PR
8880 || rs->dependency->specifier == IA64_RS_PRr
8881 || rs->dependency->specifier == IA64_RS_PR63)
8882 {
8883 if (specs[count].cmp_type != CMP_NONE
8884 && specs[count].cmp_type == rs->cmp_type)
8885 {
8886 if (md.debug_dv)
8887 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
8888 dv_mode[rs->dependency->mode],
8889 rs->dependency->specifier != IA64_RS_PR63 ?
8890 specs[count].index : 63);
8891 continue;
8892 }
8893 if (md.debug_dv)
8894 fprintf (stderr,
8895 " %s on parallel compare conflict %s vs %s on PR%d\n",
8896 dv_mode[rs->dependency->mode],
8897 dv_cmp_type[rs->cmp_type],
8898 dv_cmp_type[specs[count].cmp_type],
8899 rs->dependency->specifier != IA64_RS_PR63 ?
8900 specs[count].index : 63);
8901
8902 }
8903
8904       /* If either resource is not specific, conservatively assume a
8905 	 conflict.  */
8906 if (!specs[count].specific || !rs->specific)
8907 return 2;
8908 else if (specs[count].index == rs->index)
8909 return 1;
8910 }
8911 #if 0
8912 if (md.debug_dv)
8913 fprintf (stderr, " No %s conflicts\n", rs->dependency->name);
8914 #endif
8915
8916 return 0;
8917 }
8918
8919 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
8920 insert a stop to create the break. Update all resource dependencies
8921 appropriately. If QP_REGNO is non-zero, only apply the break to resources
8922 which use the same QP_REGNO and have the link_to_qp_branch flag set.
8923 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
8924 instruction. */
8925
8926 static void
8927 insn_group_break (insert_stop, qp_regno, save_current)
8928 int insert_stop;
8929 int qp_regno;
8930 int save_current;
8931 {
8932 int i;
8933
8934 if (insert_stop && md.num_slots_in_use > 0)
8935 PREV_SLOT.end_of_insn_group = 1;
8936
8937 if (md.debug_dv)
8938 {
8939 fprintf (stderr, " Insn group break%s",
8940 (insert_stop ? " (w/stop)" : ""));
8941 if (qp_regno != 0)
8942 fprintf (stderr, " effective for QP=%d", qp_regno);
8943 fprintf (stderr, "\n");
8944 }
8945
8946 i = 0;
8947 while (i < regdepslen)
8948 {
8949 const struct ia64_dependency *dep = regdeps[i].dependency;
8950
8951 if (qp_regno != 0
8952 && regdeps[i].qp_regno != qp_regno)
8953 {
8954 ++i;
8955 continue;
8956 }
8957
8958 if (save_current
8959 && CURR_SLOT.src_file == regdeps[i].file
8960 && CURR_SLOT.src_line == regdeps[i].line)
8961 {
8962 ++i;
8963 continue;
8964 }
8965
8966       /* Clear dependencies which are automatically cleared by a stop, or
8967 	 those that have reached the appropriate state of insn serialization.  */
8968 if (dep->semantics == IA64_DVS_IMPLIED
8969 || dep->semantics == IA64_DVS_IMPLIEDF
8970 || regdeps[i].insn_srlz == STATE_SRLZ)
8971 {
8972 print_dependency ("Removing", i);
8973 regdeps[i] = regdeps[--regdepslen];
8974 }
8975 else
8976 {
8977 if (dep->semantics == IA64_DVS_DATA
8978 || dep->semantics == IA64_DVS_INSTR
8979 || dep->semantics == IA64_DVS_SPECIFIC)
8980 {
8981 if (regdeps[i].insn_srlz == STATE_NONE)
8982 regdeps[i].insn_srlz = STATE_STOP;
8983 if (regdeps[i].data_srlz == STATE_NONE)
8984 regdeps[i].data_srlz = STATE_STOP;
8985 }
8986 ++i;
8987 }
8988 }
8989 }
8990
8991 /* Add the given resource usage spec to the list of active dependencies. */
8992
8993 static void
8994 mark_resource (idesc, dep, spec, depind, path)
8995 struct ia64_opcode *idesc ATTRIBUTE_UNUSED;
8996 const struct ia64_dependency *dep ATTRIBUTE_UNUSED;
8997 struct rsrc *spec;
8998 int depind;
8999 int path;
9000 {
9001 if (regdepslen == regdepstotlen)
9002 {
9003 regdepstotlen += 20;
9004 regdeps = (struct rsrc *)
9005 xrealloc ((void *) regdeps,
9006 regdepstotlen * sizeof (struct rsrc));
9007 }
9008
9009 regdeps[regdepslen] = *spec;
9010 regdeps[regdepslen].depind = depind;
9011 regdeps[regdepslen].path = path;
9012 regdeps[regdepslen].file = CURR_SLOT.src_file;
9013 regdeps[regdepslen].line = CURR_SLOT.src_line;
9014
9015 print_dependency ("Adding", regdepslen);
9016
9017 ++regdepslen;
9018 }
9019
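/* If DV debugging is enabled, print a description of the dependency at
   index DEPIND in the regdeps list, prefixed by ACTION (e.g. "Adding" or
   "Removing").  */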
9020 static void
9021 print_dependency (action, depind)
9022 const char *action;
9023 int depind;
9024 {
9025 if (md.debug_dv)
9026 {
9027 fprintf (stderr, " %s %s '%s'",
9028 action, dv_mode[(regdeps[depind].dependency)->mode],
9029 (regdeps[depind].dependency)->name);
9030 if (regdeps[depind].specific && regdeps[depind].index != 0)
9031 fprintf (stderr, " (%d)", regdeps[depind].index);
9032 if (regdeps[depind].mem_offset.hint)
9033 {
9034 fputs (" ", stderr);
9035 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
9036 fputs ("+", stderr);
9037 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
9038 }
9039 fprintf (stderr, "\n");
9040 }
9041 }
9042
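/* An instruction serialization has been seen; any dependency that had
   already reached the stop state is now fully serialized.  */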
9043 static void
9044 instruction_serialization ()
9045 {
9046 int i;
9047 if (md.debug_dv)
9048 fprintf (stderr, " Instruction serialization\n");
9049 for (i = 0; i < regdepslen; i++)
9050 if (regdeps[i].insn_srlz == STATE_STOP)
9051 regdeps[i].insn_srlz = STATE_SRLZ;
9052 }
9053
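/* A data serialization has been seen; dependencies that had reached the
   stop state, as well as all "other" dependencies, are now satisfied and
   are dropped from the list.  */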
9054 static void
9055 data_serialization ()
9056 {
9057 int i = 0;
9058 if (md.debug_dv)
9059 fprintf (stderr, " Data serialization\n");
9060 while (i < regdepslen)
9061 {
9062 if (regdeps[i].data_srlz == STATE_STOP
9063 /* Note: as of 991210, all "other" dependencies are cleared by a
9064 data serialization. This might change with new tables */
9065 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
9066 {
9067 print_dependency ("Removing", i);
9068 regdeps[i] = regdeps[--regdepslen];
9069 }
9070 else
9071 ++i;
9072 }
9073 }
9074
9075 /* Insert stops and serializations as needed to avoid DVs. */
9076
9077 static void
9078 remove_marked_resource (rs)
9079 struct rsrc *rs;
9080 {
9081 switch (rs->dependency->semantics)
9082 {
9083 case IA64_DVS_SPECIFIC:
9084 if (md.debug_dv)
9085 fprintf (stderr, "Implementation-specific, assume worst case...\n");
9086 /* ...fall through... */
9087 case IA64_DVS_INSTR:
9088 if (md.debug_dv)
9089 fprintf (stderr, "Inserting instr serialization\n");
9090 if (rs->insn_srlz < STATE_STOP)
9091 insn_group_break (1, 0, 0);
9092 if (rs->insn_srlz < STATE_SRLZ)
9093 {
9094 int oldqp = CURR_SLOT.qp_regno;
9095 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9096 /* Manually jam a srlz.i insn into the stream */
9097 CURR_SLOT.qp_regno = 0;
9098 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
9099 instruction_serialization ();
9100 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9101 if (++md.num_slots_in_use >= NUM_SLOTS)
9102 emit_one_bundle ();
9103 CURR_SLOT.qp_regno = oldqp;
9104 CURR_SLOT.idesc = oldidesc;
9105 }
9106 insn_group_break (1, 0, 0);
9107 break;
9108 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
9109 "other" types of DV are eliminated
9110 by a data serialization */
9111 case IA64_DVS_DATA:
9112 if (md.debug_dv)
9113 fprintf (stderr, "Inserting data serialization\n");
9114 if (rs->data_srlz < STATE_STOP)
9115 insn_group_break (1, 0, 0);
9116 {
9117 int oldqp = CURR_SLOT.qp_regno;
9118 struct ia64_opcode *oldidesc = CURR_SLOT.idesc;
9119 /* Manually jam a srlz.d insn into the stream */
9120 CURR_SLOT.qp_regno = 0;
9121 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
9122 data_serialization ();
9123 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9124 if (++md.num_slots_in_use >= NUM_SLOTS)
9125 emit_one_bundle ();
9126 CURR_SLOT.qp_regno = oldqp;
9127 CURR_SLOT.idesc = oldidesc;
9128 }
9129 break;
9130 case IA64_DVS_IMPLIED:
9131 case IA64_DVS_IMPLIEDF:
9132 if (md.debug_dv)
9133 fprintf (stderr, "Inserting stop\n");
9134 insn_group_break (1, 0, 0);
9135 break;
9136 default:
9137 break;
9138 }
9139 }
9140
9141 /* Check the resources used by the given opcode against the current dependency
9142 list.
9143
9144 The check is run once for each execution path encountered. In this case,
9145 a unique execution path is the sequence of instructions following a code
9146 entry point, e.g. the following has three execution paths, one starting
9147 at L0, one at L1, and one at L2.
9148
9149 L0: nop
9150 L1: add
9151 L2: add
9152 br.ret
9153 */
9154
9155 static void
9156 check_dependencies (idesc)
9157 struct ia64_opcode *idesc;
9158 {
9159 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9160 int path;
9161 int i;
9162
9163 /* Note that the number of marked resources may change within the
9164 loop if in auto mode. */
9165 i = 0;
9166 while (i < regdepslen)
9167 {
9168 struct rsrc *rs = &regdeps[i];
9169 const struct ia64_dependency *dep = rs->dependency;
9170 int chkind;
9171 int note;
9172 int start_over = 0;
9173
9174 if (dep->semantics == IA64_DVS_NONE
9175 || (chkind = depends_on (rs->depind, idesc)) == -1)
9176 {
9177 ++i;
9178 continue;
9179 }
9180
9181 note = NOTE (opdeps->chks[chkind]);
9182
9183 /* Check this resource against each execution path seen thus far. */
9184 for (path = 0; path <= md.path; path++)
9185 {
9186 int matchtype;
9187
9188 /* If the dependency wasn't on the path being checked, ignore it. */
9189 if (rs->path < path)
9190 continue;
9191
9192 /* If the QP for this insn implies a QP which has branched, don't
9193 bother checking. Ed. NOTE: I don't think this check is terribly
9194 useful; what's the point of generating code which will only be
9195 reached if its QP is zero?
9196 This code was specifically inserted to handle the following code,
9197 based on notes from Intel's DV checking code, where p1 implies p2.
9198
9199 mov r4 = 2
9200 (p2) br.cond L
9201 (p1) mov r4 = 7
9202 */
9203 if (CURR_SLOT.qp_regno != 0)
9204 {
9205 int skip = 0;
9206 int implies;
9207 for (implies = 0; implies < qp_implieslen; implies++)
9208 {
9209 if (qp_implies[implies].path >= path
9210 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
9211 && qp_implies[implies].p2_branched)
9212 {
9213 skip = 1;
9214 break;
9215 }
9216 }
9217 if (skip)
9218 continue;
9219 }
9220
9221 if ((matchtype = resources_match (rs, idesc, note,
9222 CURR_SLOT.qp_regno, path)) != 0)
9223 {
9224 char msg[1024];
9225 char pathmsg[256] = "";
9226 char indexmsg[256] = "";
9227 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
9228
9229 if (path != 0)
9230 sprintf (pathmsg, " when entry is at label '%s'",
9231 md.entry_labels[path - 1]);
9232 if (rs->specific && rs->index != 0)
9233 sprintf (indexmsg, ", specific resource number is %d",
9234 rs->index);
9235 sprintf (msg, "Use of '%s' %s %s dependency '%s' (%s)%s%s",
9236 idesc->name,
9237 (certain ? "violates" : "may violate"),
9238 dv_mode[dep->mode], dep->name,
9239 dv_sem[dep->semantics],
9240 pathmsg, indexmsg);
9241
9242 if (md.explicit_mode)
9243 {
9244 as_warn ("%s", msg);
9245 if (path < md.path)
9246 as_warn (_("Only the first path encountering the conflict "
9247 "is reported"));
9248 as_warn_where (rs->file, rs->line,
9249 _("This is the location of the "
9250 "conflicting usage"));
9251 /* Don't bother checking other paths, to avoid duplicating
9252 the same warning */
9253 break;
9254 }
9255 else
9256 {
9257 if (md.debug_dv)
9258 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
9259
9260 remove_marked_resource (rs);
9261
9262 /* since the set of dependencies has changed, start over */
9263 /* FIXME -- since we're removing dvs as we go, we
9264 probably don't really need to start over... */
9265 start_over = 1;
9266 break;
9267 }
9268 }
9269 }
9270 if (start_over)
9271 i = 0;
9272 else
9273 ++i;
9274 }
9275 }
9276
9277 /* Register new dependencies based on the given opcode. */
9278
9279 static void
9280 mark_resources (idesc)
9281 struct ia64_opcode *idesc;
9282 {
9283 int i;
9284 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
9285 int add_only_qp_reads = 0;
9286
9287 /* A conditional branch only uses its resources if it is taken; if it is
9288 taken, we stop following that path. The other branch types effectively
9289 *always* write their resources. If it's not taken, register only QP
9290 reads. */
9291 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
9292 {
9293 add_only_qp_reads = 1;
9294 }
9295
9296 if (md.debug_dv)
9297 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
9298
9299 for (i = 0; i < opdeps->nregs; i++)
9300 {
9301 const struct ia64_dependency *dep;
9302 struct rsrc specs[MAX_SPECS];
9303 int note;
9304 int path;
9305 int count;
9306
9307 dep = ia64_find_dependency (opdeps->regs[i]);
9308 note = NOTE (opdeps->regs[i]);
9309
9310 if (add_only_qp_reads
9311 && !(dep->mode == IA64_DV_WAR
9312 && (dep->specifier == IA64_RS_PR
9313 || dep->specifier == IA64_RS_PRr
9314 || dep->specifier == IA64_RS_PR63)))
9315 continue;
9316
9317 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
9318
9319 #if 0
9320 if (md.debug_dv && !count)
9321 fprintf (stderr, " No %s %s usage found (path %d)\n",
9322 dv_mode[dep->mode], dep->name, md.path);
9323 #endif
9324
9325 while (count-- > 0)
9326 {
9327 mark_resource (idesc, dep, &specs[count],
9328 DEP (opdeps->regs[i]), md.path);
9329 }
9330
9331 /* The execution path may affect register values, which may in turn
9332 affect which indirect-access resources are accessed. */
9333 switch (dep->specifier)
9334 {
9335 default:
9336 break;
9337 case IA64_RS_CPUID:
9338 case IA64_RS_DBR:
9339 case IA64_RS_IBR:
9340 case IA64_RS_MSR:
9341 case IA64_RS_PKR:
9342 case IA64_RS_PMC:
9343 case IA64_RS_PMD:
9344 case IA64_RS_RR:
9345 for (path = 0; path < md.path; path++)
9346 {
9347 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
9348 while (count-- > 0)
9349 mark_resource (idesc, dep, &specs[count],
9350 DEP (opdeps->regs[i]), path);
9351 }
9352 break;
9353 }
9354 }
9355 }
9356
9357 /* Remove dependencies when they no longer apply. */
9358
9359 static void
9360 update_dependencies (idesc)
9361 struct ia64_opcode *idesc;
9362 {
9363 int i;
9364
9365 if (strcmp (idesc->name, "srlz.i") == 0)
9366 {
9367 instruction_serialization ();
9368 }
9369 else if (strcmp (idesc->name, "srlz.d") == 0)
9370 {
9371 data_serialization ();
9372 }
9373 else if (is_interruption_or_rfi (idesc)
9374 || is_taken_branch (idesc))
9375 {
9376 /* Although technically the taken branch doesn't clear dependencies
9377 which require a srlz.[id], we don't follow the branch; the next
9378 instruction is assumed to start with a clean slate. */
9379 regdepslen = 0;
9380 md.path = 0;
9381 }
9382 else if (is_conditional_branch (idesc)
9383 && CURR_SLOT.qp_regno != 0)
9384 {
9385 int is_call = strstr (idesc->name, ".call") != NULL;
9386
9387 for (i = 0; i < qp_implieslen; i++)
9388 {
9389 /* If the conditional branch's predicate is implied by the predicate
9390 in an existing dependency, remove that dependency. */
9391 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
9392 {
9393 int depind = 0;
9394 /* Note that this implied predicate takes a branch so that if
9395 a later insn generates a DV but its predicate implies this
9396 one, we can avoid the false DV warning. */
9397 qp_implies[i].p2_branched = 1;
9398 while (depind < regdepslen)
9399 {
9400 if (regdeps[depind].qp_regno == qp_implies[i].p1)
9401 {
9402 print_dependency ("Removing", depind);
9403 regdeps[depind] = regdeps[--regdepslen];
9404 }
9405 else
9406 ++depind;
9407 }
9408 }
9409 }
9410 /* Any marked resources which have this same predicate should be
9411 cleared, provided that the QP hasn't been modified between the
9412 marking instruction and the branch. */
9413 if (is_call)
9414 {
9415 insn_group_break (0, CURR_SLOT.qp_regno, 1);
9416 }
9417 else
9418 {
9419 i = 0;
9420 while (i < regdepslen)
9421 {
9422 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
9423 && regdeps[i].link_to_qp_branch
9424 && (regdeps[i].file != CURR_SLOT.src_file
9425 || regdeps[i].line != CURR_SLOT.src_line))
9426 {
9427 /* Treat like a taken branch */
9428 print_dependency ("Removing", i);
9429 regdeps[i] = regdeps[--regdepslen];
9430 }
9431 else
9432 ++i;
9433 }
9434 }
9435 }
9436 }
9437
9438 /* Examine the current instruction for dependency violations. */
9439
9440 static int
9441 check_dv (idesc)
9442 struct ia64_opcode *idesc;
9443 {
9444 if (md.debug_dv)
9445 {
9446 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
9447 idesc->name, CURR_SLOT.src_line,
9448 idesc->dependencies->nchks,
9449 idesc->dependencies->nregs);
9450 }
9451
9452 /* Look through the list of currently marked resources; if the current
9453 instruction has the dependency in its chks list which uses that resource,
9454 check against the specific resources used. */
9455 check_dependencies (idesc);
9456
9457 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
9458 then add them to the list of marked resources. */
9459 mark_resources (idesc);
9460
9461 /* There are several types of dependency semantics, and each has its own
9462 requirements for being cleared
9463
9464 Instruction serialization (insns separated by interruption, rfi, or
9465 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
9466
9467 Data serialization (instruction serialization, or writer + srlz.d +
9468 reader, where writer and srlz.d are in separate groups) clears
9469 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
9470 always be the case).
9471
9472 Instruction group break (groups separated by stop, taken branch,
9473 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
9474 */
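  /* Illustrative only (the precise resource and the serialization it needs
     come from the DV tables): a DVS_INSTR dependency between a writer and
     a reader is satisfied by a sequence such as

	<writer> ;;
	srlz.i ;;
	<reader>

     with each insn in its own group, whereas a DVS_IMPLIED dependency only
     needs the stop (";;") between the two.  */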
9475 update_dependencies (idesc);
9476
9477 /* Sometimes, knowing a register value allows us to avoid giving a false DV
9478 warning. Keep track of as many as possible that are useful. */
9479 note_register_values (idesc);
9480
9481 /* We don't need or want this anymore. */
9482 md.mem_offset.hint = 0;
9483
9484 return 0;
9485 }
9486
9487 /* Translate one line of assembly. Pseudo ops and labels do not show
9488 here. */
9489 void
9490 md_assemble (str)
9491 char *str;
9492 {
9493 char *saved_input_line_pointer, *mnemonic;
9494 const struct pseudo_opcode *pdesc;
9495 struct ia64_opcode *idesc;
9496 unsigned char qp_regno;
9497 unsigned int flags;
9498 int ch;
9499
9500 saved_input_line_pointer = input_line_pointer;
9501 input_line_pointer = str;
9502
9503 /* extract the opcode (mnemonic): */
9504
9505 mnemonic = input_line_pointer;
9506 ch = get_symbol_end ();
9507 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
9508 if (pdesc)
9509 {
9510 *input_line_pointer = ch;
9511 (*pdesc->handler) (pdesc->arg);
9512 goto done;
9513 }
9514
9515 /* Find the instruction descriptor matching the arguments. */
9516
9517 idesc = ia64_find_opcode (mnemonic);
9518 *input_line_pointer = ch;
9519 if (!idesc)
9520 {
9521 as_bad ("Unknown opcode `%s'", mnemonic);
9522 goto done;
9523 }
9524
9525 idesc = parse_operands (idesc);
9526 if (!idesc)
9527 goto done;
9528
9529 /* Handle the dynamic ops we can handle now: */
9530 if (idesc->type == IA64_TYPE_DYN)
9531 {
9532 if (strcmp (idesc->name, "add") == 0)
9533 {
9534 if (CURR_SLOT.opnd[2].X_op == O_register
9535 && CURR_SLOT.opnd[2].X_add_number < 4)
9536 mnemonic = "addl";
9537 else
9538 mnemonic = "adds";
9539 ia64_free_opcode (idesc);
9540 idesc = ia64_find_opcode (mnemonic);
9541 #if 0
9542 know (!idesc->next);
9543 #endif
9544 }
9545 else if (strcmp (idesc->name, "mov") == 0)
9546 {
9547 enum ia64_opnd opnd1, opnd2;
9548 int rop;
9549
9550 opnd1 = idesc->operands[0];
9551 opnd2 = idesc->operands[1];
9552 if (opnd1 == IA64_OPND_AR3)
9553 rop = 0;
9554 else if (opnd2 == IA64_OPND_AR3)
9555 rop = 1;
9556 else
9557 abort ();
9558 if (CURR_SLOT.opnd[rop].X_op == O_register
9559 && ar_is_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
9560 mnemonic = "mov.i";
9561 else
9562 mnemonic = "mov.m";
9563 ia64_free_opcode (idesc);
9564 idesc = ia64_find_opcode (mnemonic);
9565 while (idesc != NULL
9566 && (idesc->operands[0] != opnd1
9567 || idesc->operands[1] != opnd2))
9568 idesc = get_next_opcode (idesc);
9569 }
9570 }
9571
9572 qp_regno = 0;
9573 if (md.qp.X_op == O_register)
9574 {
9575 qp_regno = md.qp.X_add_number - REG_P;
9576 md.qp.X_op = O_absent;
9577 }
9578
9579 flags = idesc->flags;
9580
9581 if ((flags & IA64_OPCODE_FIRST) != 0)
9582 insn_group_break (1, 0, 0);
9583
9584 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
9585 {
9586 as_bad ("`%s' cannot be predicated", idesc->name);
9587 goto done;
9588 }
9589
9590 /* Build the instruction. */
9591 CURR_SLOT.qp_regno = qp_regno;
9592 CURR_SLOT.idesc = idesc;
9593 as_where (&CURR_SLOT.src_file, &CURR_SLOT.src_line);
9594 dwarf2_where (&CURR_SLOT.debug_line);
9595
9596 /* Add unwind entry, if there is one. */
9597 if (unwind.current_entry)
9598 {
9599 CURR_SLOT.unwind_record = unwind.current_entry;
9600 unwind.current_entry = NULL;
9601 }
9602
9603 /* Check for dependency violations. */
9604 if (md.detect_dv)
9605 check_dv (idesc);
9606
9607 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
9608 if (++md.num_slots_in_use >= NUM_SLOTS)
9609 emit_one_bundle ();
9610
9611 if ((flags & IA64_OPCODE_LAST) != 0)
9612 insn_group_break (1, 0, 0);
9613
9614 md.last_text_seg = now_seg;
9615
9616 done:
9617 input_line_pointer = saved_input_line_pointer;
9618 }
9619
9620 /* Called when symbol NAME cannot be found in the symbol table.
9621 Should be used for dynamic valued symbols only. */
9622
9623 symbolS *
9624 md_undefined_symbol (name)
9625 char *name ATTRIBUTE_UNUSED;
9626 {
9627 return 0;
9628 }
9629
9630 /* Called for any expression that cannot be recognized.  When the
9631 function is called, `input_line_pointer' will point to the start of
9632 the expression. */
9633
9634 void
9635 md_operand (e)
9636 expressionS *e;
9637 {
9638 enum pseudo_type pseudo_type;
9639 const char *name;
9640 size_t len;
9641 int ch, i;
9642
9643 switch (*input_line_pointer)
9644 {
9645 case '@':
9646 /* Find what relocation pseudo-function we're dealing with. */
9647 pseudo_type = 0;
9648 ch = *++input_line_pointer;
9649 for (i = 0; i < NELEMS (pseudo_func); ++i)
9650 if (pseudo_func[i].name && pseudo_func[i].name[0] == ch)
9651 {
9652 len = strlen (pseudo_func[i].name);
9653 if (strncmp (pseudo_func[i].name + 1,
9654 input_line_pointer + 1, len - 1) == 0
9655 && !is_part_of_name (input_line_pointer[len]))
9656 {
9657 input_line_pointer += len;
9658 pseudo_type = pseudo_func[i].type;
9659 break;
9660 }
9661 }
9662 switch (pseudo_type)
9663 {
9664 case PSEUDO_FUNC_RELOC:
9665 SKIP_WHITESPACE ();
9666 if (*input_line_pointer != '(')
9667 {
9668 as_bad ("Expected '('");
9669 goto err;
9670 }
9671 /* Skip '('. */
9672 ++input_line_pointer;
9673 expression (e);
9674 if (*input_line_pointer++ != ')')
9675 {
9676 as_bad ("Missing ')'");
9677 goto err;
9678 }
9679 if (e->X_op != O_symbol)
9680 {
9681 if (e->X_op != O_pseudo_fixup)
9682 {
9683 as_bad ("Not a symbolic expression");
9684 goto err;
9685 }
9686 if (S_GET_VALUE (e->X_op_symbol) == FUNC_FPTR_RELATIVE
9687 && i == FUNC_LT_RELATIVE)
9688 i = FUNC_LT_FPTR_RELATIVE;
9689 else
9690 {
9691 as_bad ("Illegal combination of relocation functions");
9692 goto err;
9693 }
9694 }
9695 /* Make sure gas doesn't get rid of local symbols that are used
9696 in relocs. */
9697 e->X_op = O_pseudo_fixup;
9698 e->X_op_symbol = pseudo_func[i].u.sym;
9699 break;
9700
9701 case PSEUDO_FUNC_CONST:
9702 e->X_op = O_constant;
9703 e->X_add_number = pseudo_func[i].u.ival;
9704 break;
9705
9706 case PSEUDO_FUNC_REG:
9707 e->X_op = O_register;
9708 e->X_add_number = pseudo_func[i].u.ival;
9709 break;
9710
9711 default:
9712 name = input_line_pointer - 1;
9713 get_symbol_end ();
9714 as_bad ("Unknown pseudo function `%s'", name);
9715 goto err;
9716 }
9717 break;
9718
9719 case '[':
9720 ++input_line_pointer;
9721 expression (e);
9722 if (*input_line_pointer != ']')
9723 {
9724 as_bad ("Closing bracket misssing");
9725 goto err;
9726 }
9727 else
9728 {
9729 if (e->X_op != O_register)
9730 as_bad ("Register expected as index");
9731
9732 ++input_line_pointer;
9733 e->X_op = O_index;
9734 }
9735 break;
9736
9737 default:
9738 break;
9739 }
9740 return;
9741
9742 err:
9743 ignore_rest_of_line ();
9744 }
9745
9746 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
9747    a section symbol plus some offset.  For relocs involving @fptr(), we
9748    don't want such adjustments since we need to have the original
9749    symbol's name in the reloc.  */
9750 int
9751 ia64_fix_adjustable (fix)
9752 fixS *fix;
9753 {
9754 /* Prevent all adjustments to global symbols */
9755 if (S_IS_EXTERN (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
9756 return 0;
9757
9758 switch (fix->fx_r_type)
9759 {
9760 case BFD_RELOC_IA64_FPTR64I:
9761 case BFD_RELOC_IA64_FPTR32MSB:
9762 case BFD_RELOC_IA64_FPTR32LSB:
9763 case BFD_RELOC_IA64_FPTR64MSB:
9764 case BFD_RELOC_IA64_FPTR64LSB:
9765 case BFD_RELOC_IA64_LTOFF_FPTR22:
9766 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9767 return 0;
9768 default:
9769 break;
9770 }
9771
9772 return 1;
9773 }
9774
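/* Force certain relocations (the @fptr, @ltoff and @pltoff flavors) to be
   emitted into the object file rather than resolved at assembly time.  */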
9775 int
9776 ia64_force_relocation (fix)
9777 fixS *fix;
9778 {
9779 switch (fix->fx_r_type)
9780 {
9781 case BFD_RELOC_IA64_FPTR64I:
9782 case BFD_RELOC_IA64_FPTR32MSB:
9783 case BFD_RELOC_IA64_FPTR32LSB:
9784 case BFD_RELOC_IA64_FPTR64MSB:
9785 case BFD_RELOC_IA64_FPTR64LSB:
9786
9787 case BFD_RELOC_IA64_LTOFF22:
9788 case BFD_RELOC_IA64_LTOFF64I:
9789 case BFD_RELOC_IA64_LTOFF_FPTR22:
9790 case BFD_RELOC_IA64_LTOFF_FPTR64I:
9791 case BFD_RELOC_IA64_PLTOFF22:
9792 case BFD_RELOC_IA64_PLTOFF64I:
9793 case BFD_RELOC_IA64_PLTOFF64MSB:
9794 case BFD_RELOC_IA64_PLTOFF64LSB:
9795 return 1;
9796
9797 default:
9798 return 0;
9799 }
9800 return 0;
9801 }
9802
9803 /* Decide from what point a pc-relative relocation is relative to,
9804 relative to the pc-relative fixup. Er, relatively speaking. */
9805 long
9806 ia64_pcrel_from_section (fix, sec)
9807 fixS *fix;
9808 segT sec;
9809 {
9810 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
9811
9812 if (bfd_get_section_flags (stdoutput, sec) & SEC_CODE)
9813 off &= ~0xfUL;
9814
9815 return off;
9816 }
9817
9818 /* This is called whenever some data item (not an instruction) needs a
9819 fixup. We pick the right reloc code depending on the byteorder
9820 currently in effect. */
9821 void
9822 ia64_cons_fix_new (f, where, nbytes, exp)
9823 fragS *f;
9824 int where;
9825 int nbytes;
9826 expressionS *exp;
9827 {
9828 bfd_reloc_code_real_type code;
9829 fixS *fix;
9830
9831 switch (nbytes)
9832 {
9833       /* There are no relocs for 8 and 16 bit quantities, but we allow
9834 them here since they will work fine as long as the expression
9835 is fully defined at the end of the pass over the source file. */
9836 case 1: code = BFD_RELOC_8; break;
9837 case 2: code = BFD_RELOC_16; break;
9838 case 4:
9839 if (target_big_endian)
9840 code = BFD_RELOC_IA64_DIR32MSB;
9841 else
9842 code = BFD_RELOC_IA64_DIR32LSB;
9843 break;
9844
9845 case 8:
9846 if (target_big_endian)
9847 code = BFD_RELOC_IA64_DIR64MSB;
9848 else
9849 code = BFD_RELOC_IA64_DIR64LSB;
9850 break;
9851
9852 case 16:
9853 if (exp->X_op == O_pseudo_fixup
9854 && exp->X_op_symbol
9855 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
9856 {
9857 if (target_big_endian)
9858 code = BFD_RELOC_IA64_IPLTMSB;
9859 else
9860 code = BFD_RELOC_IA64_IPLTLSB;
9861
9862 exp->X_op = O_symbol;
9863 break;
9864 }
9865 /* FALLTHRU */
9866
9867 default:
9868 as_bad ("Unsupported fixup size %d", nbytes);
9869 ignore_rest_of_line ();
9870 return;
9871 }
9872 if (exp->X_op == O_pseudo_fixup)
9873 {
9874 /* ??? */
9875 exp->X_op = O_symbol;
9876 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
9877 }
9878
9879 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
9880 /* We need to store the byte order in effect in case we're going
9881      to fix an 8 or 16 bit relocation (for which there are no real
9882      relocs available).  See md_apply_fix3 ().  */
9883 fix->tc_fix_data.bigendian = target_big_endian;
9884 }
9885
9886 /* Return the actual relocation we wish to associate with the pseudo
9887 reloc described by SYM and R_TYPE. SYM should be one of the
9888 symbols in the pseudo_func array, or NULL. */
9889
9890 static bfd_reloc_code_real_type
9891 ia64_gen_real_reloc_type (sym, r_type)
9892 struct symbol *sym;
9893 bfd_reloc_code_real_type r_type;
9894 {
9895 bfd_reloc_code_real_type new = 0;
9896
9897 if (sym == NULL)
9898 {
9899 return r_type;
9900 }
9901
9902 switch (S_GET_VALUE (sym))
9903 {
9904 case FUNC_FPTR_RELATIVE:
9905 switch (r_type)
9906 {
9907 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_FPTR64I; break;
9908 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_FPTR32MSB; break;
9909 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_FPTR32LSB; break;
9910 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_FPTR64MSB; break;
9911 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_FPTR64LSB; break;
9912 default: break;
9913 }
9914 break;
9915
9916 case FUNC_GP_RELATIVE:
9917 switch (r_type)
9918 {
9919 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_GPREL22; break;
9920 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_GPREL64I; break;
9921 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_GPREL32MSB; break;
9922 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_GPREL32LSB; break;
9923 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_GPREL64MSB; break;
9924 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_GPREL64LSB; break;
9925 default: break;
9926 }
9927 break;
9928
9929 case FUNC_LT_RELATIVE:
9930 switch (r_type)
9931 {
9932 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_LTOFF22; break;
9933 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_LTOFF64I; break;
9934 default: break;
9935 }
9936 break;
9937
9938 case FUNC_PC_RELATIVE:
9939 switch (r_type)
9940 {
9941 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PCREL22; break;
9942 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PCREL64I; break;
9943 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_PCREL32MSB; break;
9944 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_PCREL32LSB; break;
9945 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PCREL64MSB; break;
9946 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PCREL64LSB; break;
9947 default: break;
9948 }
9949 break;
9950
9951 case FUNC_PLT_RELATIVE:
9952 switch (r_type)
9953 {
9954 case BFD_RELOC_IA64_IMM22: new = BFD_RELOC_IA64_PLTOFF22; break;
9955 case BFD_RELOC_IA64_IMM64: new = BFD_RELOC_IA64_PLTOFF64I; break;
9956 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_PLTOFF64MSB;break;
9957 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_PLTOFF64LSB;break;
9958 default: break;
9959 }
9960 break;
9961
9962 case FUNC_SEC_RELATIVE:
9963 switch (r_type)
9964 {
9965 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SECREL32MSB;break;
9966 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SECREL32LSB;break;
9967 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SECREL64MSB;break;
9968 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SECREL64LSB;break;
9969 default: break;
9970 }
9971 break;
9972
9973 case FUNC_SEG_RELATIVE:
9974 switch (r_type)
9975 {
9976 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_SEGREL32MSB;break;
9977 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_SEGREL32LSB;break;
9978 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_SEGREL64MSB;break;
9979 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_SEGREL64LSB;break;
9980 default: break;
9981 }
9982 break;
9983
9984 case FUNC_LTV_RELATIVE:
9985 switch (r_type)
9986 {
9987 case BFD_RELOC_IA64_DIR32MSB: new = BFD_RELOC_IA64_LTV32MSB; break;
9988 case BFD_RELOC_IA64_DIR32LSB: new = BFD_RELOC_IA64_LTV32LSB; break;
9989 case BFD_RELOC_IA64_DIR64MSB: new = BFD_RELOC_IA64_LTV64MSB; break;
9990 case BFD_RELOC_IA64_DIR64LSB: new = BFD_RELOC_IA64_LTV64LSB; break;
9991 default: break;
9992 }
9993 break;
9994
9995 case FUNC_LT_FPTR_RELATIVE:
9996 switch (r_type)
9997 {
9998 case BFD_RELOC_IA64_IMM22:
9999 new = BFD_RELOC_IA64_LTOFF_FPTR22; break;
10000 case BFD_RELOC_IA64_IMM64:
10001 new = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
10002 default:
10003 break;
10004 }
10005 break;
10006
10007 default:
10008 abort ();
10009 }
10010 /* Hmmmm. Should this ever occur? */
10011 if (new)
10012 return new;
10013 else
10014 return r_type;
10015 }
10016
10017 /* Here is where we validate the fix generated for a pseudo relocation
10018    function; e.g., @fptr() relocations may not carry an addend.  */
10019 void
10020 ia64_validate_fix (fix)
10021 fixS *fix;
10022 {
10023 switch (fix->fx_r_type)
10024 {
10025 case BFD_RELOC_IA64_FPTR64I:
10026 case BFD_RELOC_IA64_FPTR32MSB:
10027 case BFD_RELOC_IA64_FPTR64LSB:
10028 case BFD_RELOC_IA64_LTOFF_FPTR22:
10029 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10030 if (fix->fx_offset != 0)
10031 as_bad_where (fix->fx_file, fix->fx_line,
10032 "No addend allowed in @fptr() relocation");
10033 break;
10034 default:
10035 break;
10036 }
10037
10038 return;
10039 }
10040
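/* Patch VALUE into the instruction addressed by FIX.  The 128-bit bundle
   is unpacked into its three 41-bit slots; IMMU64, IMMU62 and TGT64
   operands, which spill across slot boundaries, are handled inline, and
   all other operands are inserted via ODESC's insert routine.  */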
10041 static void
10042 fix_insn (fix, odesc, value)
10043 fixS *fix;
10044 const struct ia64_operand *odesc;
10045 valueT value;
10046 {
10047 bfd_vma insn[3], t0, t1, control_bits;
10048 const char *err;
10049 char *fixpos;
10050 long slot;
10051
10052 slot = fix->fx_where & 0x3;
10053 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
10054
10055 /* Bundles are always in little-endian byte order */
10056 t0 = bfd_getl64 (fixpos);
10057 t1 = bfd_getl64 (fixpos + 8);
10058 control_bits = t0 & 0x1f;
10059 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
10060 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
10061 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
10062
10063 err = NULL;
10064 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
10065 {
10066 insn[1] = (value >> 22) & 0x1ffffffffffLL;
10067 insn[2] |= (((value & 0x7f) << 13)
10068 | (((value >> 7) & 0x1ff) << 27)
10069 | (((value >> 16) & 0x1f) << 22)
10070 | (((value >> 21) & 0x1) << 21)
10071 | (((value >> 63) & 0x1) << 36));
10072 }
10073 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
10074 {
10075 if (value & ~0x3fffffffffffffffULL)
10076 err = "integer operand out of range";
10077 insn[1] = (value >> 21) & 0x1ffffffffffLL;
10078 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
10079 }
10080 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
10081 {
10082 value >>= 4;
10083 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
10084 insn[2] |= ((((value >> 59) & 0x1) << 36)
10085 | (((value >> 0) & 0xfffff) << 13));
10086 }
10087 else
10088 err = (*odesc->insert) (odesc, value, insn + slot);
10089
10090 if (err)
10091 as_bad_where (fix->fx_file, fix->fx_line, err);
10092
10093 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
10094 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
10095 number_to_chars_littleendian (fixpos + 0, t0, 8);
10096 number_to_chars_littleendian (fixpos + 8, t1, 8);
10097 }
10098
10099 /* Attempt to simplify or even eliminate a fixup. The return value is
10100 ignored; perhaps it was once meaningful, but now it is historical.
10101 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
10102
10103 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
10104 (if possible). */
10105 int
10106 md_apply_fix3 (fix, valuep, seg)
10107 fixS *fix;
10108 valueT *valuep;
10109 segT seg ATTRIBUTE_UNUSED;
10110 {
10111 char *fixpos;
10112 valueT value = *valuep;
10113 int adjust = 0;
10114
10115 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
10116
10117 if (fix->fx_pcrel)
10118 {
10119 switch (fix->fx_r_type)
10120 {
10121 case BFD_RELOC_IA64_DIR32MSB:
10122 fix->fx_r_type = BFD_RELOC_IA64_PCREL32MSB;
10123 adjust = 1;
10124 break;
10125
10126 case BFD_RELOC_IA64_DIR32LSB:
10127 fix->fx_r_type = BFD_RELOC_IA64_PCREL32LSB;
10128 adjust = 1;
10129 break;
10130
10131 case BFD_RELOC_IA64_DIR64MSB:
10132 fix->fx_r_type = BFD_RELOC_IA64_PCREL64MSB;
10133 adjust = 1;
10134 break;
10135
10136 case BFD_RELOC_IA64_DIR64LSB:
10137 fix->fx_r_type = BFD_RELOC_IA64_PCREL64LSB;
10138 adjust = 1;
10139 break;
10140
10141 default:
10142 break;
10143 }
10144 }
10145 if (fix->fx_addsy)
10146 {
10147 if (fix->fx_r_type == (int) BFD_RELOC_UNUSED)
10148 {
10149 /* This must be a TAG13 or TAG13b operand. There are no external
10150 relocs defined for them, so we must give an error. */
10151 as_bad_where (fix->fx_file, fix->fx_line,
10152 "%s must have a constant value",
10153 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
10154 fix->fx_done = 1;
10155 return 1;
10156 }
10157
10158 /* ??? This is a hack copied from tc-i386.c to make PCREL relocs
10159 work. There should be a better way to handle this. */
10160 if (adjust)
10161 fix->fx_offset += fix->fx_where + fix->fx_frag->fr_address;
10162 }
10163 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
10164 {
10165 if (fix->tc_fix_data.bigendian)
10166 number_to_chars_bigendian (fixpos, value, fix->fx_size);
10167 else
10168 number_to_chars_littleendian (fixpos, value, fix->fx_size);
10169 fix->fx_done = 1;
10170 return 1;
10171 }
10172 else
10173 {
10174 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
10175 fix->fx_done = 1;
10176 return 1;
10177 }
10178 return 1;
10179 }
10180
10181 /* Generate the BFD reloc to be stuck in the object file from the
10182 fixup used internally in the assembler. */
10183
10184 arelent *
10185 tc_gen_reloc (sec, fixp)
10186 asection *sec ATTRIBUTE_UNUSED;
10187 fixS *fixp;
10188 {
10189 arelent *reloc;
10190
10191 reloc = xmalloc (sizeof (*reloc));
10192 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10193 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10194 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
10195 reloc->addend = fixp->fx_offset;
10196 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
10197
10198 if (!reloc->howto)
10199 {
10200 as_bad_where (fixp->fx_file, fixp->fx_line,
10201 "Cannot represent %s relocation in object file",
10202 bfd_get_reloc_code_name (fixp->fx_r_type));
10203 }
10204 return reloc;
10205 }
10206
10207 /* Turn a string in input_line_pointer into a floating point constant
10208 of type TYPE, and store the appropriate bytes in *LIT. The number
10209 of LITTLENUMS emitted is stored in *SIZE. An error message is
10210 returned, or NULL on OK. */
10211
10212 #define MAX_LITTLENUMS 5
10213
10214 char *
10215 md_atof (type, lit, size)
10216 int type;
10217 char *lit;
10218 int *size;
10219 {
10220 LITTLENUM_TYPE words[MAX_LITTLENUMS];
10221 LITTLENUM_TYPE *word;
10222 char *t;
10223 int prec;
10224
10225 switch (type)
10226 {
10227 /* IEEE floats */
10228 case 'f':
10229 case 'F':
10230 case 's':
10231 case 'S':
10232 prec = 2;
10233 break;
10234
10235 case 'd':
10236 case 'D':
10237 case 'r':
10238 case 'R':
10239 prec = 4;
10240 break;
10241
10242 case 'x':
10243 case 'X':
10244 case 'p':
10245 case 'P':
10246 prec = 5;
10247 break;
10248
10249 default:
10250 *size = 0;
10251 return "Bad call to MD_ATOF()";
10252 }
10253 t = atof_ieee (input_line_pointer, type, words);
10254 if (t)
10255 input_line_pointer = t;
10256 *size = prec * sizeof (LITTLENUM_TYPE);
10257
10258 for (word = words + prec - 1; prec--;)
10259 {
10260 md_number_to_chars (lit, (long) (*word--), sizeof (LITTLENUM_TYPE));
10261 lit += sizeof (LITTLENUM_TYPE);
10262 }
10263 return 0;
10264 }
10265
10266 /* Round up a section's size to the appropriate boundary. */
10267 valueT
10268 md_section_align (seg, size)
10269 segT seg;
10270 valueT size;
10271 {
10272 int align = bfd_get_section_alignment (stdoutput, seg);
10273 valueT mask = ((valueT) 1 << align) - 1;
10274
10275 return (size + mask) & ~mask;
10276 }
10277
10278 /* Handle ia64 specific semantics of the align directive. */
10279
10280 void
10281 ia64_md_do_align (n, fill, len, max)
10282 int n ATTRIBUTE_UNUSED;
10283 const char *fill ATTRIBUTE_UNUSED;
10284 int len ATTRIBUTE_UNUSED;
10285 int max ATTRIBUTE_UNUSED;
10286 {
10287 if (subseg_text_p (now_seg))
10288 ia64_flush_insns ();
10289 }
10290
10291 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
10292 of an rs_align_code fragment. */
10293
10294 void
10295 ia64_handle_align (fragp)
10296 fragS *fragp;
10297 {
10298 /* Use mfi bundle of nops with no stop bits. */
10299 static const unsigned char be_nop[]
10300 = { 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
10301 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c};
10302 static const unsigned char le_nop[]
10303 = { 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
10304 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00};
10305
10306 int bytes;
10307 char *p;
10308
10309 if (fragp->fr_type != rs_align_code)
10310 return;
10311
10312 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
10313 p = fragp->fr_literal + fragp->fr_fix;
10314
10315 /* Make sure we are on a 16-byte boundary, in case someone has been
10316 putting data into a text section. */
10317 if (bytes & 15)
10318 {
10319 int fix = bytes & 15;
10320 memset (p, 0, fix);
10321 p += fix;
10322 bytes -= fix;
10323 fragp->fr_fix += fix;
10324 }
10325
10326 memcpy (p, (target_big_endian ? be_nop : le_nop), 16);
10327 fragp->fr_var = 16;
10328 }