1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2019 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35 (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54
55 #ifdef HAVE_LIMITS_H
56 #include <limits.h>
57 #endif
58
59 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
60
61 /* Some systems define MIN in, e.g., param.h. */
62 #undef MIN
63 #define MIN(a,b) ((a) < (b) ? (a) : (b))
64
65 #define NUM_SLOTS 4
66 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
67 #define CURR_SLOT md.slot[md.curr_slot]
68
69 #define O_pseudo_fixup (O_max + 1)
70
71 enum special_section
72 {
73 /* IA-64 ABI section pseudo-ops. */
74 SPECIAL_SECTION_BSS = 0,
75 SPECIAL_SECTION_SBSS,
76 SPECIAL_SECTION_SDATA,
77 SPECIAL_SECTION_RODATA,
78 SPECIAL_SECTION_COMMENT,
79 SPECIAL_SECTION_UNWIND,
80 SPECIAL_SECTION_UNWIND_INFO,
81 /* HPUX specific section pseudo-ops. */
82 SPECIAL_SECTION_INIT_ARRAY,
83 SPECIAL_SECTION_FINI_ARRAY,
84 };
85
86 enum reloc_func
87 {
88 FUNC_DTP_MODULE,
89 FUNC_DTP_RELATIVE,
90 FUNC_FPTR_RELATIVE,
91 FUNC_GP_RELATIVE,
92 FUNC_LT_RELATIVE,
93 FUNC_LT_RELATIVE_X,
94 FUNC_PC_RELATIVE,
95 FUNC_PLT_RELATIVE,
96 FUNC_SEC_RELATIVE,
97 FUNC_SEG_RELATIVE,
98 FUNC_TP_RELATIVE,
99 FUNC_LTV_RELATIVE,
100 FUNC_LT_FPTR_RELATIVE,
101 FUNC_LT_DTP_MODULE,
102 FUNC_LT_DTP_RELATIVE,
103 FUNC_LT_TP_RELATIVE,
104 FUNC_IPLT_RELOC,
105 #ifdef TE_VMS
106 FUNC_SLOTCOUNT_RELOC,
107 #endif
108 };
109
110 enum reg_symbol
111 {
112 REG_GR = 0,
113 REG_FR = (REG_GR + 128),
114 REG_AR = (REG_FR + 128),
115 REG_CR = (REG_AR + 128),
116 REG_DAHR = (REG_CR + 128),
117 REG_P = (REG_DAHR + 8),
118 REG_BR = (REG_P + 64),
119 REG_IP = (REG_BR + 8),
120 REG_CFM,
121 REG_PR,
122 REG_PR_ROT,
123 REG_PSR,
124 REG_PSR_L,
125 REG_PSR_UM,
126 /* The following are pseudo-registers for use by gas only. */
127 IND_CPUID,
128 IND_DBR,
129 IND_DTR,
130 IND_ITR,
131 IND_IBR,
132 IND_MSR,
133 IND_PKR,
134 IND_PMC,
135 IND_PMD,
136 IND_DAHR,
137 IND_RR,
138 /* The following pseudo-registers are used for unwind directives only: */
139 REG_PSP,
140 REG_PRIUNAT,
141 REG_NUM
142 };
143
144 enum dynreg_type
145 {
146 DYNREG_GR = 0, /* dynamic general purpose register */
147 DYNREG_FR, /* dynamic floating point register */
148 DYNREG_PR, /* dynamic predicate register */
149 DYNREG_NUM_TYPES
150 };
151
152 enum operand_match_result
153 {
154 OPERAND_MATCH,
155 OPERAND_OUT_OF_RANGE,
156 OPERAND_MISMATCH
157 };
158
159 /* On the ia64, we can't know the address of a text label until the
160 instructions are packed into a bundle. To handle this, we keep
161 track of the list of labels that appear in front of each
162 instruction. */
163 struct label_fix
164 {
165 struct label_fix *next;
166 struct symbol *sym;
167 bfd_boolean dw2_mark_labels;
168 };
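/* A minimal sketch (not compiled): once the owning slot's address is
   finally known, a pending label_fix chain can be resolved by binding
   each symbol, much as ia64_flush_insns () below does with
   symbol_set_value_now ().  */
#if 0
static void
resolve_label_fixes_sketch (struct label_fix *lfix)
{
  for (; lfix != NULL; lfix = lfix->next)
    /* Bind the label to the current output position.  */
    symbol_set_value_now (lfix->sym);
}
#endif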
169
170 #ifdef TE_VMS
171 /* An internally used relocation. */
172 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
173 #endif
174
175 /* This is the endianness of the current section. */
176 extern int target_big_endian;
177
178 /* This is the default endianness. */
179 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
180
181 void (*ia64_number_to_chars) (char *, valueT, int);
182
183 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
184 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
185
186 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
187
188 static struct hash_control *alias_hash;
189 static struct hash_control *alias_name_hash;
190 static struct hash_control *secalias_hash;
191 static struct hash_control *secalias_name_hash;
192
193 /* List of chars besides those in app.c:symbol_chars that can start an
194 operand. Used to prevent the scrubber eating vital white-space. */
195 const char ia64_symbol_chars[] = "@?";
196
197 /* Characters which always start a comment. */
198 const char comment_chars[] = "";
199
200 /* Characters which start a comment at the beginning of a line. */
201 const char line_comment_chars[] = "#";
202
203 /* Characters which may be used to separate multiple commands on a
204 single line. */
205 const char line_separator_chars[] = ";{}";
206
207 /* Characters which are used to indicate an exponent in a floating
208 point number. */
209 const char EXP_CHARS[] = "eE";
210
211 /* Characters which mean that a number is a floating point constant,
212 as in 0d1.0. */
213 const char FLT_CHARS[] = "rRsSfFdDxXpP";
214
215 /* ia64-specific option processing: */
216
217 const char *md_shortopts = "m:N:x::";
218
219 struct option md_longopts[] =
220 {
221 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
222 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
223 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
224 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
225 };
226
227 size_t md_longopts_size = sizeof (md_longopts);
228
229 static struct
230 {
231 struct hash_control *pseudo_hash; /* pseudo opcode hash table */
232 struct hash_control *reg_hash; /* register name hash table */
233 struct hash_control *dynreg_hash; /* dynamic register hash table */
234 struct hash_control *const_hash; /* constant hash table */
235 struct hash_control *entry_hash; /* code entry hint hash table */
236
237 /* If X_op is not O_absent, the register name for the instruction's
238 qualifying predicate. If NULL, p0 is assumed for instructions
239 that are predicable. */
240 expressionS qp;
241
242 /* Optimize for which CPU. */
243 enum
244 {
245 itanium1,
246 itanium2
247 } tune;
248
249 /* What to do when hint.b is used. */
250 enum
251 {
252 hint_b_error,
253 hint_b_warning,
254 hint_b_ok
255 } hint_b;
256
257 unsigned int
258 manual_bundling : 1,
259 debug_dv: 1,
260 detect_dv: 1,
261 explicit_mode : 1, /* which mode we're in */
262 default_explicit_mode : 1, /* which mode is the default */
263 mode_explicitly_set : 1, /* was the current mode explicitly set? */
264 auto_align : 1,
265 keep_pending_output : 1;
266
267 /* What to do when something is wrong with unwind directives. */
268 enum
269 {
270 unwind_check_warning,
271 unwind_check_error
272 } unwind_check;
273
274 /* Each bundle consists of up to three instructions. We keep track of the
275 four most recent instructions so we can correctly set the end_of_insn_group
276 for the last instruction in a bundle (see the sketch following this struct). */
277 int curr_slot;
278 int num_slots_in_use;
279 struct slot
280 {
281 unsigned int
282 end_of_insn_group : 1,
283 manual_bundling_on : 1,
284 manual_bundling_off : 1,
285 loc_directive_seen : 1;
286 signed char user_template; /* user-selected template, if any */
287 unsigned char qp_regno; /* qualifying predicate */
288 /* This duplicates a good fraction of "struct fix" but we
289 can't use a "struct fix" instead since we can't call
290 fix_new_exp() until we know the address of the instruction. */
291 int num_fixups;
292 struct insn_fix
293 {
294 bfd_reloc_code_real_type code;
295 enum ia64_opnd opnd; /* type of operand in need of fix */
296 unsigned int is_pcrel : 1; /* is operand pc-relative? */
297 expressionS expr; /* the value to be inserted */
298 }
299 fixup[2]; /* at most two fixups per insn */
300 struct ia64_opcode *idesc;
301 struct label_fix *label_fixups;
302 struct label_fix *tag_fixups;
303 struct unw_rec_list *unwind_record; /* Unwind directive. */
304 expressionS opnd[6];
305 const char *src_file;
306 unsigned int src_line;
307 struct dwarf2_line_info debug_line;
308 }
309 slot[NUM_SLOTS];
310
311 segT last_text_seg;
312
313 struct dynreg
314 {
315 struct dynreg *next; /* next dynamic register */
316 const char *name;
317 unsigned short base; /* the base register number */
318 unsigned short num_regs; /* # of registers in this set */
319 }
320 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
321
322 flagword flags; /* ELF-header flags */
323
324 struct mem_offset {
325 unsigned hint:1; /* is this hint currently valid? */
326 bfd_vma offset; /* mem.offset offset */
327 bfd_vma base; /* mem.offset base */
328 } mem_offset;
329
330 int path; /* number of alt. entry points seen */
331 const char **entry_labels; /* labels of all alternate paths in
332 the current DV-checking block. */
333 int maxpaths; /* size currently allocated for
334 entry_labels */
335
336 int pointer_size; /* size in bytes of a pointer */
337 int pointer_size_shift; /* shift size of a pointer for alignment */
338
339 symbolS *indregsym[IND_RR - IND_CPUID + 1];
340 }
341 md;
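/* A minimal sketch (not compiled): slot[] above is used as a circular
   buffer, so moving to the next slot and looking back at the previous
   one are both modulo-NUM_SLOTS operations, matching the CURR_SLOT and
   PREV_SLOT macros near the top of this file.  */
#if 0
static void
advance_slot_sketch (void)
{
  /* With md.curr_slot == 0, PREV_SLOT refers to md.slot[NUM_SLOTS - 1].  */
  md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
  md.num_slots_in_use++;
}
#endif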
342
343 /* These are not const, because they are modified to MMI for non-itanium1
344 targets below. */
345 /* MFI bundle of nops. */
346 static unsigned char le_nop[16] =
347 {
348 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
349 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
350 };
351 /* MFI bundle of nops with stop-bit. */
352 static unsigned char le_nop_stop[16] =
353 {
354 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
355 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
356 };
357
358 /* application registers: */
359
360 #define AR_K0 0
361 #define AR_K7 7
362 #define AR_RSC 16
363 #define AR_BSP 17
364 #define AR_BSPSTORE 18
365 #define AR_RNAT 19
366 #define AR_FCR 21
367 #define AR_EFLAG 24
368 #define AR_CSD 25
369 #define AR_SSD 26
370 #define AR_CFLG 27
371 #define AR_FSR 28
372 #define AR_FIR 29
373 #define AR_FDR 30
374 #define AR_CCV 32
375 #define AR_UNAT 36
376 #define AR_FPSR 40
377 #define AR_ITC 44
378 #define AR_RUC 45
379 #define AR_PFS 64
380 #define AR_LC 65
381 #define AR_EC 66
382
383 static const struct
384 {
385 const char *name;
386 unsigned int regnum;
387 }
388 ar[] =
389 {
390 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
391 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
392 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
393 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
394 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
395 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
396 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
397 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
398 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
399 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
400 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
401 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
402 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
403 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
404 };
405
406 /* control registers: */
407
408 #define CR_DCR 0
409 #define CR_ITM 1
410 #define CR_IVA 2
411 #define CR_PTA 8
412 #define CR_GPTA 9
413 #define CR_IPSR 16
414 #define CR_ISR 17
415 #define CR_IIP 19
416 #define CR_IFA 20
417 #define CR_ITIR 21
418 #define CR_IIPA 22
419 #define CR_IFS 23
420 #define CR_IIM 24
421 #define CR_IHA 25
422 #define CR_IIB0 26
423 #define CR_IIB1 27
424 #define CR_LID 64
425 #define CR_IVR 65
426 #define CR_TPR 66
427 #define CR_EOI 67
428 #define CR_IRR0 68
429 #define CR_IRR3 71
430 #define CR_ITV 72
431 #define CR_PMV 73
432 #define CR_CMCV 74
433 #define CR_LRR0 80
434 #define CR_LRR1 81
435
436 static const struct
437 {
438 const char *name;
439 unsigned int regnum;
440 }
441 cr[] =
442 {
443 {"cr.dcr", CR_DCR},
444 {"cr.itm", CR_ITM},
445 {"cr.iva", CR_IVA},
446 {"cr.pta", CR_PTA},
447 {"cr.gpta", CR_GPTA},
448 {"cr.ipsr", CR_IPSR},
449 {"cr.isr", CR_ISR},
450 {"cr.iip", CR_IIP},
451 {"cr.ifa", CR_IFA},
452 {"cr.itir", CR_ITIR},
453 {"cr.iipa", CR_IIPA},
454 {"cr.ifs", CR_IFS},
455 {"cr.iim", CR_IIM},
456 {"cr.iha", CR_IHA},
457 {"cr.iib0", CR_IIB0},
458 {"cr.iib1", CR_IIB1},
459 {"cr.lid", CR_LID},
460 {"cr.ivr", CR_IVR},
461 {"cr.tpr", CR_TPR},
462 {"cr.eoi", CR_EOI},
463 {"cr.irr0", CR_IRR0},
464 {"cr.irr1", CR_IRR0 + 1},
465 {"cr.irr2", CR_IRR0 + 2},
466 {"cr.irr3", CR_IRR3},
467 {"cr.itv", CR_ITV},
468 {"cr.pmv", CR_PMV},
469 {"cr.cmcv", CR_CMCV},
470 {"cr.lrr0", CR_LRR0},
471 {"cr.lrr1", CR_LRR1}
472 };
473
474 #define PSR_MFL 4
475 #define PSR_IC 13
476 #define PSR_DFL 18
477 #define PSR_CPL 32
478
479 static const struct const_desc
480 {
481 const char *name;
482 valueT value;
483 }
484 const_bits[] =
485 {
486 /* PSR constant masks: */
487
488 /* 0: reserved */
489 {"psr.be", ((valueT) 1) << 1},
490 {"psr.up", ((valueT) 1) << 2},
491 {"psr.ac", ((valueT) 1) << 3},
492 {"psr.mfl", ((valueT) 1) << 4},
493 {"psr.mfh", ((valueT) 1) << 5},
494 /* 6-12: reserved */
495 {"psr.ic", ((valueT) 1) << 13},
496 {"psr.i", ((valueT) 1) << 14},
497 {"psr.pk", ((valueT) 1) << 15},
498 /* 16: reserved */
499 {"psr.dt", ((valueT) 1) << 17},
500 {"psr.dfl", ((valueT) 1) << 18},
501 {"psr.dfh", ((valueT) 1) << 19},
502 {"psr.sp", ((valueT) 1) << 20},
503 {"psr.pp", ((valueT) 1) << 21},
504 {"psr.di", ((valueT) 1) << 22},
505 {"psr.si", ((valueT) 1) << 23},
506 {"psr.db", ((valueT) 1) << 24},
507 {"psr.lp", ((valueT) 1) << 25},
508 {"psr.tb", ((valueT) 1) << 26},
509 {"psr.rt", ((valueT) 1) << 27},
510 /* 28-31: reserved */
511 /* 32-33: cpl (current privilege level) */
512 {"psr.is", ((valueT) 1) << 34},
513 {"psr.mc", ((valueT) 1) << 35},
514 {"psr.it", ((valueT) 1) << 36},
515 {"psr.id", ((valueT) 1) << 37},
516 {"psr.da", ((valueT) 1) << 38},
517 {"psr.dd", ((valueT) 1) << 39},
518 {"psr.ss", ((valueT) 1) << 40},
519 /* 41-42: ri (restart instruction) */
520 {"psr.ed", ((valueT) 1) << 43},
521 {"psr.bn", ((valueT) 1) << 44},
522 };
523
524 /* indirect register-sets/memory: */
525
526 static const struct
527 {
528 const char *name;
529 unsigned int regnum;
530 }
531 indirect_reg[] =
532 {
533 { "CPUID", IND_CPUID },
534 { "cpuid", IND_CPUID },
535 { "dbr", IND_DBR },
536 { "dtr", IND_DTR },
537 { "itr", IND_ITR },
538 { "ibr", IND_IBR },
539 { "msr", IND_MSR },
540 { "pkr", IND_PKR },
541 { "pmc", IND_PMC },
542 { "pmd", IND_PMD },
543 { "dahr", IND_DAHR },
544 { "rr", IND_RR },
545 };
546
547 /* Pseudo functions used to indicate relocation types (these functions
548 start with an at sign (@)). */
549 static struct
550 {
551 const char *name;
552 enum pseudo_type
553 {
554 PSEUDO_FUNC_NONE,
555 PSEUDO_FUNC_RELOC,
556 PSEUDO_FUNC_CONST,
557 PSEUDO_FUNC_REG,
558 PSEUDO_FUNC_FLOAT
559 }
560 type;
561 union
562 {
563 unsigned long ival;
564 symbolS *sym;
565 }
566 u;
567 }
568 pseudo_func[] =
569 {
570 /* reloc pseudo functions (these must come first!): */
571 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
572 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
574 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
575 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
576 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
577 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
579 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
581 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
582 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
585 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
586 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
587 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
588 #ifdef TE_VMS
589 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
590 #endif
591
592 /* mbtype4 constants: */
593 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
594 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
595 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
596 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
597 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
598
599 /* fclass constants: */
600 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
601 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
602 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
603 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
604 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
605 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
606 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
607 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
608 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
609
610 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
611
612 /* hint constants: */
613 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
614 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
615
616 /* tf constants: */
617 { "clz", PSEUDO_FUNC_CONST, { 32 } },
618 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
619 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
620
621 /* unwind-related constants: */
622 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
623 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
624 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
625 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
626 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
627 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
628 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
629
630 /* unwind-related registers: */
631 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
632 };
633
634 /* 41-bit nop opcodes (one per unit): */
635 static const bfd_vma nop[IA64_NUM_UNITS] =
636 {
637 0x0000000000LL, /* NIL => break 0 */
638 0x0008000000LL, /* I-unit nop */
639 0x0008000000LL, /* M-unit nop */
640 0x4000000000LL, /* B-unit nop */
641 0x0008000000LL, /* F-unit nop */
642 0x0000000000LL, /* L-"unit" nop immediate */
643 0x0008000000LL, /* X-unit nop */
644 };
645
646 /* Can't be `const' as it's passed to input routines (which have the
647 habit of setting temporary sentinels). */
648 static char special_section_name[][20] =
649 {
650 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
651 {".IA_64.unwind"}, {".IA_64.unwind_info"},
652 {".init_array"}, {".fini_array"}
653 };
654
655 /* The best template for a particular sequence of up to three
656 instructions: */
657 #define N IA64_NUM_TYPES
658 static unsigned char best_template[N][N][N];
659 #undef N
660
661 /* Resource dependencies currently in effect */
662 static struct rsrc {
663 int depind; /* dependency index */
664 const struct ia64_dependency *dependency; /* actual dependency */
665 unsigned specific:1, /* is this a specific bit/regno? */
666 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
667 int index; /* specific regno/bit within dependency */
668 int note; /* optional qualifying note (0 if none) */
669 #define STATE_NONE 0
670 #define STATE_STOP 1
671 #define STATE_SRLZ 2
672 int insn_srlz; /* current insn serialization state */
673 int data_srlz; /* current data serialization state */
674 int qp_regno; /* qualifying predicate for this usage */
675 const char *file; /* what file marked this dependency */
676 unsigned int line; /* what line marked this dependency */
677 struct mem_offset mem_offset; /* optional memory offset hint */
678 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
679 int path; /* corresponding code entry index */
680 } *regdeps = NULL;
681 static int regdepslen = 0;
682 static int regdepstotlen = 0;
683 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
684 static const char *dv_sem[] = { "none", "implied", "impliedf",
685 "data", "instr", "specific", "stop", "other" };
686 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
687
688 /* Current state of PR mutexation */
689 static struct qpmutex {
690 valueT prmask;
691 int path;
692 } *qp_mutexes = NULL; /* QP mutex bitmasks */
693 static int qp_mutexeslen = 0;
694 static int qp_mutexestotlen = 0;
695 static valueT qp_safe_across_calls = 0;
696
697 /* Current state of PR implications */
698 static struct qp_imply {
699 unsigned p1:6;
700 unsigned p2:6;
701 unsigned p2_branched:1;
702 int path;
703 } *qp_implies = NULL;
704 static int qp_implieslen = 0;
705 static int qp_impliestotlen = 0;
706
707 /* Keep track of static GR values so that indirect register usage can
708 sometimes be tracked. */
709 static struct gr {
710 unsigned known:1;
711 int path;
712 valueT value;
713 } gr_values[128] = {
714 {
715 1,
716 #ifdef INT_MAX
717 INT_MAX,
718 #else
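/* The expression below builds the maximum value of the signed `path'
   member without overflowing: ((2^(bits-2) - 1) << 1) + 1 == 2^(bits-1) - 1.  */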
719 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
720 #endif
721 0
722 }
723 };
724
725 /* Remember the alignment frag. */
726 static fragS *align_frag;
727
728 /* These are the routines required to output the various types of
729 unwind records. */
730
731 /* A slot_number is a frag address plus the slot index (0-2). We use the
732 frag address here so that if there is a section switch in the middle of
733 a function, then instructions emitted to a different section are not
734 counted. Since there may be more than one frag for a function, this
735 means we also need to keep track of which frag this address belongs to
736 so we can compute inter-frag distances. This also nicely solves the
737 problem with nops emitted for align directives, which can't easily be
738 counted, but can easily be derived from frag sizes. */
739
740 typedef struct unw_rec_list {
741 unwind_record r;
742 unsigned long slot_number;
743 fragS *slot_frag;
744 struct unw_rec_list *next;
745 } unw_rec_list;
746
747 #define SLOT_NUM_NOT_SET (unsigned)-1
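/* A minimal sketch (not compiled), assuming the usual construction: a
   slot_number combines the literal address within the current frag with
   a slot index of 0-2, as done for the prologue/body/endp records in
   ia64_flush_insns () below.  */
#if 0
static unsigned long
make_slot_number_sketch (unsigned int slot_index /* 0 .. 2 */)
{
  return (unsigned long) frag_more (0) + slot_index;
}
#endif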
748
749 /* Linked list of saved prologue counts. A very poor
750 implementation of a map from label numbers to prologue counts. */
751 typedef struct label_prologue_count
752 {
753 struct label_prologue_count *next;
754 unsigned long label_number;
755 unsigned int prologue_count;
756 } label_prologue_count;
757
758 typedef struct proc_pending
759 {
760 symbolS *sym;
761 struct proc_pending *next;
762 } proc_pending;
763
764 static struct
765 {
766 /* Maintain a list of unwind entries for the current function. */
767 unw_rec_list *list;
768 unw_rec_list *tail;
769
770 /* Any unwind entries that should be attached to the current slot
771 that an insn is being constructed for. */
772 unw_rec_list *current_entry;
773
774 /* These are used to create the unwind table entry for this function. */
775 proc_pending proc_pending;
776 symbolS *info; /* pointer to unwind info */
777 symbolS *personality_routine;
778 segT saved_text_seg;
779 subsegT saved_text_subseg;
780 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
781
782 /* TRUE if processing unwind directives in a prologue region. */
783 unsigned int prologue : 1;
784 unsigned int prologue_mask : 4;
785 unsigned int prologue_gr : 7;
786 unsigned int body : 1;
787 unsigned int insn : 1;
788 unsigned int prologue_count; /* number of .prologues seen so far */
789 /* Prologue counts at previous .label_state directives. */
790 struct label_prologue_count * saved_prologue_counts;
791
792 /* List of split up .save-s. */
793 unw_p_record *pending_saves;
794 } unwind;
795
796 /* The input value is a negated offset from psp, and specifies an address
797 psp - offset. The encoded value E locates that address as psp + 16 - (4 * E).
798 Thus we must add 16 to the offset and divide by 4 to get the encoded value. */
799
800 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
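/* Worked example: an input offset of 32 (i.e. address psp - 32) encodes as
   (32 + 16) / 4 == 12, and decoding gives psp + 16 - 4 * 12 == psp - 32.  */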
801
802 typedef void (*vbyte_func) (int, char *, char *);
803
804 /* Forward declarations: */
805 static void dot_alias (int);
806 static int parse_operand_and_eval (expressionS *, int);
807 static void emit_one_bundle (void);
808 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
809 bfd_reloc_code_real_type);
810 static void insn_group_break (int, int, int);
811 static void add_qp_mutex (valueT);
812 static void add_qp_imply (int, int);
813 static void clear_qp_mutex (valueT);
814 static void clear_qp_implies (valueT, valueT);
815 static void print_dependency (const char *, int);
816 static void instruction_serialization (void);
817 static void data_serialization (void);
818 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
819 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
820 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
821 static void free_saved_prologue_counts (void);
822
823 /* Determine if application register REGNUM resides only in the integer
824 unit (as opposed to the memory unit). */
825 static int
826 ar_is_only_in_integer_unit (int reg)
827 {
828 reg -= REG_AR;
829 return reg >= 64 && reg <= 111;
830 }
831
832 /* Determine if application register REGNUM resides only in the memory
833 unit (as opposed to the integer unit). */
834 static int
835 ar_is_only_in_memory_unit (int reg)
836 {
837 reg -= REG_AR;
838 return reg >= 0 && reg <= 47;
839 }
840
841 /* Switch to section NAME and create section if necessary. It's
842 rather ugly that we have to manipulate input_line_pointer but I
843 don't see any other way to accomplish the same thing without
844 changing obj-elf.c (which may be the Right Thing, in the end). */
845 static void
846 set_section (char *name)
847 {
848 char *saved_input_line_pointer;
849
850 saved_input_line_pointer = input_line_pointer;
851 input_line_pointer = name;
852 obj_elf_section (0);
853 input_line_pointer = saved_input_line_pointer;
854 }
855
856 /* Map 's' to SHF_IA_64_SHORT. */
857
858 bfd_vma
859 ia64_elf_section_letter (int letter, const char **ptr_msg)
860 {
861 if (letter == 's')
862 return SHF_IA_64_SHORT;
863 else if (letter == 'o')
864 return SHF_LINK_ORDER;
865 #ifdef TE_VMS
866 else if (letter == 'O')
867 return SHF_IA_64_VMS_OVERLAID;
868 else if (letter == 'g')
869 return SHF_IA_64_VMS_GLOBAL;
870 #endif
871
872 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
873 return -1;
874 }
875
876 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
877
878 flagword
879 ia64_elf_section_flags (flagword flags,
880 bfd_vma attr,
881 int type ATTRIBUTE_UNUSED)
882 {
883 if (attr & SHF_IA_64_SHORT)
884 flags |= SEC_SMALL_DATA;
885 return flags;
886 }
887
888 int
889 ia64_elf_section_type (const char *str, size_t len)
890 {
891 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
892
893 if (STREQ (ELF_STRING_ia64_unwind_info))
894 return SHT_PROGBITS;
895
896 if (STREQ (ELF_STRING_ia64_unwind_info_once))
897 return SHT_PROGBITS;
898
899 if (STREQ (ELF_STRING_ia64_unwind))
900 return SHT_IA_64_UNWIND;
901
902 if (STREQ (ELF_STRING_ia64_unwind_once))
903 return SHT_IA_64_UNWIND;
904
905 if (STREQ ("unwind"))
906 return SHT_IA_64_UNWIND;
907
908 return -1;
909 #undef STREQ
910 }
911
912 static unsigned int
913 set_regstack (unsigned int ins,
914 unsigned int locs,
915 unsigned int outs,
916 unsigned int rots)
917 {
918 /* Size of frame. */
919 unsigned int sof;
920
921 sof = ins + locs + outs;
922 if (sof > 96)
923 {
924 as_bad (_("Size of frame exceeds maximum of 96 registers"));
925 return 0;
926 }
927 if (rots > sof)
928 {
929 as_warn (_("Size of rotating registers exceeds frame size"));
930 return 0;
931 }
932 md.in.base = REG_GR + 32;
933 md.loc.base = md.in.base + ins;
934 md.out.base = md.loc.base + locs;
935
936 md.in.num_regs = ins;
937 md.loc.num_regs = locs;
938 md.out.num_regs = outs;
939 md.rot.num_regs = rots;
940 return sof;
941 }
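/* Worked example (assuming a ".regstk 2, 3, 4, 0" style request):
   sof == 2 + 3 + 4 == 9, incoming registers start at r32, locals at r34,
   outputs at r37, and there are no rotating registers.  */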
942
943 void
944 ia64_flush_insns (void)
945 {
946 struct label_fix *lfix;
947 segT saved_seg;
948 subsegT saved_subseg;
949 unw_rec_list *ptr;
950 bfd_boolean mark;
951
952 if (!md.last_text_seg)
953 return;
954
955 saved_seg = now_seg;
956 saved_subseg = now_subseg;
957
958 subseg_set (md.last_text_seg, 0);
959
960 while (md.num_slots_in_use > 0)
961 emit_one_bundle (); /* force out queued instructions */
962
963 /* In case there are labels following the last instruction, resolve
964 those now. */
965 mark = FALSE;
966 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
967 {
968 symbol_set_value_now (lfix->sym);
969 mark |= lfix->dw2_mark_labels;
970 }
971 if (mark)
972 {
973 dwarf2_where (&CURR_SLOT.debug_line);
974 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
975 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
976 dwarf2_consume_line_info ();
977 }
978 CURR_SLOT.label_fixups = 0;
979
980 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
981 symbol_set_value_now (lfix->sym);
982 CURR_SLOT.tag_fixups = 0;
983
984 /* In case there are unwind directives following the last instruction,
985 resolve those now. We only handle prologue, body, and endp directives
986 here. Give an error for others. */
987 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
988 {
989 switch (ptr->r.type)
990 {
991 case prologue:
992 case prologue_gr:
993 case body:
994 case endp:
995 ptr->slot_number = (unsigned long) frag_more (0);
996 ptr->slot_frag = frag_now;
997 break;
998
999 /* Allow any record which doesn't have a "t" field (i.e.,
1000 doesn't relate to a particular instruction). */
1001 case unwabi:
1002 case br_gr:
1003 case copy_state:
1004 case fr_mem:
1005 case frgr_mem:
1006 case gr_gr:
1007 case gr_mem:
1008 case label_state:
1009 case rp_br:
1010 case spill_base:
1011 case spill_mask:
1012 /* nothing */
1013 break;
1014
1015 default:
1016 as_bad (_("Unwind directive not followed by an instruction."));
1017 break;
1018 }
1019 }
1020 unwind.current_entry = NULL;
1021
1022 subseg_set (saved_seg, saved_subseg);
1023
1024 if (md.qp.X_op == O_register)
1025 as_bad (_("qualifying predicate not followed by instruction"));
1026 }
1027
1028 void
1029 ia64_cons_align (int nbytes)
1030 {
1031 if (md.auto_align)
1032 {
1033 int log;
1034 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1035 log++;
1036
1037 do_align (log, NULL, 0, 0);
1038 }
1039 }
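/* Worked example: an 8-byte constant leaves the loop above with log == 3,
   so the location counter is aligned to a 2^3 == 8 byte boundary.  */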
1040
1041 #ifdef TE_VMS
1042
1043 /* .vms_common section, symbol, size, alignment */
1044
1045 static void
1046 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1047 {
1048 const char *sec_name;
1049 char *sym_name;
1050 char c;
1051 offsetT size;
1052 offsetT cur_size;
1053 offsetT temp;
1054 symbolS *symbolP;
1055 segT current_seg = now_seg;
1056 subsegT current_subseg = now_subseg;
1057 offsetT log_align;
1058
1059 /* Section name. */
1060 sec_name = obj_elf_section_name ();
1061 if (sec_name == NULL)
1062 return;
1063
1064 /* Symbol name. */
1065 SKIP_WHITESPACE ();
1066 if (*input_line_pointer == ',')
1067 {
1068 input_line_pointer++;
1069 SKIP_WHITESPACE ();
1070 }
1071 else
1072 {
1073 as_bad (_("expected ',' after section name"));
1074 ignore_rest_of_line ();
1075 return;
1076 }
1077
1078 c = get_symbol_name (&sym_name);
1079
1080 if (input_line_pointer == sym_name)
1081 {
1082 (void) restore_line_pointer (c);
1083 as_bad (_("expected symbol name"));
1084 ignore_rest_of_line ();
1085 return;
1086 }
1087
1088 symbolP = symbol_find_or_make (sym_name);
1089 (void) restore_line_pointer (c);
1090
1091 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1092 && !S_IS_COMMON (symbolP))
1093 {
1094 as_bad (_("Ignoring attempt to re-define symbol"));
1095 ignore_rest_of_line ();
1096 return;
1097 }
1098
1099 /* Symbol size. */
1100 SKIP_WHITESPACE ();
1101 if (*input_line_pointer == ',')
1102 {
1103 input_line_pointer++;
1104 SKIP_WHITESPACE ();
1105 }
1106 else
1107 {
1108 as_bad (_("expected ',' after symbol name"));
1109 ignore_rest_of_line ();
1110 return;
1111 }
1112
1113 temp = get_absolute_expression ();
1114 size = temp;
1115 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1116 if (temp != size)
1117 {
1118 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1119 ignore_rest_of_line ();
1120 return;
1121 }
1122
1123 /* Alignment. */
1124 SKIP_WHITESPACE ();
1125 if (*input_line_pointer == ',')
1126 {
1127 input_line_pointer++;
1128 SKIP_WHITESPACE ();
1129 }
1130 else
1131 {
1132 as_bad (_("expected ',' after symbol size"));
1133 ignore_rest_of_line ();
1134 return;
1135 }
1136
1137 log_align = get_absolute_expression ();
1138
1139 demand_empty_rest_of_line ();
1140
1141 obj_elf_change_section
1142 (sec_name, SHT_NOBITS, 0,
1143 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1144 0, NULL, 1, 0);
1145
1146 S_SET_VALUE (symbolP, 0);
1147 S_SET_SIZE (symbolP, size);
1148 S_SET_EXTERNAL (symbolP);
1149 S_SET_SEGMENT (symbolP, now_seg);
1150
1151 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1152
1153 record_alignment (now_seg, log_align);
1154
1155 cur_size = bfd_section_size (now_seg);
1156 if ((int) size > cur_size)
1157 {
1158 char *pfrag
1159 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1160 (valueT)size - (valueT)cur_size, NULL);
1161 *pfrag = 0;
1162 bfd_set_section_size (now_seg, size);
1163 }
1164
1165 /* Switch back to current segment. */
1166 subseg_set (current_seg, current_subseg);
1167
1168 #ifdef md_elf_section_change_hook
1169 md_elf_section_change_hook ();
1170 #endif
1171 }
1172
1173 #endif /* TE_VMS */
1174
1175 /* Output COUNT bytes to a memory location. */
1176 static char *vbyte_mem_ptr = NULL;
1177
1178 static void
1179 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1180 {
1181 int x;
1182 if (vbyte_mem_ptr == NULL)
1183 abort ();
1184
1185 if (count == 0)
1186 return;
1187 for (x = 0; x < count; x++)
1188 *(vbyte_mem_ptr++) = ptr[x];
1189 }
1190
1191 /* Count the number of bytes required for records. */
1192 static int vbyte_count = 0;
1193 static void
1194 count_output (int count,
1195 char *ptr ATTRIBUTE_UNUSED,
1196 char *comment ATTRIBUTE_UNUSED)
1197 {
1198 vbyte_count += count;
1199 }
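/* A minimal sketch (not compiled) of the two-pass use of vbyte_func:
   size a record with count_output first, then emit it for real with
   output_vbyte_mem into a buffer of at least vbyte_count bytes.  This is
   roughly how the unwind image is materialized later in this file.  */
#if 0
static void
emit_record_sketch (unw_record_type rtype, unsigned long rlen, char *buf)
{
  vbyte_count = 0;
  output_R3_format (count_output, rtype, rlen);	/* pass 1: count bytes */
  vbyte_mem_ptr = buf;				/* buf holds vbyte_count bytes */
  output_R3_format (output_vbyte_mem, rtype, rlen); /* pass 2: emit */
}
#endif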
1200
1201 static void
1202 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1203 {
1204 int r = 0;
1205 char byte;
1206 if (rlen > 0x1f)
1207 {
1208 output_R3_format (f, rtype, rlen);
1209 return;
1210 }
1211
1212 if (rtype == body)
1213 r = 1;
1214 else if (rtype != prologue)
1215 as_bad (_("record type is not valid"));
1216
1217 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1218 (*f) (1, &byte, NULL);
1219 }
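/* Worked example: a prologue region of length 5 fits the short form and
   encodes as UNW_R1 | (0 << 5) | 5, while a body region of length 40
   exceeds the 5-bit length field and is routed to output_R3_format.  */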
1220
1221 static void
1222 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1223 {
1224 char bytes[20];
1225 int count = 2;
1226 mask = (mask & 0x0f);
1227 grsave = (grsave & 0x7f);
1228
1229 bytes[0] = (UNW_R2 | (mask >> 1));
1230 bytes[1] = (((mask & 0x01) << 7) | grsave);
1231 count += output_leb128 (bytes + 2, rlen, 0);
1232 (*f) (count, bytes, NULL);
1233 }
1234
1235 static void
1236 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1237 {
1238 int r = 0, count;
1239 char bytes[20];
1240 if (rlen <= 0x1f)
1241 {
1242 output_R1_format (f, rtype, rlen);
1243 return;
1244 }
1245
1246 if (rtype == body)
1247 r = 1;
1248 else if (rtype != prologue)
1249 as_bad (_("record type is not valid"));
1250 bytes[0] = (UNW_R3 | r);
1251 count = output_leb128 (bytes + 1, rlen, 0);
1252 (*f) (count + 1, bytes, NULL);
1253 }
1254
1255 static void
1256 output_P1_format (vbyte_func f, int brmask)
1257 {
1258 char byte;
1259 byte = UNW_P1 | (brmask & 0x1f);
1260 (*f) (1, &byte, NULL);
1261 }
1262
1263 static void
1264 output_P2_format (vbyte_func f, int brmask, int gr)
1265 {
1266 char bytes[2];
1267 brmask = (brmask & 0x1f);
1268 bytes[0] = UNW_P2 | (brmask >> 1);
1269 bytes[1] = (((brmask & 1) << 7) | gr);
1270 (*f) (2, bytes, NULL);
1271 }
1272
1273 static void
1274 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1275 {
1276 char bytes[2];
1277 int r = 0;
1278 reg = (reg & 0x7f);
1279 switch (rtype)
1280 {
1281 case psp_gr:
1282 r = 0;
1283 break;
1284 case rp_gr:
1285 r = 1;
1286 break;
1287 case pfs_gr:
1288 r = 2;
1289 break;
1290 case preds_gr:
1291 r = 3;
1292 break;
1293 case unat_gr:
1294 r = 4;
1295 break;
1296 case lc_gr:
1297 r = 5;
1298 break;
1299 case rp_br:
1300 r = 6;
1301 break;
1302 case rnat_gr:
1303 r = 7;
1304 break;
1305 case bsp_gr:
1306 r = 8;
1307 break;
1308 case bspstore_gr:
1309 r = 9;
1310 break;
1311 case fpsr_gr:
1312 r = 10;
1313 break;
1314 case priunat_gr:
1315 r = 11;
1316 break;
1317 default:
1318 as_bad (_("Invalid record type for P3 format."));
1319 }
1320 bytes[0] = (UNW_P3 | (r >> 1));
1321 bytes[1] = (((r & 1) << 7) | reg);
1322 (*f) (2, bytes, NULL);
1323 }
1324
1325 static void
1326 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1327 {
1328 imask[0] = UNW_P4;
1329 (*f) (imask_size, (char *) imask, NULL);
1330 }
1331
1332 static void
1333 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1334 {
1335 char bytes[4];
1336 grmask = (grmask & 0x0f);
1337
1338 bytes[0] = UNW_P5;
1339 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1340 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1341 bytes[3] = (frmask & 0x000000ff);
1342 (*f) (4, bytes, NULL);
1343 }
1344
1345 static void
1346 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1347 {
1348 char byte;
1349 int r = 0;
1350
1351 if (rtype == gr_mem)
1352 r = 1;
1353 else if (rtype != fr_mem)
1354 as_bad (_("Invalid record type for format P6"));
1355 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1356 (*f) (1, &byte, NULL);
1357 }
1358
1359 static void
1360 output_P7_format (vbyte_func f,
1361 unw_record_type rtype,
1362 unsigned long w1,
1363 unsigned long w2)
1364 {
1365 char bytes[20];
1366 int count = 1;
1367 int r = 0;
1368 count += output_leb128 (bytes + 1, w1, 0);
1369 switch (rtype)
1370 {
1371 case mem_stack_f:
1372 r = 0;
1373 count += output_leb128 (bytes + count, w2 >> 4, 0);
1374 break;
1375 case mem_stack_v:
1376 r = 1;
1377 break;
1378 case spill_base:
1379 r = 2;
1380 break;
1381 case psp_sprel:
1382 r = 3;
1383 break;
1384 case rp_when:
1385 r = 4;
1386 break;
1387 case rp_psprel:
1388 r = 5;
1389 break;
1390 case pfs_when:
1391 r = 6;
1392 break;
1393 case pfs_psprel:
1394 r = 7;
1395 break;
1396 case preds_when:
1397 r = 8;
1398 break;
1399 case preds_psprel:
1400 r = 9;
1401 break;
1402 case lc_when:
1403 r = 10;
1404 break;
1405 case lc_psprel:
1406 r = 11;
1407 break;
1408 case unat_when:
1409 r = 12;
1410 break;
1411 case unat_psprel:
1412 r = 13;
1413 break;
1414 case fpsr_when:
1415 r = 14;
1416 break;
1417 case fpsr_psprel:
1418 r = 15;
1419 break;
1420 default:
1421 break;
1422 }
1423 bytes[0] = (UNW_P7 | r);
1424 (*f) (count, bytes, NULL);
1425 }
1426
1427 static void
1428 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1429 {
1430 char bytes[20];
1431 int r = 0;
1432 int count = 2;
1433 bytes[0] = UNW_P8;
1434 switch (rtype)
1435 {
1436 case rp_sprel:
1437 r = 1;
1438 break;
1439 case pfs_sprel:
1440 r = 2;
1441 break;
1442 case preds_sprel:
1443 r = 3;
1444 break;
1445 case lc_sprel:
1446 r = 4;
1447 break;
1448 case unat_sprel:
1449 r = 5;
1450 break;
1451 case fpsr_sprel:
1452 r = 6;
1453 break;
1454 case bsp_when:
1455 r = 7;
1456 break;
1457 case bsp_psprel:
1458 r = 8;
1459 break;
1460 case bsp_sprel:
1461 r = 9;
1462 break;
1463 case bspstore_when:
1464 r = 10;
1465 break;
1466 case bspstore_psprel:
1467 r = 11;
1468 break;
1469 case bspstore_sprel:
1470 r = 12;
1471 break;
1472 case rnat_when:
1473 r = 13;
1474 break;
1475 case rnat_psprel:
1476 r = 14;
1477 break;
1478 case rnat_sprel:
1479 r = 15;
1480 break;
1481 case priunat_when_gr:
1482 r = 16;
1483 break;
1484 case priunat_psprel:
1485 r = 17;
1486 break;
1487 case priunat_sprel:
1488 r = 18;
1489 break;
1490 case priunat_when_mem:
1491 r = 19;
1492 break;
1493 default:
1494 break;
1495 }
1496 bytes[1] = r;
1497 count += output_leb128 (bytes + 2, t, 0);
1498 (*f) (count, bytes, NULL);
1499 }
1500
1501 static void
1502 output_P9_format (vbyte_func f, int grmask, int gr)
1503 {
1504 char bytes[3];
1505 bytes[0] = UNW_P9;
1506 bytes[1] = (grmask & 0x0f);
1507 bytes[2] = (gr & 0x7f);
1508 (*f) (3, bytes, NULL);
1509 }
1510
1511 static void
1512 output_P10_format (vbyte_func f, int abi, int context)
1513 {
1514 char bytes[3];
1515 bytes[0] = UNW_P10;
1516 bytes[1] = (abi & 0xff);
1517 bytes[2] = (context & 0xff);
1518 (*f) (3, bytes, NULL);
1519 }
1520
1521 static void
1522 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1523 {
1524 char byte;
1525 int r = 0;
1526 if (label > 0x1f)
1527 {
1528 output_B4_format (f, rtype, label);
1529 return;
1530 }
1531 if (rtype == copy_state)
1532 r = 1;
1533 else if (rtype != label_state)
1534 as_bad (_("Invalid record type for format B1"));
1535
1536 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1537 (*f) (1, &byte, NULL);
1538 }
1539
1540 static void
1541 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1542 {
1543 char bytes[20];
1544 int count = 1;
1545 if (ecount > 0x1f)
1546 {
1547 output_B3_format (f, ecount, t);
1548 return;
1549 }
1550 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1551 count += output_leb128 (bytes + 1, t, 0);
1552 (*f) (count, bytes, NULL);
1553 }
1554
1555 static void
1556 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1557 {
1558 char bytes[20];
1559 int count = 1;
1560 if (ecount <= 0x1f)
1561 {
1562 output_B2_format (f, ecount, t);
1563 return;
1564 }
1565 bytes[0] = UNW_B3;
1566 count += output_leb128 (bytes + 1, t, 0);
1567 count += output_leb128 (bytes + count, ecount, 0);
1568 (*f) (count, bytes, NULL);
1569 }
1570
1571 static void
1572 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1573 {
1574 char bytes[20];
1575 int r = 0;
1576 int count = 1;
1577 if (label <= 0x1f)
1578 {
1579 output_B1_format (f, rtype, label);
1580 return;
1581 }
1582
1583 if (rtype == copy_state)
1584 r = 1;
1585 else if (rtype != label_state)
1586 as_bad (_("Invalid record type for format B4"));
1587
1588 bytes[0] = (UNW_B4 | (r << 3));
1589 count += output_leb128 (bytes + 1, label, 0);
1590 (*f) (count, bytes, NULL);
1591 }
1592
1593 static char
1594 format_ab_reg (int ab, int reg)
1595 {
1596 int ret;
1597 ab = (ab & 3);
1598 reg = (reg & 0x1f);
1599 ret = (ab << 5) | reg;
1600 return ret;
1601 }
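/* Worked example: ab == 2 and reg == 5 pack as (2 << 5) | 5 == 0x45.  */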
1602
1603 static void
1604 output_X1_format (vbyte_func f,
1605 unw_record_type rtype,
1606 int ab,
1607 int reg,
1608 unsigned long t,
1609 unsigned long w1)
1610 {
1611 char bytes[20];
1612 int r = 0;
1613 int count = 2;
1614 bytes[0] = UNW_X1;
1615
1616 if (rtype == spill_sprel)
1617 r = 1;
1618 else if (rtype != spill_psprel)
1619 as_bad (_("Invalid record type for format X1"));
1620 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1621 count += output_leb128 (bytes + 2, t, 0);
1622 count += output_leb128 (bytes + count, w1, 0);
1623 (*f) (count, bytes, NULL);
1624 }
1625
1626 static void
1627 output_X2_format (vbyte_func f,
1628 int ab,
1629 int reg,
1630 int x,
1631 int y,
1632 int treg,
1633 unsigned long t)
1634 {
1635 char bytes[20];
1636 int count = 3;
1637 bytes[0] = UNW_X2;
1638 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1639 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1640 count += output_leb128 (bytes + 3, t, 0);
1641 (*f) (count, bytes, NULL);
1642 }
1643
1644 static void
1645 output_X3_format (vbyte_func f,
1646 unw_record_type rtype,
1647 int qp,
1648 int ab,
1649 int reg,
1650 unsigned long t,
1651 unsigned long w1)
1652 {
1653 char bytes[20];
1654 int r = 0;
1655 int count = 3;
1656 bytes[0] = UNW_X3;
1657
1658 if (rtype == spill_sprel_p)
1659 r = 1;
1660 else if (rtype != spill_psprel_p)
1661 as_bad (_("Invalid record type for format X3"));
1662 bytes[1] = ((r << 7) | (qp & 0x3f));
1663 bytes[2] = format_ab_reg (ab, reg);
1664 count += output_leb128 (bytes + 3, t, 0);
1665 count += output_leb128 (bytes + count, w1, 0);
1666 (*f) (count, bytes, NULL);
1667 }
1668
1669 static void
1670 output_X4_format (vbyte_func f,
1671 int qp,
1672 int ab,
1673 int reg,
1674 int x,
1675 int y,
1676 int treg,
1677 unsigned long t)
1678 {
1679 char bytes[20];
1680 int count = 4;
1681 bytes[0] = UNW_X4;
1682 bytes[1] = (qp & 0x3f);
1683 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1684 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1685 count += output_leb128 (bytes + 4, t, 0);
1686 (*f) (count, bytes, NULL);
1687 }
1688
1689 /* This function checks whether there are any outstanding .save-s and
1690 discards them if so. */
1691
1692 static void
1693 check_pending_save (void)
1694 {
1695 if (unwind.pending_saves)
1696 {
1697 unw_rec_list *cur, *prev;
1698
1699 as_warn (_("Previous .save incomplete"));
1700 for (cur = unwind.list, prev = NULL; cur; )
1701 if (&cur->r.record.p == unwind.pending_saves)
1702 {
1703 if (prev)
1704 prev->next = cur->next;
1705 else
1706 unwind.list = cur->next;
1707 if (cur == unwind.tail)
1708 unwind.tail = prev;
1709 if (cur == unwind.current_entry)
1710 unwind.current_entry = cur->next;
1711 /* Don't free the first discarded record; it's being used as
1712 terminator for (currently) br_gr and gr_gr processing, and
1713 also prevents leaving a dangling pointer to it in its
1714 predecessor. */
1715 cur->r.record.p.grmask = 0;
1716 cur->r.record.p.brmask = 0;
1717 cur->r.record.p.frmask = 0;
1718 prev = cur->r.record.p.next;
1719 cur->r.record.p.next = NULL;
1720 cur = prev;
1721 break;
1722 }
1723 else
1724 {
1725 prev = cur;
1726 cur = cur->next;
1727 }
1728 while (cur)
1729 {
1730 prev = cur;
1731 cur = cur->r.record.p.next;
1732 free (prev);
1733 }
1734 unwind.pending_saves = NULL;
1735 }
1736 }
1737
1738 /* This function allocates a record list structure, and initializes fields. */
1739
1740 static unw_rec_list *
1741 alloc_record (unw_record_type t)
1742 {
1743 unw_rec_list *ptr;
1744 ptr = XNEW (unw_rec_list);
1745 memset (ptr, 0, sizeof (*ptr));
1746 ptr->slot_number = SLOT_NUM_NOT_SET;
1747 ptr->r.type = t;
1748 return ptr;
1749 }
1750
1751 /* Dummy unwind record used for calculating the length of the last prologue or
1752 body region. */
1753
1754 static unw_rec_list *
1755 output_endp (void)
1756 {
1757 unw_rec_list *ptr = alloc_record (endp);
1758 return ptr;
1759 }
1760
1761 static unw_rec_list *
1762 output_prologue (void)
1763 {
1764 unw_rec_list *ptr = alloc_record (prologue);
1765 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1766 return ptr;
1767 }
1768
1769 static unw_rec_list *
1770 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1771 {
1772 unw_rec_list *ptr = alloc_record (prologue_gr);
1773 memset (&ptr->r.record.r.mask, 0, sizeof (ptr->r.record.r.mask));
1774 ptr->r.record.r.grmask = saved_mask;
1775 ptr->r.record.r.grsave = reg;
1776 return ptr;
1777 }
1778
1779 static unw_rec_list *
1780 output_body (void)
1781 {
1782 unw_rec_list *ptr = alloc_record (body);
1783 return ptr;
1784 }
1785
1786 static unw_rec_list *
1787 output_mem_stack_f (unsigned int size)
1788 {
1789 unw_rec_list *ptr = alloc_record (mem_stack_f);
1790 ptr->r.record.p.size = size;
1791 return ptr;
1792 }
1793
1794 static unw_rec_list *
1795 output_mem_stack_v (void)
1796 {
1797 unw_rec_list *ptr = alloc_record (mem_stack_v);
1798 return ptr;
1799 }
1800
1801 static unw_rec_list *
1802 output_psp_gr (unsigned int gr)
1803 {
1804 unw_rec_list *ptr = alloc_record (psp_gr);
1805 ptr->r.record.p.r.gr = gr;
1806 return ptr;
1807 }
1808
1809 static unw_rec_list *
1810 output_psp_sprel (unsigned int offset)
1811 {
1812 unw_rec_list *ptr = alloc_record (psp_sprel);
1813 ptr->r.record.p.off.sp = offset / 4;
1814 return ptr;
1815 }
1816
1817 static unw_rec_list *
1818 output_rp_when (void)
1819 {
1820 unw_rec_list *ptr = alloc_record (rp_when);
1821 return ptr;
1822 }
1823
1824 static unw_rec_list *
1825 output_rp_gr (unsigned int gr)
1826 {
1827 unw_rec_list *ptr = alloc_record (rp_gr);
1828 ptr->r.record.p.r.gr = gr;
1829 return ptr;
1830 }
1831
1832 static unw_rec_list *
1833 output_rp_br (unsigned int br)
1834 {
1835 unw_rec_list *ptr = alloc_record (rp_br);
1836 ptr->r.record.p.r.br = br;
1837 return ptr;
1838 }
1839
1840 static unw_rec_list *
1841 output_rp_psprel (unsigned int offset)
1842 {
1843 unw_rec_list *ptr = alloc_record (rp_psprel);
1844 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1845 return ptr;
1846 }
1847
1848 static unw_rec_list *
1849 output_rp_sprel (unsigned int offset)
1850 {
1851 unw_rec_list *ptr = alloc_record (rp_sprel);
1852 ptr->r.record.p.off.sp = offset / 4;
1853 return ptr;
1854 }
1855
1856 static unw_rec_list *
1857 output_pfs_when (void)
1858 {
1859 unw_rec_list *ptr = alloc_record (pfs_when);
1860 return ptr;
1861 }
1862
1863 static unw_rec_list *
1864 output_pfs_gr (unsigned int gr)
1865 {
1866 unw_rec_list *ptr = alloc_record (pfs_gr);
1867 ptr->r.record.p.r.gr = gr;
1868 return ptr;
1869 }
1870
1871 static unw_rec_list *
1872 output_pfs_psprel (unsigned int offset)
1873 {
1874 unw_rec_list *ptr = alloc_record (pfs_psprel);
1875 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1876 return ptr;
1877 }
1878
1879 static unw_rec_list *
1880 output_pfs_sprel (unsigned int offset)
1881 {
1882 unw_rec_list *ptr = alloc_record (pfs_sprel);
1883 ptr->r.record.p.off.sp = offset / 4;
1884 return ptr;
1885 }
1886
1887 static unw_rec_list *
1888 output_preds_when (void)
1889 {
1890 unw_rec_list *ptr = alloc_record (preds_when);
1891 return ptr;
1892 }
1893
1894 static unw_rec_list *
1895 output_preds_gr (unsigned int gr)
1896 {
1897 unw_rec_list *ptr = alloc_record (preds_gr);
1898 ptr->r.record.p.r.gr = gr;
1899 return ptr;
1900 }
1901
1902 static unw_rec_list *
1903 output_preds_psprel (unsigned int offset)
1904 {
1905 unw_rec_list *ptr = alloc_record (preds_psprel);
1906 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1907 return ptr;
1908 }
1909
1910 static unw_rec_list *
1911 output_preds_sprel (unsigned int offset)
1912 {
1913 unw_rec_list *ptr = alloc_record (preds_sprel);
1914 ptr->r.record.p.off.sp = offset / 4;
1915 return ptr;
1916 }
1917
1918 static unw_rec_list *
1919 output_fr_mem (unsigned int mask)
1920 {
1921 unw_rec_list *ptr = alloc_record (fr_mem);
1922 unw_rec_list *cur = ptr;
1923
1924 ptr->r.record.p.frmask = mask;
1925 unwind.pending_saves = &ptr->r.record.p;
1926 for (;;)
1927 {
1928 unw_rec_list *prev = cur;
1929
1930 /* Clear least significant set bit. */
1931 mask &= ~(mask & (~mask + 1));
1932 if (!mask)
1933 return ptr;
1934 cur = alloc_record (fr_mem);
1935 cur->r.record.p.frmask = mask;
1936 /* Retain only least significant bit. */
1937 prev->r.record.p.frmask ^= mask;
1938 prev->r.record.p.next = cur;
1939 }
1940 }
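/* Worked example: a mask of 0x5 is split into a chain of two fr_mem
   records, the first keeping only bit 0x1 and the second carrying the
   remaining 0x4, presumably so that each spilled register's record can
   later be completed individually via unwind.pending_saves above.  */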
1941
1942 static unw_rec_list *
1943 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1944 {
1945 unw_rec_list *ptr = alloc_record (frgr_mem);
1946 unw_rec_list *cur = ptr;
1947
1948 unwind.pending_saves = &cur->r.record.p;
1949 cur->r.record.p.frmask = fr_mask;
1950 while (fr_mask)
1951 {
1952 unw_rec_list *prev = cur;
1953
1954 /* Clear least significant set bit. */
1955 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1956 if (!gr_mask && !fr_mask)
1957 return ptr;
1958 cur = alloc_record (frgr_mem);
1959 cur->r.record.p.frmask = fr_mask;
1960 /* Retain only least significant bit. */
1961 prev->r.record.p.frmask ^= fr_mask;
1962 prev->r.record.p.next = cur;
1963 }
1964 cur->r.record.p.grmask = gr_mask;
1965 for (;;)
1966 {
1967 unw_rec_list *prev = cur;
1968
1969 /* Clear least significant set bit. */
1970 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1971 if (!gr_mask)
1972 return ptr;
1973 cur = alloc_record (frgr_mem);
1974 cur->r.record.p.grmask = gr_mask;
1975 /* Retain only least significant bit. */
1976 prev->r.record.p.grmask ^= gr_mask;
1977 prev->r.record.p.next = cur;
1978 }
1979 }
1980
1981 static unw_rec_list *
1982 output_gr_gr (unsigned int mask, unsigned int reg)
1983 {
1984 unw_rec_list *ptr = alloc_record (gr_gr);
1985 unw_rec_list *cur = ptr;
1986
1987 ptr->r.record.p.grmask = mask;
1988 ptr->r.record.p.r.gr = reg;
1989 unwind.pending_saves = &ptr->r.record.p;
1990 for (;;)
1991 {
1992 unw_rec_list *prev = cur;
1993
1994 /* Clear least significant set bit. */
1995 mask &= ~(mask & (~mask + 1));
1996 if (!mask)
1997 return ptr;
1998 cur = alloc_record (gr_gr);
1999 cur->r.record.p.grmask = mask;
2000 /* Indicate this record shouldn't be output. */
2001 cur->r.record.p.r.gr = REG_NUM;
2002 /* Retain only least significant bit. */
2003 prev->r.record.p.grmask ^= mask;
2004 prev->r.record.p.next = cur;
2005 }
2006 }
2007
2008 static unw_rec_list *
2009 output_gr_mem (unsigned int mask)
2010 {
2011 unw_rec_list *ptr = alloc_record (gr_mem);
2012 unw_rec_list *cur = ptr;
2013
2014 ptr->r.record.p.grmask = mask;
2015 unwind.pending_saves = &ptr->r.record.p;
2016 for (;;)
2017 {
2018 unw_rec_list *prev = cur;
2019
2020 /* Clear least significant set bit. */
2021 mask &= ~(mask & (~mask + 1));
2022 if (!mask)
2023 return ptr;
2024 cur = alloc_record (gr_mem);
2025 cur->r.record.p.grmask = mask;
2026 /* Retain only least significant bit. */
2027 prev->r.record.p.grmask ^= mask;
2028 prev->r.record.p.next = cur;
2029 }
2030 }
2031
2032 static unw_rec_list *
2033 output_br_mem (unsigned int mask)
2034 {
2035 unw_rec_list *ptr = alloc_record (br_mem);
2036 unw_rec_list *cur = ptr;
2037
2038 ptr->r.record.p.brmask = mask;
2039 unwind.pending_saves = &ptr->r.record.p;
2040 for (;;)
2041 {
2042 unw_rec_list *prev = cur;
2043
2044 /* Clear least significant set bit. */
2045 mask &= ~(mask & (~mask + 1));
2046 if (!mask)
2047 return ptr;
2048 cur = alloc_record (br_mem);
2049 cur->r.record.p.brmask = mask;
2050 /* Retain only least significant bit. */
2051 prev->r.record.p.brmask ^= mask;
2052 prev->r.record.p.next = cur;
2053 }
2054 }
2055
2056 static unw_rec_list *
2057 output_br_gr (unsigned int mask, unsigned int reg)
2058 {
2059 unw_rec_list *ptr = alloc_record (br_gr);
2060 unw_rec_list *cur = ptr;
2061
2062 ptr->r.record.p.brmask = mask;
2063 ptr->r.record.p.r.gr = reg;
2064 unwind.pending_saves = &ptr->r.record.p;
2065 for (;;)
2066 {
2067 unw_rec_list *prev = cur;
2068
2069 /* Clear least significant set bit. */
2070 mask &= ~(mask & (~mask + 1));
2071 if (!mask)
2072 return ptr;
2073 cur = alloc_record (br_gr);
2074 cur->r.record.p.brmask = mask;
2075 /* Indicate this record shouldn't be output. */
2076 cur->r.record.p.r.gr = REG_NUM;
2077 /* Retain only least significant bit. */
2078 prev->r.record.p.brmask ^= mask;
2079 prev->r.record.p.next = cur;
2080 }
2081 }
2082
2083 static unw_rec_list *
2084 output_spill_base (unsigned int offset)
2085 {
2086 unw_rec_list *ptr = alloc_record (spill_base);
2087 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2088 return ptr;
2089 }
2090
2091 static unw_rec_list *
2092 output_unat_when (void)
2093 {
2094 unw_rec_list *ptr = alloc_record (unat_when);
2095 return ptr;
2096 }
2097
2098 static unw_rec_list *
2099 output_unat_gr (unsigned int gr)
2100 {
2101 unw_rec_list *ptr = alloc_record (unat_gr);
2102 ptr->r.record.p.r.gr = gr;
2103 return ptr;
2104 }
2105
2106 static unw_rec_list *
2107 output_unat_psprel (unsigned int offset)
2108 {
2109 unw_rec_list *ptr = alloc_record (unat_psprel);
2110 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2111 return ptr;
2112 }
2113
2114 static unw_rec_list *
2115 output_unat_sprel (unsigned int offset)
2116 {
2117 unw_rec_list *ptr = alloc_record (unat_sprel);
2118 ptr->r.record.p.off.sp = offset / 4;
2119 return ptr;
2120 }
2121
2122 static unw_rec_list *
2123 output_lc_when (void)
2124 {
2125 unw_rec_list *ptr = alloc_record (lc_when);
2126 return ptr;
2127 }
2128
2129 static unw_rec_list *
2130 output_lc_gr (unsigned int gr)
2131 {
2132 unw_rec_list *ptr = alloc_record (lc_gr);
2133 ptr->r.record.p.r.gr = gr;
2134 return ptr;
2135 }
2136
2137 static unw_rec_list *
2138 output_lc_psprel (unsigned int offset)
2139 {
2140 unw_rec_list *ptr = alloc_record (lc_psprel);
2141 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2142 return ptr;
2143 }
2144
2145 static unw_rec_list *
2146 output_lc_sprel (unsigned int offset)
2147 {
2148 unw_rec_list *ptr = alloc_record (lc_sprel);
2149 ptr->r.record.p.off.sp = offset / 4;
2150 return ptr;
2151 }
2152
2153 static unw_rec_list *
2154 output_fpsr_when (void)
2155 {
2156 unw_rec_list *ptr = alloc_record (fpsr_when);
2157 return ptr;
2158 }
2159
2160 static unw_rec_list *
2161 output_fpsr_gr (unsigned int gr)
2162 {
2163 unw_rec_list *ptr = alloc_record (fpsr_gr);
2164 ptr->r.record.p.r.gr = gr;
2165 return ptr;
2166 }
2167
2168 static unw_rec_list *
2169 output_fpsr_psprel (unsigned int offset)
2170 {
2171 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2172 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2173 return ptr;
2174 }
2175
2176 static unw_rec_list *
2177 output_fpsr_sprel (unsigned int offset)
2178 {
2179 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2180 ptr->r.record.p.off.sp = offset / 4;
2181 return ptr;
2182 }
2183
2184 static unw_rec_list *
2185 output_priunat_when_gr (void)
2186 {
2187 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2188 return ptr;
2189 }
2190
2191 static unw_rec_list *
2192 output_priunat_when_mem (void)
2193 {
2194 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2195 return ptr;
2196 }
2197
2198 static unw_rec_list *
2199 output_priunat_gr (unsigned int gr)
2200 {
2201 unw_rec_list *ptr = alloc_record (priunat_gr);
2202 ptr->r.record.p.r.gr = gr;
2203 return ptr;
2204 }
2205
2206 static unw_rec_list *
2207 output_priunat_psprel (unsigned int offset)
2208 {
2209 unw_rec_list *ptr = alloc_record (priunat_psprel);
2210 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2211 return ptr;
2212 }
2213
2214 static unw_rec_list *
2215 output_priunat_sprel (unsigned int offset)
2216 {
2217 unw_rec_list *ptr = alloc_record (priunat_sprel);
2218 ptr->r.record.p.off.sp = offset / 4;
2219 return ptr;
2220 }
2221
2222 static unw_rec_list *
2223 output_bsp_when (void)
2224 {
2225 unw_rec_list *ptr = alloc_record (bsp_when);
2226 return ptr;
2227 }
2228
2229 static unw_rec_list *
2230 output_bsp_gr (unsigned int gr)
2231 {
2232 unw_rec_list *ptr = alloc_record (bsp_gr);
2233 ptr->r.record.p.r.gr = gr;
2234 return ptr;
2235 }
2236
2237 static unw_rec_list *
2238 output_bsp_psprel (unsigned int offset)
2239 {
2240 unw_rec_list *ptr = alloc_record (bsp_psprel);
2241 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2242 return ptr;
2243 }
2244
2245 static unw_rec_list *
2246 output_bsp_sprel (unsigned int offset)
2247 {
2248 unw_rec_list *ptr = alloc_record (bsp_sprel);
2249 ptr->r.record.p.off.sp = offset / 4;
2250 return ptr;
2251 }
2252
2253 static unw_rec_list *
2254 output_bspstore_when (void)
2255 {
2256 unw_rec_list *ptr = alloc_record (bspstore_when);
2257 return ptr;
2258 }
2259
2260 static unw_rec_list *
2261 output_bspstore_gr (unsigned int gr)
2262 {
2263 unw_rec_list *ptr = alloc_record (bspstore_gr);
2264 ptr->r.record.p.r.gr = gr;
2265 return ptr;
2266 }
2267
2268 static unw_rec_list *
2269 output_bspstore_psprel (unsigned int offset)
2270 {
2271 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2272 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2273 return ptr;
2274 }
2275
2276 static unw_rec_list *
2277 output_bspstore_sprel (unsigned int offset)
2278 {
2279 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2280 ptr->r.record.p.off.sp = offset / 4;
2281 return ptr;
2282 }
2283
2284 static unw_rec_list *
2285 output_rnat_when (void)
2286 {
2287 unw_rec_list *ptr = alloc_record (rnat_when);
2288 return ptr;
2289 }
2290
2291 static unw_rec_list *
2292 output_rnat_gr (unsigned int gr)
2293 {
2294 unw_rec_list *ptr = alloc_record (rnat_gr);
2295 ptr->r.record.p.r.gr = gr;
2296 return ptr;
2297 }
2298
2299 static unw_rec_list *
2300 output_rnat_psprel (unsigned int offset)
2301 {
2302 unw_rec_list *ptr = alloc_record (rnat_psprel);
2303 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2304 return ptr;
2305 }
2306
2307 static unw_rec_list *
2308 output_rnat_sprel (unsigned int offset)
2309 {
2310 unw_rec_list *ptr = alloc_record (rnat_sprel);
2311 ptr->r.record.p.off.sp = offset / 4;
2312 return ptr;
2313 }
2314
2315 static unw_rec_list *
2316 output_unwabi (unsigned long abi, unsigned long context)
2317 {
2318 unw_rec_list *ptr = alloc_record (unwabi);
2319 ptr->r.record.p.abi = abi;
2320 ptr->r.record.p.context = context;
2321 return ptr;
2322 }
2323
2324 static unw_rec_list *
2325 output_epilogue (unsigned long ecount)
2326 {
2327 unw_rec_list *ptr = alloc_record (epilogue);
2328 ptr->r.record.b.ecount = ecount;
2329 return ptr;
2330 }
2331
2332 static unw_rec_list *
2333 output_label_state (unsigned long label)
2334 {
2335 unw_rec_list *ptr = alloc_record (label_state);
2336 ptr->r.record.b.label = label;
2337 return ptr;
2338 }
2339
2340 static unw_rec_list *
2341 output_copy_state (unsigned long label)
2342 {
2343 unw_rec_list *ptr = alloc_record (copy_state);
2344 ptr->r.record.b.label = label;
2345 return ptr;
2346 }
2347
2348 static unw_rec_list *
2349 output_spill_psprel (unsigned int ab,
2350 unsigned int reg,
2351 unsigned int offset,
2352 unsigned int predicate)
2353 {
2354 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2355 ptr->r.record.x.ab = ab;
2356 ptr->r.record.x.reg = reg;
2357 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2358 ptr->r.record.x.qp = predicate;
2359 return ptr;
2360 }
2361
2362 static unw_rec_list *
2363 output_spill_sprel (unsigned int ab,
2364 unsigned int reg,
2365 unsigned int offset,
2366 unsigned int predicate)
2367 {
2368 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2369 ptr->r.record.x.ab = ab;
2370 ptr->r.record.x.reg = reg;
2371 ptr->r.record.x.where.spoff = offset / 4;
2372 ptr->r.record.x.qp = predicate;
2373 return ptr;
2374 }
2375
2376 static unw_rec_list *
2377 output_spill_reg (unsigned int ab,
2378 unsigned int reg,
2379 unsigned int targ_reg,
2380 unsigned int xy,
2381 unsigned int predicate)
2382 {
2383 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2384 ptr->r.record.x.ab = ab;
2385 ptr->r.record.x.reg = reg;
2386 ptr->r.record.x.where.reg = targ_reg;
2387 ptr->r.record.x.xy = xy;
2388 ptr->r.record.x.qp = predicate;
2389 return ptr;
2390 }
2391
2392 /* Given a unw_rec_list, output the record in the format appropriate
2393 to its type, using the specified vbyte function. */
2394
2395 static void
2396 process_one_record (unw_rec_list *ptr, vbyte_func f)
2397 {
2398 unsigned int fr_mask, gr_mask;
2399
2400 switch (ptr->r.type)
2401 {
2402 /* This is a dummy record that takes up no space in the output. */
2403 case endp:
2404 break;
2405
2406 case gr_mem:
2407 case fr_mem:
2408 case br_mem:
2409 case frgr_mem:
2410 /* These are taken care of by prologue/prologue_gr. */
2411 break;
2412
2413 case prologue_gr:
2414 case prologue:
2415 if (ptr->r.type == prologue_gr)
2416 output_R2_format (f, ptr->r.record.r.grmask,
2417 ptr->r.record.r.grsave, ptr->r.record.r.rlen);
2418 else
2419 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2420
2421 /* Output descriptor(s) for union of register spills (if any). */
2422 gr_mask = ptr->r.record.r.mask.gr_mem;
2423 fr_mask = ptr->r.record.r.mask.fr_mem;
2424 if (fr_mask)
2425 {
2426 if ((fr_mask & ~0xfUL) == 0)
2427 output_P6_format (f, fr_mem, fr_mask);
2428 else
2429 {
2430 output_P5_format (f, gr_mask, fr_mask);
2431 gr_mask = 0;
2432 }
2433 }
2434 if (gr_mask)
2435 output_P6_format (f, gr_mem, gr_mask);
2436 if (ptr->r.record.r.mask.br_mem)
2437 output_P1_format (f, ptr->r.record.r.mask.br_mem);
2438
2439 /* output imask descriptor if necessary: */
2440 if (ptr->r.record.r.mask.i)
2441 output_P4_format (f, ptr->r.record.r.mask.i,
2442 ptr->r.record.r.imask_size);
2443 break;
2444
2445 case body:
2446 output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
2447 break;
2448 case mem_stack_f:
2449 case mem_stack_v:
2450 output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
2451 ptr->r.record.p.size);
2452 break;
2453 case psp_gr:
2454 case rp_gr:
2455 case pfs_gr:
2456 case preds_gr:
2457 case unat_gr:
2458 case lc_gr:
2459 case fpsr_gr:
2460 case priunat_gr:
2461 case bsp_gr:
2462 case bspstore_gr:
2463 case rnat_gr:
2464 output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
2465 break;
2466 case rp_br:
2467 output_P3_format (f, rp_br, ptr->r.record.p.r.br);
2468 break;
2469 case psp_sprel:
2470 output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
2471 break;
2472 case rp_when:
2473 case pfs_when:
2474 case preds_when:
2475 case unat_when:
2476 case lc_when:
2477 case fpsr_when:
2478 output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
2479 break;
2480 case rp_psprel:
2481 case pfs_psprel:
2482 case preds_psprel:
2483 case unat_psprel:
2484 case lc_psprel:
2485 case fpsr_psprel:
2486 case spill_base:
2487 output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
2488 break;
2489 case rp_sprel:
2490 case pfs_sprel:
2491 case preds_sprel:
2492 case unat_sprel:
2493 case lc_sprel:
2494 case fpsr_sprel:
2495 case priunat_sprel:
2496 case bsp_sprel:
2497 case bspstore_sprel:
2498 case rnat_sprel:
2499 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
2500 break;
2501 case gr_gr:
2502 if (ptr->r.record.p.r.gr < REG_NUM)
2503 {
2504 const unw_rec_list *cur = ptr;
2505
2506 gr_mask = cur->r.record.p.grmask;
2507 while ((cur = cur->r.record.p.next) != NULL)
2508 gr_mask |= cur->r.record.p.grmask;
2509 output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
2510 }
2511 break;
2512 case br_gr:
2513 if (ptr->r.record.p.r.gr < REG_NUM)
2514 {
2515 const unw_rec_list *cur = ptr;
2516
2517 gr_mask = cur->r.record.p.brmask;
2518 while ((cur = cur->r.record.p.next) != NULL)
2519 gr_mask |= cur->r.record.p.brmask;
2520 output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
2521 }
2522 break;
2523 case spill_mask:
2524 as_bad (_("spill_mask record unimplemented."));
2525 break;
2526 case priunat_when_gr:
2527 case priunat_when_mem:
2528 case bsp_when:
2529 case bspstore_when:
2530 case rnat_when:
2531 output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
2532 break;
2533 case priunat_psprel:
2534 case bsp_psprel:
2535 case bspstore_psprel:
2536 case rnat_psprel:
2537 output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
2538 break;
2539 case unwabi:
2540 output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
2541 break;
2542 case epilogue:
2543 output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
2544 break;
2545 case label_state:
2546 case copy_state:
2547 output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
2548 break;
2549 case spill_psprel:
2550 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2551 ptr->r.record.x.reg, ptr->r.record.x.t,
2552 ptr->r.record.x.where.pspoff);
2553 break;
2554 case spill_sprel:
2555 output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
2556 ptr->r.record.x.reg, ptr->r.record.x.t,
2557 ptr->r.record.x.where.spoff);
2558 break;
2559 case spill_reg:
2560 output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
2561 ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
2562 ptr->r.record.x.where.reg, ptr->r.record.x.t);
2563 break;
2564 case spill_psprel_p:
2565 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2566 ptr->r.record.x.ab, ptr->r.record.x.reg,
2567 ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
2568 break;
2569 case spill_sprel_p:
2570 output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
2571 ptr->r.record.x.ab, ptr->r.record.x.reg,
2572 ptr->r.record.x.t, ptr->r.record.x.where.spoff);
2573 break;
2574 case spill_reg_p:
2575 output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
2576 ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
2577 ptr->r.record.x.xy, ptr->r.record.x.where.reg,
2578 ptr->r.record.x.t);
2579 break;
2580 default:
2581 as_bad (_("record_type_not_valid"));
2582 break;
2583 }
2584 }
2585
2586 /* Given a list of unwind records, process each record with
2587 the specified function. */
2588 static void
2589 process_unw_records (unw_rec_list *list, vbyte_func f)
2590 {
2591 unw_rec_list *ptr;
2592 for (ptr = list; ptr; ptr = ptr->next)
2593 process_one_record (ptr, f);
2594 }
2595
2596 /* Determine the size of a record list in bytes. */
2597 static int
2598 calc_record_size (unw_rec_list *list)
2599 {
2600 vbyte_count = 0;
2601 process_unw_records (list, count_output);
2602 return vbyte_count;
2603 }
2604
2605 /* Return the number of bits set in the input value.
2606 Perhaps this has a better place... */
2607 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
2608 # define popcount __builtin_popcount
2609 #else
2610 static int
2611 popcount (unsigned x)
2612 {
2613 static const unsigned char popcnt[16] =
2614 {
2615 0, 1, 1, 2,
2616 1, 2, 2, 3,
2617 1, 2, 2, 3,
2618 2, 3, 3, 4
2619 };
2620
2621 if (x < NELEMS (popcnt))
2622 return popcnt[x];
2623 return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
2624 }
2625 #endif
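/* For instance, popcount (0x2c) == 3: the table handles the low nibble
   (popcnt[0xc] == 2) and the recursion the remaining bits
   (popcount (0x2) == 1).  */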
2626
2627 /* Update IMASK bitmask to reflect the fact that one or more registers
2628 of type TYPE are saved starting at instruction with index T. If N
2629 bits are set in REGMASK, it is assumed that instructions T through
2630 T+N-1 save these registers.
2631
2632 TYPE values:
2633 0: no save
2634 1: instruction saves next fp reg
2635 2: instruction saves next general reg
2636 3: instruction saves next branch reg */
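/* For example, a region with rlen == 5 gets an imask of
   (5 * 2 + 7) / 8 + 1 == 3 bytes, and a save happening in instruction
   slot T of the region is described by the 2-bit field at bit position
   2 * (3 - T % 4) of byte (T / 4) + 1, i.e. four slots per byte, most
   significant pair first.  */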
2637 static void
2638 set_imask (unw_rec_list *region,
2639 unsigned long regmask,
2640 unsigned long t,
2641 unsigned int type)
2642 {
2643 unsigned char *imask;
2644 unsigned long imask_size;
2645 unsigned int i;
2646 int pos;
2647
2648 imask = region->r.record.r.mask.i;
2649 imask_size = region->r.record.r.imask_size;
2650 if (!imask)
2651 {
2652 imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
2653 imask = XCNEWVEC (unsigned char, imask_size);
2654
2655 region->r.record.r.imask_size = imask_size;
2656 region->r.record.r.mask.i = imask;
2657 }
2658
2659 i = (t / 4) + 1;
2660 pos = 2 * (3 - t % 4);
2661 while (regmask)
2662 {
2663 if (i >= imask_size)
2664 {
2665 as_bad (_("Ignoring attempt to spill beyond end of region"));
2666 return;
2667 }
2668
2669 imask[i] |= (type & 0x3) << pos;
2670
2671 regmask &= (regmask - 1);
2672 pos -= 2;
2673 if (pos < 0)
2674 {
2675 pos = 6;
2676 ++i;
2677 }
2678 }
2679 }
2680
2681 /* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
2682 SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
2683 containing FIRST_ADDR. If BEFORE_RELAX, then we use worst-case estimates
2684 for frag sizes. */
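/* Slot "addresses" encode the slot number (0-2) in their low two bits and
   the bundle address in the remaining bits; a 16-byte bundle holds three
   slots, which is why byte distances below are converted with
   3 * (delta >> 4) plus the slot-number difference.  */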
2685
2686 static unsigned long
2687 slot_index (unsigned long slot_addr,
2688 fragS *slot_frag,
2689 unsigned long first_addr,
2690 fragS *first_frag,
2691 int before_relax)
2692 {
2693 unsigned long s_index = 0;
2694
2695 /* First time we are called, the initial address and frag are invalid. */
2696 if (first_addr == 0)
2697 return 0;
2698
2699 /* If the two addresses are in different frags, then we need to add in
2700 the remaining size of this frag, and then the entire size of intermediate
2701 frags. */
2702 while (slot_frag != first_frag)
2703 {
2704 unsigned long start_addr = (unsigned long) &first_frag->fr_literal;
2705
2706 if (! before_relax)
2707 {
2708 /* We can get the final addresses only during and after
2709 relaxation. */
2710 if (first_frag->fr_next && first_frag->fr_next->fr_address)
2711 s_index += 3 * ((first_frag->fr_next->fr_address
2712 - first_frag->fr_address
2713 - first_frag->fr_fix) >> 4);
2714 }
2715 else
2716 /* We don't know what the final addresses will be. We try our
2717 best to estimate. */
2718 switch (first_frag->fr_type)
2719 {
2720 default:
2721 break;
2722
2723 case rs_space:
2724 as_fatal (_("Only constant space allocation is supported"));
2725 break;
2726
2727 case rs_align:
2728 case rs_align_code:
2729 case rs_align_test:
2730 /* Take alignment into account. Assume the worst case
2731 before relaxation. */
2732 s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
2733 break;
2734
2735 case rs_org:
2736 if (first_frag->fr_symbol)
2737 {
2738 as_fatal (_("Only constant offsets are supported"));
2739 break;
2740 }
2741 /* Fall through. */
2742 case rs_fill:
2743 s_index += 3 * (first_frag->fr_offset >> 4);
2744 break;
2745 }
2746
2747 /* Add in the full size of the frag converted to instruction slots. */
2748 s_index += 3 * (first_frag->fr_fix >> 4);
2749 /* Subtract away the initial part before first_addr. */
2750 s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
2751 + ((first_addr & 0x3) - (start_addr & 0x3)));
2752
2753 /* Move to the beginning of the next frag. */
2754 first_frag = first_frag->fr_next;
2755 first_addr = (unsigned long) &first_frag->fr_literal;
2756
2757 /* This can happen if there is section switching in the middle of a
2758 function, causing the frag chain for the function to be broken.
2759 It is too difficult to recover safely from this problem, so we just
2760 exit with an error. */
2761 if (first_frag == NULL)
2762 as_fatal (_("Section switching in code is not supported."));
2763 }
2764
2765 /* Add in the used part of the last frag. */
2766 s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
2767 + ((slot_addr & 0x3) - (first_addr & 0x3)));
2768 return s_index;
2769 }
2770
2771 /* Optimize unwind record directives. */
2772
2773 static unw_rec_list *
2774 optimize_unw_records (unw_rec_list *list)
2775 {
2776 if (!list)
2777 return NULL;
2778
2779 /* If the only unwind record is ".prologue" or ".prologue" followed
2780 by ".body", then we can optimize the unwind directives away. */
2781 if (list->r.type == prologue
2782 && (list->next->r.type == endp
2783 || (list->next->r.type == body && list->next->next->r.type == endp)))
2784 return NULL;
2785
2786 return list;
2787 }
2788
2789 /* Given a complete record list, process any records which have
2790 unresolved fields (e.g. the length counts for a prologue). After
2791 this has been run, all necessary information should be available
2792 within each record to generate an image. */
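/* BEFORE_RELAX selects between the worst-case frag estimates and the
   final frag addresses when slot numbers are converted to region offsets
   (see slot_index above).  */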
2793
2794 static void
2795 fixup_unw_records (unw_rec_list *list, int before_relax)
2796 {
2797 unw_rec_list *ptr, *region = 0;
2798 unsigned long first_addr = 0, rlen = 0, t;
2799 fragS *first_frag = 0;
2800
2801 for (ptr = list; ptr; ptr = ptr->next)
2802 {
2803 if (ptr->slot_number == SLOT_NUM_NOT_SET)
2804 as_bad (_("Insn slot not set in unwind record."));
2805 t = slot_index (ptr->slot_number, ptr->slot_frag,
2806 first_addr, first_frag, before_relax);
2807 switch (ptr->r.type)
2808 {
2809 case prologue:
2810 case prologue_gr:
2811 case body:
2812 {
2813 unw_rec_list *last;
2814 int size;
2815 unsigned long last_addr = 0;
2816 fragS *last_frag = NULL;
2817
2818 first_addr = ptr->slot_number;
2819 first_frag = ptr->slot_frag;
2820 /* Find either the next body/prologue start, or the end of
2821 the function, and determine the size of the region. */
2822 for (last = ptr->next; last != NULL; last = last->next)
2823 if (last->r.type == prologue || last->r.type == prologue_gr
2824 || last->r.type == body || last->r.type == endp)
2825 {
2826 last_addr = last->slot_number;
2827 last_frag = last->slot_frag;
2828 break;
2829 }
2830 size = slot_index (last_addr, last_frag, first_addr, first_frag,
2831 before_relax);
2832 rlen = ptr->r.record.r.rlen = size;
2833 if (ptr->r.type == body)
2834 /* End of region. */
2835 region = 0;
2836 else
2837 region = ptr;
2838 break;
2839 }
2840 case epilogue:
2841 if (t < rlen)
2842 ptr->r.record.b.t = rlen - 1 - t;
2843 else
2844 /* This happens when a memory-stack-less procedure uses a
2845 ".restore sp" directive at the end of a region to pop
2846 the frame state. */
2847 ptr->r.record.b.t = 0;
2848 break;
2849
2850 case mem_stack_f:
2851 case mem_stack_v:
2852 case rp_when:
2853 case pfs_when:
2854 case preds_when:
2855 case unat_when:
2856 case lc_when:
2857 case fpsr_when:
2858 case priunat_when_gr:
2859 case priunat_when_mem:
2860 case bsp_when:
2861 case bspstore_when:
2862 case rnat_when:
2863 ptr->r.record.p.t = t;
2864 break;
2865
2866 case spill_reg:
2867 case spill_sprel:
2868 case spill_psprel:
2869 case spill_reg_p:
2870 case spill_sprel_p:
2871 case spill_psprel_p:
2872 ptr->r.record.x.t = t;
2873 break;
2874
2875 case frgr_mem:
2876 if (!region)
2877 {
2878 as_bad (_("frgr_mem record before region record!"));
2879 return;
2880 }
2881 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2882 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2883 set_imask (region, ptr->r.record.p.frmask, t, 1);
2884 set_imask (region, ptr->r.record.p.grmask, t, 2);
2885 break;
2886 case fr_mem:
2887 if (!region)
2888 {
2889 as_bad (_("fr_mem record before region record!"));
2890 return;
2891 }
2892 region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
2893 set_imask (region, ptr->r.record.p.frmask, t, 1);
2894 break;
2895 case gr_mem:
2896 if (!region)
2897 {
2898 as_bad (_("gr_mem record before region record!"));
2899 return;
2900 }
2901 region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
2902 set_imask (region, ptr->r.record.p.grmask, t, 2);
2903 break;
2904 case br_mem:
2905 if (!region)
2906 {
2907 as_bad (_("br_mem record before region record!"));
2908 return;
2909 }
2910 region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
2911 set_imask (region, ptr->r.record.p.brmask, t, 3);
2912 break;
2913
2914 case gr_gr:
2915 if (!region)
2916 {
2917 as_bad (_("gr_gr record before region record!"));
2918 return;
2919 }
2920 set_imask (region, ptr->r.record.p.grmask, t, 2);
2921 break;
2922 case br_gr:
2923 if (!region)
2924 {
2925 as_bad (_("br_gr record before region record!"));
2926 return;
2927 }
2928 set_imask (region, ptr->r.record.p.brmask, t, 3);
2929 break;
2930
2931 default:
2932 break;
2933 }
2934 }
2935 }
2936
2937 /* Estimate the size of a frag before relaxing. We only have one type of frag
2938 to handle here, which is the unwind info frag. */
2939
2940 int
2941 ia64_estimate_size_before_relax (fragS *frag,
2942 asection *segtype ATTRIBUTE_UNUSED)
2943 {
2944 unw_rec_list *list;
2945 int len, size, pad;
2946
2947 /* ??? This code is identical to the first part of ia64_convert_frag. */
2948 list = (unw_rec_list *) frag->fr_opcode;
2949 fixup_unw_records (list, 0);
2950
2951 len = calc_record_size (list);
2952 /* pad to pointer-size boundary. */
2953 pad = len % md.pointer_size;
2954 if (pad != 0)
2955 len += md.pointer_size - pad;
2956 /* Add 8 for the header. */
2957 size = len + 8;
2958 /* Add a pointer for the personality offset. */
2959 if (frag->fr_offset)
2960 size += md.pointer_size;
2961
2962 /* fr_var carries the max_chars that we created the fragment with.
2963 We must, of course, have allocated enough memory earlier. */
2964 gas_assert (frag->fr_var >= size);
2965
2966 return frag->fr_fix + size;
2967 }
2968
2969 /* This function converts a rs_machine_dependent variant frag into a
2970 normal fill frag with the unwind image from the record list. */
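/* The 8-byte header written below holds the unwind format version in its
   top 16 bits, the handler/flag bits (see flag_value) starting at bit 32,
   and the length of the descriptor area, in units of md.pointer_size, in
   the low bits.  */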
2971 void
2972 ia64_convert_frag (fragS *frag)
2973 {
2974 unw_rec_list *list;
2975 int len, size, pad;
2976 valueT flag_value;
2977
2978 /* ??? This code is identical to ia64_estimate_size_before_relax. */
2979 list = (unw_rec_list *) frag->fr_opcode;
2980 fixup_unw_records (list, 0);
2981
2982 len = calc_record_size (list);
2983 /* pad to pointer-size boundary. */
2984 pad = len % md.pointer_size;
2985 if (pad != 0)
2986 len += md.pointer_size - pad;
2987 /* Add 8 for the header. */
2988 size = len + 8;
2989 /* Add a pointer for the personality offset. */
2990 if (frag->fr_offset)
2991 size += md.pointer_size;
2992
2993 /* fr_var carries the max_chars that we created the fragment with.
2994 We must, of course, have allocated enough memory earlier. */
2995 gas_assert (frag->fr_var >= size);
2996
2997 /* Initialize the header area. fr_offset is initialized with
2998 unwind.personality_routine. */
2999 if (frag->fr_offset)
3000 {
3001 if (md.flags & EF_IA_64_ABI64)
3002 flag_value = (bfd_vma) 3 << 32;
3003 else
3004 /* 32-bit unwind info block. */
3005 flag_value = (bfd_vma) 0x1003 << 32;
3006 }
3007 else
3008 flag_value = 0;
3009
3010 md_number_to_chars (frag->fr_literal,
3011 (((bfd_vma) 1 << 48) /* Version. */
3012 | flag_value /* U & E handler flags. */
3013 | (len / md.pointer_size)), /* Length. */
3014 8);
3015
3016 /* Skip the header. */
3017 vbyte_mem_ptr = frag->fr_literal + 8;
3018 process_unw_records (list, output_vbyte_mem);
3019
3020 /* Fill the padding bytes with zeros. */
3021 if (pad != 0)
3022 md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
3023 md.pointer_size - pad);
3024 /* Fill the unwind personality with zeros. */
3025 if (frag->fr_offset)
3026 md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
3027 md.pointer_size);
3028
3029 frag->fr_fix += size;
3030 frag->fr_type = rs_fill;
3031 frag->fr_var = 0;
3032 frag->fr_offset = 0;
3033 }
3034
3035 static int
3036 parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
3037 {
3038 int sep = parse_operand_and_eval (e, ',');
3039
3040 *qp = e->X_add_number - REG_P;
3041 if (e->X_op != O_register || *qp > 63)
3042 {
3043 as_bad (_("First operand to .%s must be a predicate"), po);
3044 *qp = 0;
3045 }
3046 else if (*qp == 0)
3047 as_warn (_("Pointless use of p0 as first operand to .%s"), po);
3048 if (sep == ',')
3049 sep = parse_operand_and_eval (e, ',');
3050 else
3051 e->X_op = O_absent;
3052 return sep;
3053 }
3054
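/* Map a preserved-register operand onto the (ab, reg) pair used by the
   X-format spill records: ab 0 covers r4-r7, ab 1 covers f2-f5 and
   f16-f31, ab 2 covers b1-b5, and ab 3 the remaining preserved resources
   (pr, psp, priunat, rp, ar.bsp, ar.bspstore, ar.rnat, ar.unat, ar.fpsr,
   ar.pfs, ar.lc).  */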
3055 static void
3056 convert_expr_to_ab_reg (const expressionS *e,
3057 unsigned int *ab,
3058 unsigned int *regp,
3059 const char *po,
3060 int n)
3061 {
3062 unsigned int reg = e->X_add_number;
3063
3064 *ab = *regp = 0; /* Anything valid is good here. */
3065
3066 if (e->X_op != O_register)
3067 reg = REG_GR; /* Anything invalid is good here. */
3068
3069 if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
3070 {
3071 *ab = 0;
3072 *regp = reg - REG_GR;
3073 }
3074 else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
3075 || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
3076 {
3077 *ab = 1;
3078 *regp = reg - REG_FR;
3079 }
3080 else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
3081 {
3082 *ab = 2;
3083 *regp = reg - REG_BR;
3084 }
3085 else
3086 {
3087 *ab = 3;
3088 switch (reg)
3089 {
3090 case REG_PR: *regp = 0; break;
3091 case REG_PSP: *regp = 1; break;
3092 case REG_PRIUNAT: *regp = 2; break;
3093 case REG_BR + 0: *regp = 3; break;
3094 case REG_AR + AR_BSP: *regp = 4; break;
3095 case REG_AR + AR_BSPSTORE: *regp = 5; break;
3096 case REG_AR + AR_RNAT: *regp = 6; break;
3097 case REG_AR + AR_UNAT: *regp = 7; break;
3098 case REG_AR + AR_FPSR: *regp = 8; break;
3099 case REG_AR + AR_PFS: *regp = 9; break;
3100 case REG_AR + AR_LC: *regp = 10; break;
3101
3102 default:
3103 as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
3104 break;
3105 }
3106 }
3107 }
3108
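/* Map a writable target-register operand onto the (xy, reg) pair of the
   spill_reg records: xy 0 selects a general register (r1-r127), xy 1 a
   floating-point register (f2-f127), and xy 2 a branch register
   (b0-b7).  */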
3109 static void
3110 convert_expr_to_xy_reg (const expressionS *e,
3111 unsigned int *xy,
3112 unsigned int *regp,
3113 const char *po,
3114 int n)
3115 {
3116 unsigned int reg = e->X_add_number;
3117
3118 *xy = *regp = 0; /* Anything valid is good here. */
3119
3120 if (e->X_op != O_register)
3121 reg = REG_GR; /* Anything invalid is good here. */
3122
3123 if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
3124 {
3125 *xy = 0;
3126 *regp = reg - REG_GR;
3127 }
3128 else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
3129 {
3130 *xy = 1;
3131 *regp = reg - REG_FR;
3132 }
3133 else if (reg >= REG_BR && reg <= (REG_BR + 7))
3134 {
3135 *xy = 2;
3136 *regp = reg - REG_BR;
3137 }
3138 else
3139 as_bad (_("Operand %d to .%s must be a writable register"), n, po);
3140 }
3141
3142 static void
3143 dot_align (int arg)
3144 {
3145 /* The current frag is an alignment frag. */
3146 align_frag = frag_now;
3147 s_align_bytes (arg);
3148 }
3149
3150 static void
3151 dot_radix (int dummy ATTRIBUTE_UNUSED)
3152 {
3153 char *radix;
3154 int ch;
3155
3156 SKIP_WHITESPACE ();
3157
3158 if (is_it_end_of_statement ())
3159 return;
3160 ch = get_symbol_name (&radix);
3161 ia64_canonicalize_symbol_name (radix);
3162 if (strcasecmp (radix, "C"))
3163 as_bad (_("Radix `%s' unsupported or invalid"), radix);
3164 (void) restore_line_pointer (ch);
3165 demand_empty_rest_of_line ();
3166 }
3167
3168 /* Helper function for .loc directives. If the assembler is not generating
3169 line number info, then we need to remember which instructions have a .loc
3170 directive, and only call dwarf2_gen_line_info for those instructions. */
3171
3172 static void
3173 dot_loc (int x)
3174 {
3175 CURR_SLOT.loc_directive_seen = 1;
3176 dwarf2_directive_loc (x);
3177 }
3178
3179 /* .sbss, .bss etc. are macros that expand into ".section SECNAME". */
3180 static void
3181 dot_special_section (int which)
3182 {
3183 set_section ((char *) special_section_name[which]);
3184 }
3185
3186 /* Return -1 for warning and 0 for error. */
3187
3188 static int
3189 unwind_diagnostic (const char * region, const char *directive)
3190 {
3191 if (md.unwind_check == unwind_check_warning)
3192 {
3193 as_warn (_(".%s outside of %s"), directive, region);
3194 return -1;
3195 }
3196 else
3197 {
3198 as_bad (_(".%s outside of %s"), directive, region);
3199 ignore_rest_of_line ();
3200 return 0;
3201 }
3202 }
3203
3204 /* Return 1 if a directive is in a procedure, -1 if a directive isn't in
3205 a procedure but the unwind directive check is set to warning, 0 if
3206 a directive isn't in a procedure and the unwind directive check is set
3207 to error. */
3208
3209 static int
3210 in_procedure (const char *directive)
3211 {
3212 if (unwind.proc_pending.sym
3213 && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
3214 return 1;
3215 return unwind_diagnostic ("procedure", directive);
3216 }
3217
3218 /* Return 1 if a directive is in a prologue, -1 if a directive isn't in
3219 a prologue but the unwind directive check is set to warning, 0 if
3220 a directive isn't in a prologue and the unwind directive check is set
3221 to error. */
3222
3223 static int
3224 in_prologue (const char *directive)
3225 {
3226 int in = in_procedure (directive);
3227
3228 if (in > 0 && !unwind.prologue)
3229 in = unwind_diagnostic ("prologue", directive);
3230 check_pending_save ();
3231 return in;
3232 }
3233
3234 /* Return 1 if a directive is in a body, -1 if a directive isn't in
3235 a body but the unwind directive check is set to warning, 0 if
3236 a directive isn't in a body and the unwind directive check is set
3237 to error. */
3238
3239 static int
3240 in_body (const char *directive)
3241 {
3242 int in = in_procedure (directive);
3243
3244 if (in > 0 && !unwind.body)
3245 in = unwind_diagnostic ("body region", directive);
3246 return in;
3247 }
3248
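/* Append PTR (possibly NULL after a parse error, or the head of a chain
   of records) to the per-procedure unwind list.  SEP is the separator
   left over from operand parsing: ',' means an optional tag may still
   follow, while NOT_A_CHAR suppresses the end-of-line check (used for
   the second entry of a pair).  */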
3249 static void
3250 add_unwind_entry (unw_rec_list *ptr, int sep)
3251 {
3252 if (ptr)
3253 {
3254 if (unwind.tail)
3255 unwind.tail->next = ptr;
3256 else
3257 unwind.list = ptr;
3258 unwind.tail = ptr;
3259
3260 /* The current entry can in fact be a chain of unwind entries. */
3261 if (unwind.current_entry == NULL)
3262 unwind.current_entry = ptr;
3263 }
3268
3269 if (sep == ',')
3270 {
3271 char *name;
3272 /* Parse a tag permitted for the current directive. */
3273 int ch;
3274
3275 SKIP_WHITESPACE ();
3276 ch = get_symbol_name (&name);
3277 /* FIXME: For now, just issue a warning that this isn't implemented. */
3278 {
3279 static int warned;
3280
3281 if (!warned)
3282 {
3283 warned = 1;
3284 as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
3285 }
3286 }
3287 (void) restore_line_pointer (ch);
3288 }
3289 if (sep != NOT_A_CHAR)
3290 demand_empty_rest_of_line ();
3291 }
3292
3293 static void
3294 dot_fframe (int dummy ATTRIBUTE_UNUSED)
3295 {
3296 expressionS e;
3297 int sep;
3298
3299 if (!in_prologue ("fframe"))
3300 return;
3301
3302 sep = parse_operand_and_eval (&e, ',');
3303
3304 if (e.X_op != O_constant)
3305 {
3306 as_bad (_("First operand to .fframe must be a constant"));
3307 e.X_add_number = 0;
3308 }
3309 add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
3310 }
3311
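/* A note on the checks against .prologue below: the grsave registers are
   assigned in the fixed order rp, ar.pfs, psp, pr, so for a prologue_mask
   bit B the register expected by .vframe/.save is
   prologue_gr + popcount (prologue_mask & -(B << 1)), i.e. prologue_gr
   plus the number of registers preceding B in that order.  */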
3312 static void
3313 dot_vframe (int dummy ATTRIBUTE_UNUSED)
3314 {
3315 expressionS e;
3316 unsigned reg;
3317 int sep;
3318
3319 if (!in_prologue ("vframe"))
3320 return;
3321
3322 sep = parse_operand_and_eval (&e, ',');
3323 reg = e.X_add_number - REG_GR;
3324 if (e.X_op != O_register || reg > 127)
3325 {
3326 as_bad (_("First operand to .vframe must be a general register"));
3327 reg = 0;
3328 }
3329 add_unwind_entry (output_mem_stack_v (), sep);
3330 if (! (unwind.prologue_mask & 2))
3331 add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
3332 else if (reg != unwind.prologue_gr
3333 + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
3334 as_warn (_("Operand of .vframe contradicts .prologue"));
3335 }
3336
3337 static void
3338 dot_vframesp (int psp)
3339 {
3340 expressionS e;
3341 int sep;
3342
3343 if (psp)
3344 as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));
3345
3346 if (!in_prologue ("vframesp"))
3347 return;
3348
3349 sep = parse_operand_and_eval (&e, ',');
3350 if (e.X_op != O_constant)
3351 {
3352 as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
3353 e.X_add_number = 0;
3354 }
3355 add_unwind_entry (output_mem_stack_v (), sep);
3356 add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
3357 }
3358
3359 static void
3360 dot_save (int dummy ATTRIBUTE_UNUSED)
3361 {
3362 expressionS e1, e2;
3363 unsigned reg1, reg2;
3364 int sep;
3365
3366 if (!in_prologue ("save"))
3367 return;
3368
3369 sep = parse_operand_and_eval (&e1, ',');
3370 if (sep == ',')
3371 sep = parse_operand_and_eval (&e2, ',');
3372 else
3373 e2.X_op = O_absent;
3374
3375 reg1 = e1.X_add_number;
3376 /* Make sure it's a valid ar.xxx reg, or it's br0, aka 'rp'. */
3377 if (e1.X_op != O_register)
3378 {
3379 as_bad (_("First operand to .save not a register"));
3380 reg1 = REG_PR; /* Anything valid is good here. */
3381 }
3382 reg2 = e2.X_add_number - REG_GR;
3383 if (e2.X_op != O_register || reg2 > 127)
3384 {
3385 as_bad (_("Second operand to .save not a valid register"));
3386 reg2 = 0;
3387 }
3388 switch (reg1)
3389 {
3390 case REG_AR + AR_BSP:
3391 add_unwind_entry (output_bsp_when (), sep);
3392 add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
3393 break;
3394 case REG_AR + AR_BSPSTORE:
3395 add_unwind_entry (output_bspstore_when (), sep);
3396 add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
3397 break;
3398 case REG_AR + AR_RNAT:
3399 add_unwind_entry (output_rnat_when (), sep);
3400 add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
3401 break;
3402 case REG_AR + AR_UNAT:
3403 add_unwind_entry (output_unat_when (), sep);
3404 add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
3405 break;
3406 case REG_AR + AR_FPSR:
3407 add_unwind_entry (output_fpsr_when (), sep);
3408 add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
3409 break;
3410 case REG_AR + AR_PFS:
3411 add_unwind_entry (output_pfs_when (), sep);
3412 if (! (unwind.prologue_mask & 4))
3413 add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
3414 else if (reg2 != unwind.prologue_gr
3415 + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
3416 as_warn (_("Second operand of .save contradicts .prologue"));
3417 break;
3418 case REG_AR + AR_LC:
3419 add_unwind_entry (output_lc_when (), sep);
3420 add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
3421 break;
3422 case REG_BR:
3423 add_unwind_entry (output_rp_when (), sep);
3424 if (! (unwind.prologue_mask & 8))
3425 add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
3426 else if (reg2 != unwind.prologue_gr)
3427 as_warn (_("Second operand of .save contradicts .prologue"));
3428 break;
3429 case REG_PR:
3430 add_unwind_entry (output_preds_when (), sep);
3431 if (! (unwind.prologue_mask & 1))
3432 add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
3433 else if (reg2 != unwind.prologue_gr
3434 + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
3435 as_warn (_("Second operand of .save contradicts .prologue"));
3436 break;
3437 case REG_PRIUNAT:
3438 add_unwind_entry (output_priunat_when_gr (), sep);
3439 add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
3440 break;
3441 default:
3442 as_bad (_("First operand to .save not a valid register"));
3443 add_unwind_entry (NULL, sep);
3444 break;
3445 }
3446 }
3447
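/* Handle ".restore sp[, ecount]".  ECOUNT counts additional nested
   prologue regions to pop on top of the current one; when omitted it
   defaults to unwind.prologue_count - 1, i.e. every open region is
   popped.  */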
3448 static void
3449 dot_restore (int dummy ATTRIBUTE_UNUSED)
3450 {
3451 expressionS e1;
3452 unsigned long ecount; /* # of _additional_ regions to pop */
3453 int sep;
3454
3455 if (!in_body ("restore"))
3456 return;
3457
3458 sep = parse_operand_and_eval (&e1, ',');
3459 if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
3460 as_bad (_("First operand to .restore must be stack pointer (sp)"));
3461
3462 if (sep == ',')
3463 {
3464 expressionS e2;
3465
3466 sep = parse_operand_and_eval (&e2, ',');
3467 if (e2.X_op != O_constant || e2.X_add_number < 0)
3468 {
3469 as_bad (_("Second operand to .restore must be a constant >= 0"));
3470 e2.X_add_number = 0;
3471 }
3472 ecount = e2.X_add_number;
3473 }
3474 else
3475 ecount = unwind.prologue_count - 1;
3476
3477 if (ecount >= unwind.prologue_count)
3478 {
3479 as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
3480 ecount + 1, unwind.prologue_count);
3481 ecount = 0;
3482 }
3483
3484 add_unwind_entry (output_epilogue (ecount), sep);
3485
3486 if (ecount < unwind.prologue_count)
3487 unwind.prologue_count -= ecount + 1;
3488 else
3489 unwind.prologue_count = 0;
3490 }
3491
3492 static void
3493 dot_restorereg (int pred)
3494 {
3495 unsigned int qp, ab, reg;
3496 expressionS e;
3497 int sep;
3498 const char * const po = pred ? "restorereg.p" : "restorereg";
3499
3500 if (!in_procedure (po))
3501 return;
3502
3503 if (pred)
3504 sep = parse_predicate_and_operand (&e, &qp, po);
3505 else
3506 {
3507 sep = parse_operand_and_eval (&e, ',');
3508 qp = 0;
3509 }
3510 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
3511
3512 add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
3513 }
3514
3515 static const char *special_linkonce_name[] =
3516 {
3517 ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
3518 };
3519
3520 static void
3521 start_unwind_section (const segT text_seg, int sec_index)
3522 {
3523 /*
3524 Use a slightly ugly scheme to derive the unwind section names from
3525 the text section name:
3526
3527 text sect.            unwind table sect.
3528 name:                 name:                       comments:
3529 ----------            -----------------           --------------------------------
3530 .text                 .IA_64.unwind
3531 .text.foo             .IA_64.unwind.text.foo
3532 .foo                  .IA_64.unwind.foo
3533 .gnu.linkonce.t.foo
3534                       .gnu.linkonce.ia64unw.foo
3535 _info                 .IA_64.unwind_info          gas issues error message (ditto)
3536 _infoFOO              .IA_64.unwind_infoFOO       gas issues error message (ditto)
3537
3538 This mapping is done so that:
3539
3540 (a) An object file with unwind info only in .text will use
3541 unwind section names .IA_64.unwind and .IA_64.unwind_info.
3542 This follows the letter of the ABI and also ensures backwards
3543 compatibility with older toolchains.
3544
3545 (b) An object file with unwind info in multiple text sections
3546 will use separate unwind sections for each text section.
3547 This allows us to properly set the "sh_info" and "sh_link"
3548 fields in SHT_IA_64_UNWIND as required by the ABI and also
3549 lets GNU ld support programs with multiple segments
3550 containing unwind info (as might be the case for certain
3551 embedded applications).
3552
3553 (c) An error is issued if there would be a name clash.
3554 */
3555
3556 const char *text_name, *sec_text_name;
3557 char *sec_name;
3558 const char *prefix = special_section_name [sec_index];
3559 const char *suffix;
3560
3561 sec_text_name = segment_name (text_seg);
3562 text_name = sec_text_name;
3563 if (strncmp (text_name, "_info", 5) == 0)
3564 {
3565 as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
3566 text_name);
3567 ignore_rest_of_line ();
3568 return;
3569 }
3570 if (strcmp (text_name, ".text") == 0)
3571 text_name = "";
3572
3573 /* Build the unwind section name by appending the (possibly stripped)
3574 text section name to the unwind prefix. */
3575 suffix = text_name;
3576 if (strncmp (text_name, ".gnu.linkonce.t.",
3577 sizeof (".gnu.linkonce.t.") - 1) == 0)
3578 {
3579 prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
3580 suffix += sizeof (".gnu.linkonce.t.") - 1;
3581 }
3582
3583 sec_name = concat (prefix, suffix, NULL);
3584
3585 /* Handle COMDAT group. */
3586 if ((text_seg->flags & SEC_LINK_ONCE) != 0
3587 && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
3588 {
3589 char *section;
3590 const char *group_name = elf_group_name (text_seg);
3591
3592 if (group_name == NULL)
3593 {
3594 as_bad (_("Group section `%s' has no group signature"),
3595 sec_text_name);
3596 ignore_rest_of_line ();
3597 free (sec_name);
3598 return;
3599 }
3600
3601 /* We have to construct a fake section directive. */
3602 section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
3603 set_section (section);
3604 free (section);
3605 }
3606 else
3607 {
3608 set_section (sec_name);
3609 bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
3610 }
3611
3612 elf_linked_to_section (now_seg) = text_seg;
3613 free (sec_name);
3614 }
3615
3616 static void
3617 generate_unwind_image (const segT text_seg)
3618 {
3619 int size, pad;
3620 unw_rec_list *list;
3621
3622 /* Mark the end of the unwind info, so that we can compute the size of the
3623 last unwind region. */
3624 add_unwind_entry (output_endp (), NOT_A_CHAR);
3625
3626 /* Force out pending instructions, to make sure all unwind records have
3627 a valid slot_number field. */
3628 ia64_flush_insns ();
3629
3630 /* Generate the unwind record. */
3631 list = optimize_unw_records (unwind.list);
3632 fixup_unw_records (list, 1);
3633 size = calc_record_size (list);
3634
3635 if (size > 0 || unwind.force_unwind_entry)
3636 {
3637 unwind.force_unwind_entry = 0;
3638 /* pad to pointer-size boundary. */
3639 pad = size % md.pointer_size;
3640 if (pad != 0)
3641 size += md.pointer_size - pad;
3642 /* Add 8 for the header. */
3643 size += 8;
3644 /* Add a pointer for the personality offset. */
3645 if (unwind.personality_routine)
3646 size += md.pointer_size;
3647 }
3648
3649 /* If there are unwind records, switch sections, and output the info. */
3650 if (size != 0)
3651 {
3652 expressionS exp;
3653 bfd_reloc_code_real_type reloc;
3654
3655 start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);
3656
3657 /* Make sure the section has 4 byte alignment for ILP32 and
3658 8 byte alignment for LP64. */
3659 frag_align (md.pointer_size_shift, 0, 0);
3660 record_alignment (now_seg, md.pointer_size_shift);
3661
3662 /* Set expression which points to start of unwind descriptor area. */
3663 unwind.info = expr_build_dot ();
3664
3665 frag_var (rs_machine_dependent, size, size, 0, 0,
3666 (offsetT) (long) unwind.personality_routine,
3667 (char *) list);
3668
3669 /* Add the personality address to the image. */
3670 if (unwind.personality_routine != 0)
3671 {
3672 exp.X_op = O_symbol;
3673 exp.X_add_symbol = unwind.personality_routine;
3674 exp.X_add_number = 0;
3675
3676 if (md.flags & EF_IA_64_BE)
3677 {
3678 if (md.flags & EF_IA_64_ABI64)
3679 reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
3680 else
3681 reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
3682 }
3683 else
3684 {
3685 if (md.flags & EF_IA_64_ABI64)
3686 reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
3687 else
3688 reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
3689 }
3690
3691 fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
3692 md.pointer_size, &exp, 0, reloc);
3693 unwind.personality_routine = 0;
3694 }
3695 }
3696
3697 free_saved_prologue_counts ();
3698 unwind.list = unwind.tail = unwind.current_entry = NULL;
3699 }
3700
3701 static void
3702 dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
3703 {
3704 if (!in_procedure ("handlerdata"))
3705 return;
3706 unwind.force_unwind_entry = 1;
3707
3708 /* Remember which segment we're in so we can switch back after .endp */
3709 unwind.saved_text_seg = now_seg;
3710 unwind.saved_text_subseg = now_subseg;
3711
3712 /* Generate unwind info into unwind-info section and then leave that
3713 section as the currently active one so dataXX directives go into
3714 the language specific data area of the unwind info block. */
3715 generate_unwind_image (now_seg);
3716 demand_empty_rest_of_line ();
3717 }
3718
3719 static void
3720 dot_unwentry (int dummy ATTRIBUTE_UNUSED)
3721 {
3722 if (!in_procedure ("unwentry"))
3723 return;
3724 unwind.force_unwind_entry = 1;
3725 demand_empty_rest_of_line ();
3726 }
3727
3728 static void
3729 dot_altrp (int dummy ATTRIBUTE_UNUSED)
3730 {
3731 expressionS e;
3732 unsigned reg;
3733
3734 if (!in_prologue ("altrp"))
3735 return;
3736
3737 parse_operand_and_eval (&e, 0);
3738 reg = e.X_add_number - REG_BR;
3739 if (e.X_op != O_register || reg > 7)
3740 {
3741 as_bad (_("First operand to .altrp not a valid branch register"));
3742 reg = 0;
3743 }
3744 add_unwind_entry (output_rp_br (reg), 0);
3745 }
3746
3747 static void
3748 dot_savemem (int psprel)
3749 {
3750 expressionS e1, e2;
3751 int sep;
3752 int reg1, val;
3753 const char * const po = psprel ? "savepsp" : "savesp";
3754
3755 if (!in_prologue (po))
3756 return;
3757
3758 sep = parse_operand_and_eval (&e1, ',');
3759 if (sep == ',')
3760 sep = parse_operand_and_eval (&e2, ',');
3761 else
3762 e2.X_op = O_absent;
3763
3764 reg1 = e1.X_add_number;
3765 val = e2.X_add_number;
3766
3767 /* Make sure it's a valid ar.xxx reg, or it's br0, aka 'rp'. */
3768 if (e1.X_op != O_register)
3769 {
3770 as_bad (_("First operand to .%s not a register"), po);
3771 reg1 = REG_PR; /* Anything valid is good here. */
3772 }
3773 if (e2.X_op != O_constant)
3774 {
3775 as_bad (_("Second operand to .%s not a constant"), po);
3776 val = 0;
3777 }
3778
3779 switch (reg1)
3780 {
3781 case REG_AR + AR_BSP:
3782 add_unwind_entry (output_bsp_when (), sep);
3783 add_unwind_entry ((psprel
3784 ? output_bsp_psprel
3785 : output_bsp_sprel) (val), NOT_A_CHAR);
3786 break;
3787 case REG_AR + AR_BSPSTORE:
3788 add_unwind_entry (output_bspstore_when (), sep);
3789 add_unwind_entry ((psprel
3790 ? output_bspstore_psprel
3791 : output_bspstore_sprel) (val), NOT_A_CHAR);
3792 break;
3793 case REG_AR + AR_RNAT:
3794 add_unwind_entry (output_rnat_when (), sep);
3795 add_unwind_entry ((psprel
3796 ? output_rnat_psprel
3797 : output_rnat_sprel) (val), NOT_A_CHAR);
3798 break;
3799 case REG_AR + AR_UNAT:
3800 add_unwind_entry (output_unat_when (), sep);
3801 add_unwind_entry ((psprel
3802 ? output_unat_psprel
3803 : output_unat_sprel) (val), NOT_A_CHAR);
3804 break;
3805 case REG_AR + AR_FPSR:
3806 add_unwind_entry (output_fpsr_when (), sep);
3807 add_unwind_entry ((psprel
3808 ? output_fpsr_psprel
3809 : output_fpsr_sprel) (val), NOT_A_CHAR);
3810 break;
3811 case REG_AR + AR_PFS:
3812 add_unwind_entry (output_pfs_when (), sep);
3813 add_unwind_entry ((psprel
3814 ? output_pfs_psprel
3815 : output_pfs_sprel) (val), NOT_A_CHAR);
3816 break;
3817 case REG_AR + AR_LC:
3818 add_unwind_entry (output_lc_when (), sep);
3819 add_unwind_entry ((psprel
3820 ? output_lc_psprel
3821 : output_lc_sprel) (val), NOT_A_CHAR);
3822 break;
3823 case REG_BR:
3824 add_unwind_entry (output_rp_when (), sep);
3825 add_unwind_entry ((psprel
3826 ? output_rp_psprel
3827 : output_rp_sprel) (val), NOT_A_CHAR);
3828 break;
3829 case REG_PR:
3830 add_unwind_entry (output_preds_when (), sep);
3831 add_unwind_entry ((psprel
3832 ? output_preds_psprel
3833 : output_preds_sprel) (val), NOT_A_CHAR);
3834 break;
3835 case REG_PRIUNAT:
3836 add_unwind_entry (output_priunat_when_mem (), sep);
3837 add_unwind_entry ((psprel
3838 ? output_priunat_psprel
3839 : output_priunat_sprel) (val), NOT_A_CHAR);
3840 break;
3841 default:
3842 as_bad (_("First operand to .%s not a valid register"), po);
3843 add_unwind_entry (NULL, sep);
3844 break;
3845 }
3846 }
3847
3848 static void
3849 dot_saveg (int dummy ATTRIBUTE_UNUSED)
3850 {
3851 expressionS e;
3852 unsigned grmask;
3853 int sep;
3854
3855 if (!in_prologue ("save.g"))
3856 return;
3857
3858 sep = parse_operand_and_eval (&e, ',');
3859
3860 grmask = e.X_add_number;
3861 if (e.X_op != O_constant
3862 || e.X_add_number <= 0
3863 || e.X_add_number > 0xf)
3864 {
3865 as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
3866 grmask = 0;
3867 }
3868
3869 if (sep == ',')
3870 {
3871 unsigned reg;
3872 int n = popcount (grmask);
3873
3874 parse_operand_and_eval (&e, 0);
3875 reg = e.X_add_number - REG_GR;
3876 if (e.X_op != O_register || reg > 127)
3877 {
3878 as_bad (_("Second operand to .save.g must be a general register"));
3879 reg = 0;
3880 }
3881 else if (reg > 128U - n)
3882 {
3883 as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
3884 reg = 0;
3885 }
3886 add_unwind_entry (output_gr_gr (grmask, reg), 0);
3887 }
3888 else
3889 add_unwind_entry (output_gr_mem (grmask), 0);
3890 }
3891
3892 static void
3893 dot_savef (int dummy ATTRIBUTE_UNUSED)
3894 {
3895 expressionS e;
3896
3897 if (!in_prologue ("save.f"))
3898 return;
3899
3900 parse_operand_and_eval (&e, 0);
3901
3902 if (e.X_op != O_constant
3903 || e.X_add_number <= 0
3904 || e.X_add_number > 0xfffff)
3905 {
3906 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3907 e.X_add_number = 0;
3908 }
3909 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3910 }
3911
3912 static void
3913 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3914 {
3915 expressionS e;
3916 unsigned brmask;
3917 int sep;
3918
3919 if (!in_prologue ("save.b"))
3920 return;
3921
3922 sep = parse_operand_and_eval (&e, ',');
3923
3924 brmask = e.X_add_number;
3925 if (e.X_op != O_constant
3926 || e.X_add_number <= 0
3927 || e.X_add_number > 0x1f)
3928 {
3929 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3930 brmask = 0;
3931 }
3932
3933 if (sep == ',')
3934 {
3935 unsigned reg;
3936 int n = popcount (brmask);
3937
3938 parse_operand_and_eval (&e, 0);
3939 reg = e.X_add_number - REG_GR;
3940 if (e.X_op != O_register || reg > 127)
3941 {
3942 as_bad (_("Second operand to .save.b must be a general register"));
3943 reg = 0;
3944 }
3945 else if (reg > 128U - n)
3946 {
3947 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3948 reg = 0;
3949 }
3950 add_unwind_entry (output_br_gr (brmask, reg), 0);
3951 }
3952 else
3953 add_unwind_entry (output_br_mem (brmask), 0);
3954 }
3955
3956 static void
3957 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3958 {
3959 expressionS e1, e2;
3960
3961 if (!in_prologue ("save.gf"))
3962 return;
3963
3964 if (parse_operand_and_eval (&e1, ',') == ',')
3965 parse_operand_and_eval (&e2, 0);
3966 else
3967 e2.X_op = O_absent;
3968
3969 if (e1.X_op != O_constant
3970 || e1.X_add_number < 0
3971 || e1.X_add_number > 0xf)
3972 {
3973 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3974 e1.X_op = O_absent;
3975 e1.X_add_number = 0;
3976 }
3977 if (e2.X_op != O_constant
3978 || e2.X_add_number < 0
3979 || e2.X_add_number > 0xfffff)
3980 {
3981 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3982 e2.X_op = O_absent;
3983 e2.X_add_number = 0;
3984 }
3985 if (e1.X_op == O_constant
3986 && e2.X_op == O_constant
3987 && e1.X_add_number == 0
3988 && e2.X_add_number == 0)
3989 as_bad (_("Operands to .save.gf may not be both zero"));
3990
3991 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3992 }
3993
3994 static void
3995 dot_spill (int dummy ATTRIBUTE_UNUSED)
3996 {
3997 expressionS e;
3998
3999 if (!in_prologue ("spill"))
4000 return;
4001
4002 parse_operand_and_eval (&e, 0);
4003
4004 if (e.X_op != O_constant)
4005 {
4006 as_bad (_("Operand to .spill must be a constant"));
4007 e.X_add_number = 0;
4008 }
4009 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4010 }
4011
4012 static void
4013 dot_spillreg (int pred)
4014 {
4015 int sep;
4016 unsigned int qp, ab, xy, reg, treg;
4017 expressionS e;
4018 const char * const po = pred ? "spillreg.p" : "spillreg";
4019
4020 if (!in_procedure (po))
4021 return;
4022
4023 if (pred)
4024 sep = parse_predicate_and_operand (&e, &qp, po);
4025 else
4026 {
4027 sep = parse_operand_and_eval (&e, ',');
4028 qp = 0;
4029 }
4030 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4031
4032 if (sep == ',')
4033 sep = parse_operand_and_eval (&e, ',');
4034 else
4035 e.X_op = O_absent;
4036 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4037
4038 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4039 }
4040
4041 static void
4042 dot_spillmem (int psprel)
4043 {
4044 expressionS e;
4045 int pred = (psprel < 0), sep;
4046 unsigned int qp, ab, reg;
4047 const char * po;
4048
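/* The .spillsp.p/.spillpsp.p forms reach this handler with PSPREL passed
   as the one's complement of the plain flag, so a negative value marks
   the predicated variant and ~psprel recovers whether the offset is
   psp- or sp-relative.  */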
4049 if (pred)
4050 {
4051 psprel = ~psprel;
4052 po = psprel ? "spillpsp.p" : "spillsp.p";
4053 }
4054 else
4055 po = psprel ? "spillpsp" : "spillsp";
4056
4057 if (!in_procedure (po))
4058 return;
4059
4060 if (pred)
4061 sep = parse_predicate_and_operand (&e, &qp, po);
4062 else
4063 {
4064 sep = parse_operand_and_eval (&e, ',');
4065 qp = 0;
4066 }
4067 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4068
4069 if (sep == ',')
4070 sep = parse_operand_and_eval (&e, ',');
4071 else
4072 e.X_op = O_absent;
4073 if (e.X_op != O_constant)
4074 {
4075 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4076 e.X_add_number = 0;
4077 }
4078
4079 if (psprel)
4080 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4081 else
4082 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4083 }
4084
4085 static unsigned int
4086 get_saved_prologue_count (unsigned long lbl)
4087 {
4088 label_prologue_count *lpc = unwind.saved_prologue_counts;
4089
4090 while (lpc != NULL && lpc->label_number != lbl)
4091 lpc = lpc->next;
4092
4093 if (lpc != NULL)
4094 return lpc->prologue_count;
4095
4096 as_bad (_("Missing .label_state %ld"), lbl);
4097 return 1;
4098 }
4099
4100 static void
4101 save_prologue_count (unsigned long lbl, unsigned int count)
4102 {
4103 label_prologue_count *lpc = unwind.saved_prologue_counts;
4104
4105 while (lpc != NULL && lpc->label_number != lbl)
4106 lpc = lpc->next;
4107
4108 if (lpc != NULL)
4109 lpc->prologue_count = count;
4110 else
4111 {
4112 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4113
4114 new_lpc->next = unwind.saved_prologue_counts;
4115 new_lpc->label_number = lbl;
4116 new_lpc->prologue_count = count;
4117 unwind.saved_prologue_counts = new_lpc;
4118 }
4119 }
4120
4121 static void
4122 free_saved_prologue_counts (void)
4123 {
4124 label_prologue_count *lpc = unwind.saved_prologue_counts;
4125 label_prologue_count *next;
4126
4127 while (lpc != NULL)
4128 {
4129 next = lpc->next;
4130 free (lpc);
4131 lpc = next;
4132 }
4133
4134 unwind.saved_prologue_counts = NULL;
4135 }
4136
4137 static void
4138 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4139 {
4140 expressionS e;
4141
4142 if (!in_body ("label_state"))
4143 return;
4144
4145 parse_operand_and_eval (&e, 0);
4146 if (e.X_op == O_constant)
4147 save_prologue_count (e.X_add_number, unwind.prologue_count);
4148 else
4149 {
4150 as_bad (_("Operand to .label_state must be a constant"));
4151 e.X_add_number = 0;
4152 }
4153 add_unwind_entry (output_label_state (e.X_add_number), 0);
4154 }
4155
4156 static void
4157 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4158 {
4159 expressionS e;
4160
4161 if (!in_body ("copy_state"))
4162 return;
4163
4164 parse_operand_and_eval (&e, 0);
4165 if (e.X_op == O_constant)
4166 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4167 else
4168 {
4169 as_bad (_("Operand to .copy_state must be a constant"));
4170 e.X_add_number = 0;
4171 }
4172 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4173 }
4174
4175 static void
4176 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4177 {
4178 expressionS e1, e2;
4179 unsigned char sep;
4180
4181 if (!in_prologue ("unwabi"))
4182 return;
4183
4184 sep = parse_operand_and_eval (&e1, ',');
4185 if (sep == ',')
4186 parse_operand_and_eval (&e2, 0);
4187 else
4188 e2.X_op = O_absent;
4189
4190 if (e1.X_op != O_constant)
4191 {
4192 as_bad (_("First operand to .unwabi must be a constant"));
4193 e1.X_add_number = 0;
4194 }
4195
4196 if (e2.X_op != O_constant)
4197 {
4198 as_bad (_("Second operand to .unwabi must be a constant"));
4199 e2.X_add_number = 0;
4200 }
4201
4202 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4203 }
4204
4205 static void
4206 dot_personality (int dummy ATTRIBUTE_UNUSED)
4207 {
4208 char *name, *p, c;
4209
4210 if (!in_procedure ("personality"))
4211 return;
4212 SKIP_WHITESPACE ();
4213 c = get_symbol_name (&name);
4214 p = input_line_pointer;
4215 unwind.personality_routine = symbol_find_or_make (name);
4216 unwind.force_unwind_entry = 1;
4217 *p = c;
4218 SKIP_WHITESPACE_AFTER_NAME ();
4219 demand_empty_rest_of_line ();
4220 }
4221
4222 static void
4223 dot_proc (int dummy ATTRIBUTE_UNUSED)
4224 {
4225 char *name, *p, c;
4226 symbolS *sym;
4227 proc_pending *pending, *last_pending;
4228
4229 if (unwind.proc_pending.sym)
4230 {
4231 (md.unwind_check == unwind_check_warning
4232 ? as_warn
4233 : as_bad) (_("Missing .endp after previous .proc"));
4234 while (unwind.proc_pending.next)
4235 {
4236 pending = unwind.proc_pending.next;
4237 unwind.proc_pending.next = pending->next;
4238 free (pending);
4239 }
4240 }
4241 last_pending = NULL;
4242
4243 /* Parse names of main and alternate entry points and mark them as
4244 function symbols: */
4245 while (1)
4246 {
4247 SKIP_WHITESPACE ();
4248 c = get_symbol_name (&name);
4249 p = input_line_pointer;
4250 if (!*name)
4251 as_bad (_("Empty argument of .proc"));
4252 else
4253 {
4254 sym = symbol_find_or_make (name);
4255 if (S_IS_DEFINED (sym))
4256 as_bad (_("`%s' was already defined"), name);
4257 else if (!last_pending)
4258 {
4259 unwind.proc_pending.sym = sym;
4260 last_pending = &unwind.proc_pending;
4261 }
4262 else
4263 {
4264 pending = XNEW (proc_pending);
4265 pending->sym = sym;
4266 last_pending = last_pending->next = pending;
4267 }
4268 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4269 }
4270 *p = c;
4271 SKIP_WHITESPACE_AFTER_NAME ();
4272 if (*input_line_pointer != ',')
4273 break;
4274 ++input_line_pointer;
4275 }
4276 if (!last_pending)
4277 {
4278 unwind.proc_pending.sym = expr_build_dot ();
4279 last_pending = &unwind.proc_pending;
4280 }
4281 last_pending->next = NULL;
4282 demand_empty_rest_of_line ();
4283 do_align (4, NULL, 0, 0);
4284
4285 unwind.prologue = 0;
4286 unwind.prologue_count = 0;
4287 unwind.body = 0;
4288 unwind.insn = 0;
4289 unwind.list = unwind.tail = unwind.current_entry = NULL;
4290 unwind.personality_routine = 0;
4291 }
4292
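/* .body
   Mark the end of the prologue region and the start of the procedure
   body.  */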
4293 static void
4294 dot_body (int dummy ATTRIBUTE_UNUSED)
4295 {
4296 if (!in_procedure ("body"))
4297 return;
4298 if (!unwind.prologue && !unwind.body && unwind.insn)
4299 as_warn (_("Initial .body should precede any instructions"));
4300 check_pending_save ();
4301
4302 unwind.prologue = 0;
4303 unwind.prologue_mask = 0;
4304 unwind.body = 1;
4305
4306 add_unwind_entry (output_body (), 0);
4307 }
4308
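/* .prologue [<mask>, <grsave>]
   Start a prologue region.  <mask> is a 4-bit constant and <grsave>
   the first of the consecutive general registers holding the masked
   state.  */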
4309 static void
4310 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4311 {
4312 unsigned mask = 0, grsave = 0;
4313
4314 if (!in_procedure ("prologue"))
4315 return;
4316 if (unwind.prologue)
4317 {
4318 as_bad (_(".prologue within prologue"));
4319 ignore_rest_of_line ();
4320 return;
4321 }
4322 if (!unwind.body && unwind.insn)
4323 as_warn (_("Initial .prologue should precede any instructions"));
4324
4325 if (!is_it_end_of_statement ())
4326 {
4327 expressionS e;
4328 int n, sep = parse_operand_and_eval (&e, ',');
4329
4330 if (e.X_op != O_constant
4331 || e.X_add_number < 0
4332 || e.X_add_number > 0xf)
4333 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4334 else if (e.X_add_number == 0)
4335 as_warn (_("Pointless use of zero first operand to .prologue"));
4336 else
4337 mask = e.X_add_number;
4338
4339 n = popcount (mask);
4340
4341 if (sep == ',')
4342 parse_operand_and_eval (&e, 0);
4343 else
4344 e.X_op = O_absent;
4345
4346 if (e.X_op == O_constant
4347 && e.X_add_number >= 0
4348 && e.X_add_number < 128)
4349 {
4350 if (md.unwind_check == unwind_check_error)
4351 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4352 grsave = e.X_add_number;
4353 }
4354 else if (e.X_op != O_register
4355 || (grsave = e.X_add_number - REG_GR) > 127)
4356 {
4357 as_bad (_("Second operand to .prologue must be a general register"));
4358 grsave = 0;
4359 }
4360 else if (grsave > 128U - n)
4361 {
4362 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4363 grsave = 0;
4364 }
4365 }
4366
4367 if (mask)
4368 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4369 else
4370 add_unwind_entry (output_prologue (), 0);
4371
4372 unwind.prologue = 1;
4373 unwind.prologue_mask = mask;
4374 unwind.prologue_gr = grsave;
4375 unwind.body = 0;
4376 ++unwind.prologue_count;
4377 }
4378
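/* .endp [name [, name [, ...]]]
   End the current procedure: emit its unwind table entry and set the
   sizes of the entry point symbols named with .proc.  */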
4379 static void
4380 dot_endp (int dummy ATTRIBUTE_UNUSED)
4381 {
4382 expressionS e;
4383 int bytes_per_address;
4384 long where;
4385 segT saved_seg;
4386 subsegT saved_subseg;
4387 proc_pending *pending;
4388 int unwind_check = md.unwind_check;
4389
4390 md.unwind_check = unwind_check_error;
4391 if (!in_procedure ("endp"))
4392 return;
4393 md.unwind_check = unwind_check;
4394
4395 if (unwind.saved_text_seg)
4396 {
4397 saved_seg = unwind.saved_text_seg;
4398 saved_subseg = unwind.saved_text_subseg;
4399 unwind.saved_text_seg = NULL;
4400 }
4401 else
4402 {
4403 saved_seg = now_seg;
4404 saved_subseg = now_subseg;
4405 }
4406
4407 insn_group_break (1, 0, 0);
4408
4409 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4410 if (!unwind.info)
4411 generate_unwind_image (saved_seg);
4412
4413 if (unwind.info || unwind.force_unwind_entry)
4414 {
4415 symbolS *proc_end;
4416
4417 subseg_set (md.last_text_seg, 0);
4418 proc_end = expr_build_dot ();
4419
4420 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4421
4422 /* Make sure that the section has 4-byte alignment for ILP32 and
4423 8-byte alignment for LP64. */
4424 record_alignment (now_seg, md.pointer_size_shift);
4425
4426 /* Need space for 3 pointers for procedure start, procedure end,
4427 and unwind info. */
4428 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4429 where = frag_now_fix () - (3 * md.pointer_size);
4430 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4431
4432 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4433 e.X_op = O_pseudo_fixup;
4434 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4435 e.X_add_number = 0;
4436 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4437 && S_IS_DEFINED (unwind.proc_pending.sym))
4438 e.X_add_symbol = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4439 S_GET_VALUE (unwind.proc_pending.sym),
4440 symbol_get_frag (unwind.proc_pending.sym));
4441 else
4442 e.X_add_symbol = unwind.proc_pending.sym;
4443 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4444 BFD_RELOC_NONE);
4445
4446 e.X_op = O_pseudo_fixup;
4447 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4448 e.X_add_number = 0;
4449 e.X_add_symbol = proc_end;
4450 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4451 bytes_per_address, &e, BFD_RELOC_NONE);
4452
4453 if (unwind.info)
4454 {
4455 e.X_op = O_pseudo_fixup;
4456 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4457 e.X_add_number = 0;
4458 e.X_add_symbol = unwind.info;
4459 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4460 bytes_per_address, &e, BFD_RELOC_NONE);
4461 }
4462 }
4463 subseg_set (saved_seg, saved_subseg);
4464
4465 /* Set symbol sizes. */
4466 pending = &unwind.proc_pending;
4467 if (S_GET_NAME (pending->sym))
4468 {
4469 do
4470 {
4471 symbolS *sym = pending->sym;
4472
4473 if (!S_IS_DEFINED (sym))
4474 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4475 else if (S_GET_SIZE (sym) == 0
4476 && symbol_get_obj (sym)->size == NULL)
4477 {
4478 fragS *frag = symbol_get_frag (sym);
4479
4480 if (frag)
4481 {
4482 if (frag == frag_now && SEG_NORMAL (now_seg))
4483 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4484 else
4485 {
4486 symbol_get_obj (sym)->size = XNEW (expressionS);
4487 symbol_get_obj (sym)->size->X_op = O_subtract;
4488 symbol_get_obj (sym)->size->X_add_symbol
4489 = symbol_new (FAKE_LABEL_NAME, now_seg,
4490 frag_now_fix (), frag_now);
4491 symbol_get_obj (sym)->size->X_op_symbol = sym;
4492 symbol_get_obj (sym)->size->X_add_number = 0;
4493 }
4494 }
4495 }
4496 } while ((pending = pending->next) != NULL);
4497 }
4498
4499 /* Parse names of main and alternate entry points. */
4500 while (1)
4501 {
4502 char *name, *p, c;
4503
4504 SKIP_WHITESPACE ();
4505 c = get_symbol_name (&name);
4506 p = input_line_pointer;
4507 if (!*name)
4508 (md.unwind_check == unwind_check_warning
4509 ? as_warn
4510 : as_bad) (_("Empty argument of .endp"));
4511 else
4512 {
4513 symbolS *sym = symbol_find (name);
4514
4515 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4516 {
4517 if (sym == pending->sym)
4518 {
4519 pending->sym = NULL;
4520 break;
4521 }
4522 }
4523 if (!sym || !pending)
4524 as_warn (_("`%s' was not specified with previous .proc"), name);
4525 }
4526 *p = c;
4527 SKIP_WHITESPACE_AFTER_NAME ();
4528 if (*input_line_pointer != ',')
4529 break;
4530 ++input_line_pointer;
4531 }
4532 demand_empty_rest_of_line ();
4533
4534 /* Deliberately only checking for the main entry point here; the
4535 language spec even says all arguments to .endp are ignored. */
4536 if (unwind.proc_pending.sym
4537 && S_GET_NAME (unwind.proc_pending.sym)
4538 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4539 as_warn (_("`%s' should be an operand to this .endp"),
4540 S_GET_NAME (unwind.proc_pending.sym));
4541 while (unwind.proc_pending.next)
4542 {
4543 pending = unwind.proc_pending.next;
4544 unwind.proc_pending.next = pending->next;
4545 free (pending);
4546 }
4547 unwind.proc_pending.sym = unwind.info = NULL;
4548 }
4549
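/* .mii, .mlx, .mmi, etc.
   Explicitly select the bundle template to use for the current slot.  */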
4550 static void
4551 dot_template (int template_val)
4552 {
4553 CURR_SLOT.user_template = template_val;
4554 }
4555
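/* .regstk ins, locs, outs, rots
   Describe the register stack layout; all four operands default to
   zero when omitted.  */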
4556 static void
4557 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4558 {
4559 int ins, locs, outs, rots;
4560
4561 if (is_it_end_of_statement ())
4562 ins = locs = outs = rots = 0;
4563 else
4564 {
4565 ins = get_absolute_expression ();
4566 if (*input_line_pointer++ != ',')
4567 goto err;
4568 locs = get_absolute_expression ();
4569 if (*input_line_pointer++ != ',')
4570 goto err;
4571 outs = get_absolute_expression ();
4572 if (*input_line_pointer++ != ',')
4573 goto err;
4574 rots = get_absolute_expression ();
4575 }
4576 set_regstack (ins, locs, outs, rots);
4577 return;
4578
4579 err:
4580 as_bad (_("Comma expected"));
4581 ignore_rest_of_line ();
4582 }
4583
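/* .rotr/.rotf/.rotp name[n] [, name[n] [, ...]]
   Define names for groups of rotating general, floating-point, or
   predicate registers, e.g. ".rotr in[4], out[4]".  */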
4584 static void
4585 dot_rot (int type)
4586 {
4587 offsetT num_regs;
4588 valueT num_alloced = 0;
4589 struct dynreg **drpp, *dr;
4590 int ch, base_reg = 0;
4591 char *name, *start;
4592 size_t len;
4593
4594 switch (type)
4595 {
4596 case DYNREG_GR: base_reg = REG_GR + 32; break;
4597 case DYNREG_FR: base_reg = REG_FR + 32; break;
4598 case DYNREG_PR: base_reg = REG_P + 16; break;
4599 default: break;
4600 }
4601
4602 /* First, remove existing names from hash table. */
4603 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4604 {
4605 hash_delete (md.dynreg_hash, dr->name, FALSE);
4606 /* FIXME: Free dr->name. */
4607 dr->num_regs = 0;
4608 }
4609
4610 drpp = &md.dynreg[type];
4611 while (1)
4612 {
4613 ch = get_symbol_name (&start);
4614 len = strlen (ia64_canonicalize_symbol_name (start));
4615 *input_line_pointer = ch;
4616
4617 SKIP_WHITESPACE_AFTER_NAME ();
4618 if (*input_line_pointer != '[')
4619 {
4620 as_bad (_("Expected '['"));
4621 goto err;
4622 }
4623 ++input_line_pointer; /* skip '[' */
4624
4625 num_regs = get_absolute_expression ();
4626
4627 if (*input_line_pointer++ != ']')
4628 {
4629 as_bad (_("Expected ']'"));
4630 goto err;
4631 }
4632 if (num_regs <= 0)
4633 {
4634 as_bad (_("Number of elements must be positive"));
4635 goto err;
4636 }
4637 SKIP_WHITESPACE ();
4638
4639 num_alloced += num_regs;
4640 switch (type)
4641 {
4642 case DYNREG_GR:
4643 if (num_alloced > md.rot.num_regs)
4644 {
4645 as_bad (_("Used more than the declared %d rotating registers"),
4646 md.rot.num_regs);
4647 goto err;
4648 }
4649 break;
4650 case DYNREG_FR:
4651 if (num_alloced > 96)
4652 {
4653 as_bad (_("Used more than the available 96 rotating registers"));
4654 goto err;
4655 }
4656 break;
4657 case DYNREG_PR:
4658 if (num_alloced > 48)
4659 {
4660 as_bad (_("Used more than the available 48 rotating registers"));
4661 goto err;
4662 }
4663 break;
4664
4665 default:
4666 break;
4667 }
4668
4669 if (!*drpp)
4670 {
4671 *drpp = XOBNEW (&notes, struct dynreg);
4672 memset (*drpp, 0, sizeof (*dr));
4673 }
4674
4675 name = XOBNEWVEC (&notes, char, len + 1);
4676 memcpy (name, start, len);
4677 name[len] = '\0';
4678
4679 dr = *drpp;
4680 dr->name = name;
4681 dr->num_regs = num_regs;
4682 dr->base = base_reg;
4683 drpp = &dr->next;
4684 base_reg += num_regs;
4685
4686 if (hash_insert (md.dynreg_hash, name, dr))
4687 {
4688 as_bad (_("Attempt to redefine register set `%s'"), name);
4689 obstack_free (&notes, name);
4690 goto err;
4691 }
4692
4693 if (*input_line_pointer != ',')
4694 break;
4695 ++input_line_pointer; /* skip comma */
4696 SKIP_WHITESPACE ();
4697 }
4698 demand_empty_rest_of_line ();
4699 return;
4700
4701 err:
4702 ignore_rest_of_line ();
4703 }
4704
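/* .lsb / .msb
   Select little- or big-endian data emission for the current
   section.  */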
4705 static void
4706 dot_byteorder (int byteorder)
4707 {
4708 segment_info_type *seginfo = seg_info (now_seg);
4709
4710 if (byteorder == -1)
4711 {
4712 if (seginfo->tc_segment_info_data.endian == 0)
4713 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4714 byteorder = seginfo->tc_segment_info_data.endian == 1;
4715 }
4716 else
4717 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4718
4719 if (target_big_endian != byteorder)
4720 {
4721 target_big_endian = byteorder;
4722 if (target_big_endian)
4723 {
4724 ia64_number_to_chars = number_to_chars_bigendian;
4725 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4726 }
4727 else
4728 {
4729 ia64_number_to_chars = number_to_chars_littleendian;
4730 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4731 }
4732 }
4733 }
4734
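/* .psr <option> [, <option> [, ...]]
   where <option> is one of lsb, msb, abi32, or abi64; sets the
   corresponding ELF header flags.  */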
4735 static void
4736 dot_psr (int dummy ATTRIBUTE_UNUSED)
4737 {
4738 char *option;
4739 int ch;
4740
4741 while (1)
4742 {
4743 ch = get_symbol_name (&option);
4744 if (strcmp (option, "lsb") == 0)
4745 md.flags &= ~EF_IA_64_BE;
4746 else if (strcmp (option, "msb") == 0)
4747 md.flags |= EF_IA_64_BE;
4748 else if (strcmp (option, "abi32") == 0)
4749 md.flags &= ~EF_IA_64_ABI64;
4750 else if (strcmp (option, "abi64") == 0)
4751 md.flags |= EF_IA_64_ABI64;
4752 else
4753 as_bad (_("Unknown psr option `%s'"), option);
4754 *input_line_pointer = ch;
4755
4756 SKIP_WHITESPACE_AFTER_NAME ();
4757 if (*input_line_pointer != ',')
4758 break;
4759
4760 ++input_line_pointer;
4761 SKIP_WHITESPACE ();
4762 }
4763 demand_empty_rest_of_line ();
4764 }
4765
4766 static void
4767 dot_ln (int dummy ATTRIBUTE_UNUSED)
4768 {
4769 new_logical_line (0, get_absolute_expression ());
4770 demand_empty_rest_of_line ();
4771 }
4772
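/* Emit data into a different section without staying there: parse a
   section name, switch to that section, call BUILDER (suppressing
   automatic alignment if UA is nonzero), and switch back.  */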
4773 static void
4774 cross_section (int ref, void (*builder) (int), int ua)
4775 {
4776 char *start, *end;
4777 int saved_auto_align;
4778 unsigned int section_count;
4779 char *name;
4780 char c;
4781
4782 SKIP_WHITESPACE ();
4783 start = input_line_pointer;
4784 c = get_symbol_name (&name);
4785 if (input_line_pointer == start)
4786 {
4787 as_bad (_("Missing section name"));
4788 ignore_rest_of_line ();
4789 return;
4790 }
4791 * input_line_pointer = c;
4792 SKIP_WHITESPACE_AFTER_NAME ();
4793 end = input_line_pointer;
4794 if (*input_line_pointer != ',')
4795 {
4796 as_bad (_("Comma expected after section name"));
4797 ignore_rest_of_line ();
4798 return;
4799 }
4800 *end = '\0';
4801 end = input_line_pointer + 1; /* skip comma */
4802 input_line_pointer = start;
4803 md.keep_pending_output = 1;
4804 section_count = bfd_count_sections (stdoutput);
4805 obj_elf_section (0);
4806 if (section_count != bfd_count_sections (stdoutput))
4807 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4808 input_line_pointer = end;
4809 saved_auto_align = md.auto_align;
4810 if (ua)
4811 md.auto_align = 0;
4812 (*builder) (ref);
4813 if (ua)
4814 md.auto_align = saved_auto_align;
4815 obj_elf_previous (0);
4816 md.keep_pending_output = 0;
4817 }
4818
4819 static void
4820 dot_xdata (int size)
4821 {
4822 cross_section (size, cons, 0);
4823 }
4824
4825 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4826
4827 static void
4828 stmt_float_cons (int kind)
4829 {
4830 size_t alignment;
4831
4832 switch (kind)
4833 {
4834 case 'd':
4835 alignment = 3;
4836 break;
4837
4838 case 'x':
4839 case 'X':
4840 alignment = 4;
4841 break;
4842
4843 case 'f':
4844 default:
4845 alignment = 2;
4846 break;
4847 }
4848 do_align (alignment, NULL, 0, 0);
4849 float_cons (kind);
4850 }
4851
4852 static void
4853 stmt_cons_ua (int size)
4854 {
4855 int saved_auto_align = md.auto_align;
4856
4857 md.auto_align = 0;
4858 cons (size);
4859 md.auto_align = saved_auto_align;
4860 }
4861
4862 static void
4863 dot_xfloat_cons (int kind)
4864 {
4865 cross_section (kind, stmt_float_cons, 0);
4866 }
4867
4868 static void
4869 dot_xstringer (int zero)
4870 {
4871 cross_section (zero, stringer, 0);
4872 }
4873
4874 static void
4875 dot_xdata_ua (int size)
4876 {
4877 cross_section (size, cons, 1);
4878 }
4879
4880 static void
4881 dot_xfloat_cons_ua (int kind)
4882 {
4883 cross_section (kind, float_cons, 1);
4884 }
4885
4886 /* .reg.val <regname>,value */
4887
4888 static void
4889 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4890 {
4891 expressionS reg;
4892
4893 expression_and_evaluate (&reg);
4894 if (reg.X_op != O_register)
4895 {
4896 as_bad (_("Register name expected"));
4897 ignore_rest_of_line ();
4898 }
4899 else if (*input_line_pointer++ != ',')
4900 {
4901 as_bad (_("Comma expected"));
4902 ignore_rest_of_line ();
4903 }
4904 else
4905 {
4906 valueT value = get_absolute_expression ();
4907 int regno = reg.X_add_number;
4908 if (regno <= REG_GR || regno > REG_GR + 127)
4909 as_warn (_("Register value annotation ignored"));
4910 else
4911 {
4912 gr_values[regno - REG_GR].known = 1;
4913 gr_values[regno - REG_GR].value = value;
4914 gr_values[regno - REG_GR].path = md.path;
4915 }
4916 }
4917 demand_empty_rest_of_line ();
4918 }
4919
4920 /*
4921 .serialize.data
4922 .serialize.instruction
4923 */
4924 static void
4925 dot_serialize (int type)
4926 {
4927 insn_group_break (0, 0, 0);
4928 if (type)
4929 instruction_serialization ();
4930 else
4931 data_serialization ();
4932 insn_group_break (0, 0, 0);
4933 demand_empty_rest_of_line ();
4934 }
4935
4936 /* Select the DV checking mode:
4937 .auto
4938 .explicit
4939 .default
4940
4941 A stop is inserted when changing modes.
4942 */
4943
4944 static void
4945 dot_dv_mode (int type)
4946 {
4947 if (md.manual_bundling)
4948 as_warn (_("Directive invalid within a bundle"));
4949
4950 if (type == 'E' || type == 'A')
4951 md.mode_explicitly_set = 0;
4952 else
4953 md.mode_explicitly_set = 1;
4954
4955 md.detect_dv = 1;
4956 switch (type)
4957 {
4958 case 'A':
4959 case 'a':
4960 if (md.explicit_mode)
4961 insn_group_break (1, 0, 0);
4962 md.explicit_mode = 0;
4963 break;
4964 case 'E':
4965 case 'e':
4966 if (!md.explicit_mode)
4967 insn_group_break (1, 0, 0);
4968 md.explicit_mode = 1;
4969 break;
4970 default:
4971 case 'd':
4972 if (md.explicit_mode != md.default_explicit_mode)
4973 insn_group_break (1, 0, 0);
4974 md.explicit_mode = md.default_explicit_mode;
4975 md.mode_explicitly_set = 0;
4976 break;
4977 }
4978 }
4979
4980 static void
4981 print_prmask (valueT mask)
4982 {
4983 int regno;
4984 const char *comma = "";
4985 for (regno = 0; regno < 64; regno++)
4986 {
4987 if (mask & ((valueT) 1 << regno))
4988 {
4989 fprintf (stderr, "%s p%d", comma, regno);
4990 comma = ",";
4991 }
4992 }
4993 }
4994
4995 /*
4996 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4997 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4998 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4999 .pred.safe_across_calls p1 [, p2 [,...]]
5000 */
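/* For example (illustrative):
	.pred.rel.mutex p6, p7
	.pred.safe_across_calls p1-p5, p16-p63  */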
5001
5002 static void
5003 dot_pred_rel (int type)
5004 {
5005 valueT mask = 0;
5006 int count = 0;
5007 int p1 = -1, p2 = -1;
5008
5009 if (type == 0)
5010 {
5011 if (*input_line_pointer == '"')
5012 {
5013 int len;
5014 char *form = demand_copy_C_string (&len);
5015
5016 if (strcmp (form, "mutex") == 0)
5017 type = 'm';
5018 else if (strcmp (form, "clear") == 0)
5019 type = 'c';
5020 else if (strcmp (form, "imply") == 0)
5021 type = 'i';
5022 obstack_free (&notes, form);
5023 }
5024 else if (*input_line_pointer == '@')
5025 {
5026 char *form;
5027 char c;
5028
5029 ++input_line_pointer;
5030 c = get_symbol_name (&form);
5031
5032 if (strcmp (form, "mutex") == 0)
5033 type = 'm';
5034 else if (strcmp (form, "clear") == 0)
5035 type = 'c';
5036 else if (strcmp (form, "imply") == 0)
5037 type = 'i';
5038 (void) restore_line_pointer (c);
5039 }
5040 else
5041 {
5042 as_bad (_("Missing predicate relation type"));
5043 ignore_rest_of_line ();
5044 return;
5045 }
5046 if (type == 0)
5047 {
5048 as_bad (_("Unrecognized predicate relation type"));
5049 ignore_rest_of_line ();
5050 return;
5051 }
5052 if (*input_line_pointer == ',')
5053 ++input_line_pointer;
5054 SKIP_WHITESPACE ();
5055 }
5056
5057 while (1)
5058 {
5059 valueT bits = 1;
5060 int sep, regno;
5061 expressionS pr, *pr1, *pr2;
5062
5063 sep = parse_operand_and_eval (&pr, ',');
5064 if (pr.X_op == O_register
5065 && pr.X_add_number >= REG_P
5066 && pr.X_add_number <= REG_P + 63)
5067 {
5068 regno = pr.X_add_number - REG_P;
5069 bits <<= regno;
5070 count++;
5071 if (p1 == -1)
5072 p1 = regno;
5073 else if (p2 == -1)
5074 p2 = regno;
5075 }
5076 else if (type != 'i'
5077 && pr.X_op == O_subtract
5078 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5079 && pr1->X_op == O_register
5080 && pr1->X_add_number >= REG_P
5081 && pr1->X_add_number <= REG_P + 63
5082 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5083 && pr2->X_op == O_register
5084 && pr2->X_add_number >= REG_P
5085 && pr2->X_add_number <= REG_P + 63)
5086 {
5087 /* It's a range. */
5088 int stop;
5089
5090 regno = pr1->X_add_number - REG_P;
5091 stop = pr2->X_add_number - REG_P;
5092 if (regno >= stop)
5093 {
5094 as_bad (_("Bad register range"));
5095 ignore_rest_of_line ();
5096 return;
5097 }
5098 bits = ((bits << stop) << 1) - (bits << regno);
5099 count += stop - regno + 1;
5100 }
5101 else
5102 {
5103 as_bad (_("Predicate register expected"));
5104 ignore_rest_of_line ();
5105 return;
5106 }
5107 if (mask & bits)
5108 as_warn (_("Duplicate predicate register ignored"));
5109 mask |= bits;
5110 if (sep != ',')
5111 break;
5112 }
5113
5114 switch (type)
5115 {
5116 case 'c':
5117 if (count == 0)
5118 mask = ~(valueT) 0;
5119 clear_qp_mutex (mask);
5120 clear_qp_implies (mask, (valueT) 0);
5121 break;
5122 case 'i':
5123 if (count != 2 || p1 == -1 || p2 == -1)
5124 as_bad (_("Predicate source and target required"));
5125 else if (p1 == 0 || p2 == 0)
5126 as_bad (_("Use of p0 is not valid in this context"));
5127 else
5128 add_qp_imply (p1, p2);
5129 break;
5130 case 'm':
5131 if (count < 2)
5132 {
5133 as_bad (_("At least two PR arguments expected"));
5134 break;
5135 }
5136 else if (mask & 1)
5137 {
5138 as_bad (_("Use of p0 is not valid in this context"));
5139 break;
5140 }
5141 add_qp_mutex (mask);
5142 break;
5143 case 's':
5144 /* Note that we don't override any existing relations. */
5145 if (count == 0)
5146 {
5147 as_bad (_("At least one PR argument expected"));
5148 break;
5149 }
5150 if (md.debug_dv)
5151 {
5152 fprintf (stderr, "Safe across calls: ");
5153 print_prmask (mask);
5154 fprintf (stderr, "\n");
5155 }
5156 qp_safe_across_calls = mask;
5157 break;
5158 }
5159 demand_empty_rest_of_line ();
5160 }
5161
5162 /* .entry label [, label [, ...]]
5163 Hint to DV code that the given labels are to be considered entry points.
5164 Otherwise, only global labels are considered entry points. */
5165
5166 static void
5167 dot_entry (int dummy ATTRIBUTE_UNUSED)
5168 {
5169 const char *err;
5170 char *name;
5171 int c;
5172 symbolS *symbolP;
5173
5174 do
5175 {
5176 c = get_symbol_name (&name);
5177 symbolP = symbol_find_or_make (name);
5178
5179 err = hash_insert (md.entry_hash, S_GET_NAME (symbolP), (void *) symbolP);
5180 if (err)
5181 as_fatal (_("Inserting \"%s\" into entry hint table failed: %s"),
5182 name, err);
5183
5184 *input_line_pointer = c;
5185 SKIP_WHITESPACE_AFTER_NAME ();
5186 c = *input_line_pointer;
5187 if (c == ',')
5188 {
5189 input_line_pointer++;
5190 SKIP_WHITESPACE ();
5191 if (*input_line_pointer == '\n')
5192 c = '\n';
5193 }
5194 }
5195 while (c == ',');
5196
5197 demand_empty_rest_of_line ();
5198 }
5199
5200 /* .mem.offset offset, base
5201 "base" is used to distinguish between offsets from a different base. */
5202
5203 static void
5204 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5205 {
5206 md.mem_offset.hint = 1;
5207 md.mem_offset.offset = get_absolute_expression ();
5208 if (*input_line_pointer != ',')
5209 {
5210 as_bad (_("Comma expected"));
5211 ignore_rest_of_line ();
5212 return;
5213 }
5214 ++input_line_pointer;
5215 md.mem_offset.base = get_absolute_expression ();
5216 demand_empty_rest_of_line ();
5217 }
5218
5219 /* ia64-specific pseudo-ops: */
5220 const pseudo_typeS md_pseudo_table[] =
5221 {
5222 { "radix", dot_radix, 0 },
5223 { "lcomm", s_lcomm_bytes, 1 },
5224 { "loc", dot_loc, 0 },
5225 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5226 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5227 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5228 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5229 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5230 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5231 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5232 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5233 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5234 { "proc", dot_proc, 0 },
5235 { "body", dot_body, 0 },
5236 { "prologue", dot_prologue, 0 },
5237 { "endp", dot_endp, 0 },
5238
5239 { "fframe", dot_fframe, 0 },
5240 { "vframe", dot_vframe, 0 },
5241 { "vframesp", dot_vframesp, 0 },
5242 { "vframepsp", dot_vframesp, 1 },
5243 { "save", dot_save, 0 },
5244 { "restore", dot_restore, 0 },
5245 { "restorereg", dot_restorereg, 0 },
5246 { "restorereg.p", dot_restorereg, 1 },
5247 { "handlerdata", dot_handlerdata, 0 },
5248 { "unwentry", dot_unwentry, 0 },
5249 { "altrp", dot_altrp, 0 },
5250 { "savesp", dot_savemem, 0 },
5251 { "savepsp", dot_savemem, 1 },
5252 { "save.g", dot_saveg, 0 },
5253 { "save.f", dot_savef, 0 },
5254 { "save.b", dot_saveb, 0 },
5255 { "save.gf", dot_savegf, 0 },
5256 { "spill", dot_spill, 0 },
5257 { "spillreg", dot_spillreg, 0 },
5258 { "spillsp", dot_spillmem, 0 },
5259 { "spillpsp", dot_spillmem, 1 },
5260 { "spillreg.p", dot_spillreg, 1 },
5261 { "spillsp.p", dot_spillmem, ~0 },
5262 { "spillpsp.p", dot_spillmem, ~1 },
5263 { "label_state", dot_label_state, 0 },
5264 { "copy_state", dot_copy_state, 0 },
5265 { "unwabi", dot_unwabi, 0 },
5266 { "personality", dot_personality, 0 },
5267 { "mii", dot_template, 0x0 },
5268 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5269 { "mlx", dot_template, 0x2 },
5270 { "mmi", dot_template, 0x4 },
5271 { "mfi", dot_template, 0x6 },
5272 { "mmf", dot_template, 0x7 },
5273 { "mib", dot_template, 0x8 },
5274 { "mbb", dot_template, 0x9 },
5275 { "bbb", dot_template, 0xb },
5276 { "mmb", dot_template, 0xc },
5277 { "mfb", dot_template, 0xe },
5278 { "align", dot_align, 0 },
5279 { "regstk", dot_regstk, 0 },
5280 { "rotr", dot_rot, DYNREG_GR },
5281 { "rotf", dot_rot, DYNREG_FR },
5282 { "rotp", dot_rot, DYNREG_PR },
5283 { "lsb", dot_byteorder, 0 },
5284 { "msb", dot_byteorder, 1 },
5285 { "psr", dot_psr, 0 },
5286 { "alias", dot_alias, 0 },
5287 { "secalias", dot_alias, 1 },
5288 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5289
5290 { "xdata1", dot_xdata, 1 },
5291 { "xdata2", dot_xdata, 2 },
5292 { "xdata4", dot_xdata, 4 },
5293 { "xdata8", dot_xdata, 8 },
5294 { "xdata16", dot_xdata, 16 },
5295 { "xreal4", dot_xfloat_cons, 'f' },
5296 { "xreal8", dot_xfloat_cons, 'd' },
5297 { "xreal10", dot_xfloat_cons, 'x' },
5298 { "xreal16", dot_xfloat_cons, 'X' },
5299 { "xstring", dot_xstringer, 8 + 0 },
5300 { "xstringz", dot_xstringer, 8 + 1 },
5301
5302 /* unaligned versions: */
5303 { "xdata2.ua", dot_xdata_ua, 2 },
5304 { "xdata4.ua", dot_xdata_ua, 4 },
5305 { "xdata8.ua", dot_xdata_ua, 8 },
5306 { "xdata16.ua", dot_xdata_ua, 16 },
5307 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5308 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5309 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5310 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5311
5312 /* annotations/DV checking support */
5313 { "entry", dot_entry, 0 },
5314 { "mem.offset", dot_mem_offset, 0 },
5315 { "pred.rel", dot_pred_rel, 0 },
5316 { "pred.rel.clear", dot_pred_rel, 'c' },
5317 { "pred.rel.imply", dot_pred_rel, 'i' },
5318 { "pred.rel.mutex", dot_pred_rel, 'm' },
5319 { "pred.safe_across_calls", dot_pred_rel, 's' },
5320 { "reg.val", dot_reg_val, 0 },
5321 { "serialize.data", dot_serialize, 0 },
5322 { "serialize.instruction", dot_serialize, 1 },
5323 { "auto", dot_dv_mode, 'a' },
5324 { "explicit", dot_dv_mode, 'e' },
5325 { "default", dot_dv_mode, 'd' },
5326
5327 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5328 IA-64 aligns data allocation pseudo-ops by default, so we have to
5329 tell it that these ones are supposed to be unaligned. Long term,
5330 should rewrite so that only IA-64 specific data allocation pseudo-ops
5331 are aligned by default. */
5332 {"2byte", stmt_cons_ua, 2},
5333 {"4byte", stmt_cons_ua, 4},
5334 {"8byte", stmt_cons_ua, 8},
5335
5336 #ifdef TE_VMS
5337 {"vms_common", obj_elf_vms_common, 0},
5338 #endif
5339
5340 { NULL, 0, 0 }
5341 };
5342
5343 static const struct pseudo_opcode
5344 {
5345 const char *name;
5346 void (*handler) (int);
5347 int arg;
5348 }
5349 pseudo_opcode[] =
5350 {
5351 /* these are more like pseudo-ops, but don't start with a dot */
5352 { "data1", cons, 1 },
5353 { "data2", cons, 2 },
5354 { "data4", cons, 4 },
5355 { "data8", cons, 8 },
5356 { "data16", cons, 16 },
5357 { "real4", stmt_float_cons, 'f' },
5358 { "real8", stmt_float_cons, 'd' },
5359 { "real10", stmt_float_cons, 'x' },
5360 { "real16", stmt_float_cons, 'X' },
5361 { "string", stringer, 8 + 0 },
5362 { "stringz", stringer, 8 + 1 },
5363
5364 /* unaligned versions: */
5365 { "data2.ua", stmt_cons_ua, 2 },
5366 { "data4.ua", stmt_cons_ua, 4 },
5367 { "data8.ua", stmt_cons_ua, 8 },
5368 { "data16.ua", stmt_cons_ua, 16 },
5369 { "real4.ua", float_cons, 'f' },
5370 { "real8.ua", float_cons, 'd' },
5371 { "real10.ua", float_cons, 'x' },
5372 { "real16.ua", float_cons, 'X' },
5373 };
5374
5375 /* Declare a register by creating a symbol for it and entering it in
5376 the symbol table. */
5377
5378 static symbolS *
5379 declare_register (const char *name, unsigned int regnum)
5380 {
5381 const char *err;
5382 symbolS *sym;
5383
5384 sym = symbol_create (name, reg_section, regnum, &zero_address_frag);
5385
5386 err = hash_insert (md.reg_hash, S_GET_NAME (sym), (void *) sym);
5387 if (err)
5388 as_fatal ("Inserting \"%s\" into register table failed: %s",
5389 name, err);
5390
5391 return sym;
5392 }
5393
5394 static void
5395 declare_register_set (const char *prefix,
5396 unsigned int num_regs,
5397 unsigned int base_regnum)
5398 {
5399 char name[8];
5400 unsigned int i;
5401
5402 for (i = 0; i < num_regs; ++i)
5403 {
5404 snprintf (name, sizeof (name), "%s%u", prefix, i);
5405 declare_register (name, base_regnum + i);
5406 }
5407 }
5408
5409 static unsigned int
5410 operand_width (enum ia64_opnd opnd)
5411 {
5412 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5413 unsigned int bits = 0;
5414 int i;
5415
5416 bits = 0;
5417 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5418 bits += odesc->field[i].bits;
5419
5420 return bits;
5421 }
5422
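/* Check whether the RES_INDEX'th operand of IDESC matches expression E.
   For relocatable operands a fixup is attached to the current slot.
   Returns OPERAND_MATCH, OPERAND_OUT_OF_RANGE, or OPERAND_MISMATCH.  */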
5423 static enum operand_match_result
5424 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5425 {
5426 enum ia64_opnd opnd = idesc->operands[res_index];
5427 int bits, relocatable = 0;
5428 struct insn_fix *fix;
5429 bfd_signed_vma val;
5430
5431 switch (opnd)
5432 {
5433 /* constants: */
5434
5435 case IA64_OPND_AR_CCV:
5436 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5437 return OPERAND_MATCH;
5438 break;
5439
5440 case IA64_OPND_AR_CSD:
5441 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5442 return OPERAND_MATCH;
5443 break;
5444
5445 case IA64_OPND_AR_PFS:
5446 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5447 return OPERAND_MATCH;
5448 break;
5449
5450 case IA64_OPND_GR0:
5451 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5452 return OPERAND_MATCH;
5453 break;
5454
5455 case IA64_OPND_IP:
5456 if (e->X_op == O_register && e->X_add_number == REG_IP)
5457 return OPERAND_MATCH;
5458 break;
5459
5460 case IA64_OPND_PR:
5461 if (e->X_op == O_register && e->X_add_number == REG_PR)
5462 return OPERAND_MATCH;
5463 break;
5464
5465 case IA64_OPND_PR_ROT:
5466 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5467 return OPERAND_MATCH;
5468 break;
5469
5470 case IA64_OPND_PSR:
5471 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5472 return OPERAND_MATCH;
5473 break;
5474
5475 case IA64_OPND_PSR_L:
5476 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5477 return OPERAND_MATCH;
5478 break;
5479
5480 case IA64_OPND_PSR_UM:
5481 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5482 return OPERAND_MATCH;
5483 break;
5484
5485 case IA64_OPND_C1:
5486 if (e->X_op == O_constant)
5487 {
5488 if (e->X_add_number == 1)
5489 return OPERAND_MATCH;
5490 else
5491 return OPERAND_OUT_OF_RANGE;
5492 }
5493 break;
5494
5495 case IA64_OPND_C8:
5496 if (e->X_op == O_constant)
5497 {
5498 if (e->X_add_number == 8)
5499 return OPERAND_MATCH;
5500 else
5501 return OPERAND_OUT_OF_RANGE;
5502 }
5503 break;
5504
5505 case IA64_OPND_C16:
5506 if (e->X_op == O_constant)
5507 {
5508 if (e->X_add_number == 16)
5509 return OPERAND_MATCH;
5510 else
5511 return OPERAND_OUT_OF_RANGE;
5512 }
5513 break;
5514
5515 /* register operands: */
5516
5517 case IA64_OPND_AR3:
5518 if (e->X_op == O_register && e->X_add_number >= REG_AR
5519 && e->X_add_number < REG_AR + 128)
5520 return OPERAND_MATCH;
5521 break;
5522
5523 case IA64_OPND_B1:
5524 case IA64_OPND_B2:
5525 if (e->X_op == O_register && e->X_add_number >= REG_BR
5526 && e->X_add_number < REG_BR + 8)
5527 return OPERAND_MATCH;
5528 break;
5529
5530 case IA64_OPND_CR3:
5531 if (e->X_op == O_register && e->X_add_number >= REG_CR
5532 && e->X_add_number < REG_CR + 128)
5533 return OPERAND_MATCH;
5534 break;
5535
5536 case IA64_OPND_DAHR3:
5537 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5538 && e->X_add_number < REG_DAHR + 8)
5539 return OPERAND_MATCH;
5540 break;
5541
5542 case IA64_OPND_F1:
5543 case IA64_OPND_F2:
5544 case IA64_OPND_F3:
5545 case IA64_OPND_F4:
5546 if (e->X_op == O_register && e->X_add_number >= REG_FR
5547 && e->X_add_number < REG_FR + 128)
5548 return OPERAND_MATCH;
5549 break;
5550
5551 case IA64_OPND_P1:
5552 case IA64_OPND_P2:
5553 if (e->X_op == O_register && e->X_add_number >= REG_P
5554 && e->X_add_number < REG_P + 64)
5555 return OPERAND_MATCH;
5556 break;
5557
5558 case IA64_OPND_R1:
5559 case IA64_OPND_R2:
5560 case IA64_OPND_R3:
5561 if (e->X_op == O_register && e->X_add_number >= REG_GR
5562 && e->X_add_number < REG_GR + 128)
5563 return OPERAND_MATCH;
5564 break;
5565
5566 case IA64_OPND_R3_2:
5567 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5568 {
5569 if (e->X_add_number < REG_GR + 4)
5570 return OPERAND_MATCH;
5571 else if (e->X_add_number < REG_GR + 128)
5572 return OPERAND_OUT_OF_RANGE;
5573 }
5574 break;
5575
5576 /* indirect operands: */
5577 case IA64_OPND_CPUID_R3:
5578 case IA64_OPND_DBR_R3:
5579 case IA64_OPND_DTR_R3:
5580 case IA64_OPND_ITR_R3:
5581 case IA64_OPND_IBR_R3:
5582 case IA64_OPND_MSR_R3:
5583 case IA64_OPND_PKR_R3:
5584 case IA64_OPND_PMC_R3:
5585 case IA64_OPND_PMD_R3:
5586 case IA64_OPND_DAHR_R3:
5587 case IA64_OPND_RR_R3:
5588 if (e->X_op == O_index && e->X_op_symbol
5589 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5590 == opnd - IA64_OPND_CPUID_R3))
5591 return OPERAND_MATCH;
5592 break;
5593
5594 case IA64_OPND_MR3:
5595 if (e->X_op == O_index && !e->X_op_symbol)
5596 return OPERAND_MATCH;
5597 break;
5598
5599 /* immediate operands: */
5600 case IA64_OPND_CNT2a:
5601 case IA64_OPND_LEN4:
5602 case IA64_OPND_LEN6:
5603 bits = operand_width (idesc->operands[res_index]);
5604 if (e->X_op == O_constant)
5605 {
5606 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5607 return OPERAND_MATCH;
5608 else
5609 return OPERAND_OUT_OF_RANGE;
5610 }
5611 break;
5612
5613 case IA64_OPND_CNT2b:
5614 if (e->X_op == O_constant)
5615 {
5616 if ((bfd_vma) (e->X_add_number - 1) < 3)
5617 return OPERAND_MATCH;
5618 else
5619 return OPERAND_OUT_OF_RANGE;
5620 }
5621 break;
5622
5623 case IA64_OPND_CNT2c:
5624 val = e->X_add_number;
5625 if (e->X_op == O_constant)
5626 {
5627 if ((val == 0 || val == 7 || val == 15 || val == 16))
5628 return OPERAND_MATCH;
5629 else
5630 return OPERAND_OUT_OF_RANGE;
5631 }
5632 break;
5633
5634 case IA64_OPND_SOR:
5635 /* SOR must be an integer multiple of 8 */
5636 if (e->X_op == O_constant && e->X_add_number & 0x7)
5637 return OPERAND_OUT_OF_RANGE;
5638 /* Fall through. */
5639 case IA64_OPND_SOF:
5640 case IA64_OPND_SOL:
5641 if (e->X_op == O_constant)
5642 {
5643 if ((bfd_vma) e->X_add_number <= 96)
5644 return OPERAND_MATCH;
5645 else
5646 return OPERAND_OUT_OF_RANGE;
5647 }
5648 break;
5649
5650 case IA64_OPND_IMMU62:
5651 if (e->X_op == O_constant)
5652 {
5653 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5654 return OPERAND_MATCH;
5655 else
5656 return OPERAND_OUT_OF_RANGE;
5657 }
5658 else
5659 {
5660 /* FIXME -- need 62-bit relocation type */
5661 as_bad (_("62-bit relocation not yet implemented"));
5662 }
5663 break;
5664
5665 case IA64_OPND_IMMU64:
5666 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5667 || e->X_op == O_subtract)
5668 {
5669 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5670 fix->code = BFD_RELOC_IA64_IMM64;
5671 if (e->X_op != O_subtract)
5672 {
5673 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5674 if (e->X_op == O_pseudo_fixup)
5675 e->X_op = O_symbol;
5676 }
5677
5678 fix->opnd = idesc->operands[res_index];
5679 fix->expr = *e;
5680 fix->is_pcrel = 0;
5681 ++CURR_SLOT.num_fixups;
5682 return OPERAND_MATCH;
5683 }
5684 else if (e->X_op == O_constant)
5685 return OPERAND_MATCH;
5686 break;
5687
5688 case IA64_OPND_IMMU5b:
5689 if (e->X_op == O_constant)
5690 {
5691 val = e->X_add_number;
5692 if (val >= 32 && val <= 63)
5693 return OPERAND_MATCH;
5694 else
5695 return OPERAND_OUT_OF_RANGE;
5696 }
5697 break;
5698
5699 case IA64_OPND_CCNT5:
5700 case IA64_OPND_CNT5:
5701 case IA64_OPND_CNT6:
5702 case IA64_OPND_CPOS6a:
5703 case IA64_OPND_CPOS6b:
5704 case IA64_OPND_CPOS6c:
5705 case IA64_OPND_IMMU2:
5706 case IA64_OPND_IMMU7a:
5707 case IA64_OPND_IMMU7b:
5708 case IA64_OPND_IMMU16:
5709 case IA64_OPND_IMMU19:
5710 case IA64_OPND_IMMU21:
5711 case IA64_OPND_IMMU24:
5712 case IA64_OPND_MBTYPE4:
5713 case IA64_OPND_MHTYPE8:
5714 case IA64_OPND_POS6:
5715 bits = operand_width (idesc->operands[res_index]);
5716 if (e->X_op == O_constant)
5717 {
5718 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5719 return OPERAND_MATCH;
5720 else
5721 return OPERAND_OUT_OF_RANGE;
5722 }
5723 break;
5724
5725 case IA64_OPND_IMMU9:
5726 bits = operand_width (idesc->operands[res_index]);
5727 if (e->X_op == O_constant)
5728 {
5729 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5730 {
5731 int lobits = e->X_add_number & 0x3;
5732 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5733 e->X_add_number |= (bfd_vma) 0x3;
5734 return OPERAND_MATCH;
5735 }
5736 else
5737 return OPERAND_OUT_OF_RANGE;
5738 }
5739 break;
5740
5741 case IA64_OPND_IMM44:
5742 /* least significant 16 bits must be zero */
5743 if ((e->X_add_number & 0xffff) != 0)
5744 /* XXX technically, this is wrong: we should not be issuing warning
5745 messages until we're sure this instruction pattern is going to
5746 be used! */
5747 as_warn (_("lower 16 bits of mask ignored"));
5748
5749 if (e->X_op == O_constant)
5750 {
5751 if (((e->X_add_number >= 0
5752 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5753 || (e->X_add_number < 0
5754 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5755 {
5756 /* sign-extend */
5757 if (e->X_add_number >= 0
5758 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5759 {
5760 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5761 }
5762 return OPERAND_MATCH;
5763 }
5764 else
5765 return OPERAND_OUT_OF_RANGE;
5766 }
5767 break;
5768
5769 case IA64_OPND_IMM17:
5770 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5771 if (e->X_op == O_constant)
5772 {
5773 if (((e->X_add_number >= 0
5774 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5775 || (e->X_add_number < 0
5776 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5777 {
5778 /* sign-extend */
5779 if (e->X_add_number >= 0
5780 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5781 {
5782 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5783 }
5784 return OPERAND_MATCH;
5785 }
5786 else
5787 return OPERAND_OUT_OF_RANGE;
5788 }
5789 break;
5790
5791 case IA64_OPND_IMM14:
5792 case IA64_OPND_IMM22:
5793 relocatable = 1;
5794 /* Fall through. */
5795 case IA64_OPND_IMM1:
5796 case IA64_OPND_IMM8:
5797 case IA64_OPND_IMM8U4:
5798 case IA64_OPND_IMM8M1:
5799 case IA64_OPND_IMM8M1U4:
5800 case IA64_OPND_IMM8M1U8:
5801 case IA64_OPND_IMM9a:
5802 case IA64_OPND_IMM9b:
5803 bits = operand_width (idesc->operands[res_index]);
5804 if (relocatable && (e->X_op == O_symbol
5805 || e->X_op == O_subtract
5806 || e->X_op == O_pseudo_fixup))
5807 {
5808 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5809
5810 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5811 fix->code = BFD_RELOC_IA64_IMM14;
5812 else
5813 fix->code = BFD_RELOC_IA64_IMM22;
5814
5815 if (e->X_op != O_subtract)
5816 {
5817 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5818 if (e->X_op == O_pseudo_fixup)
5819 e->X_op = O_symbol;
5820 }
5821
5822 fix->opnd = idesc->operands[res_index];
5823 fix->expr = *e;
5824 fix->is_pcrel = 0;
5825 ++CURR_SLOT.num_fixups;
5826 return OPERAND_MATCH;
5827 }
5828 else if (e->X_op != O_constant
5829 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5830 return OPERAND_MISMATCH;
5831
5832 if (opnd == IA64_OPND_IMM8M1U4)
5833 {
5834 /* Zero is not valid for unsigned compares that take an adjusted
5835 constant immediate range. */
5836 if (e->X_add_number == 0)
5837 return OPERAND_OUT_OF_RANGE;
5838
5839 /* Sign-extend 32-bit unsigned numbers, so that the following range
5840 checks will work. */
5841 val = e->X_add_number;
5842 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5843 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5844
5845 /* Check for 0x100000000. This is valid because
5846 0x100000000-1 is the same as ((uint32_t) -1). */
5847 if (val == ((bfd_signed_vma) 1 << 32))
5848 return OPERAND_MATCH;
5849
5850 val = val - 1;
5851 }
5852 else if (opnd == IA64_OPND_IMM8M1U8)
5853 {
5854 /* Zero is not valid for unsigned compares that take an adjusted
5855 constant immediate range. */
5856 if (e->X_add_number == 0)
5857 return OPERAND_OUT_OF_RANGE;
5858
5859 /* Check for 0x10000000000000000. */
5860 if (e->X_op == O_big)
5861 {
5862 if (generic_bignum[0] == 0
5863 && generic_bignum[1] == 0
5864 && generic_bignum[2] == 0
5865 && generic_bignum[3] == 0
5866 && generic_bignum[4] == 1)
5867 return OPERAND_MATCH;
5868 else
5869 return OPERAND_OUT_OF_RANGE;
5870 }
5871 else
5872 val = e->X_add_number - 1;
5873 }
5874 else if (opnd == IA64_OPND_IMM8M1)
5875 val = e->X_add_number - 1;
5876 else if (opnd == IA64_OPND_IMM8U4)
5877 {
5878 /* Sign-extend 32-bit unsigned numbers, so that the following range
5879 checks will work. */
5880 val = e->X_add_number;
5881 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5882 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5883 }
5884 else
5885 val = e->X_add_number;
5886
5887 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5888 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5889 return OPERAND_MATCH;
5890 else
5891 return OPERAND_OUT_OF_RANGE;
5892
5893 case IA64_OPND_INC3:
5894 /* +/- 1, 4, 8, 16 */
5895 val = e->X_add_number;
5896 if (val < 0)
5897 val = -val;
5898 if (e->X_op == O_constant)
5899 {
5900 if ((val == 1 || val == 4 || val == 8 || val == 16))
5901 return OPERAND_MATCH;
5902 else
5903 return OPERAND_OUT_OF_RANGE;
5904 }
5905 break;
5906
5907 case IA64_OPND_TGT25:
5908 case IA64_OPND_TGT25b:
5909 case IA64_OPND_TGT25c:
5910 case IA64_OPND_TGT64:
5911 if (e->X_op == O_symbol)
5912 {
5913 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5914 if (opnd == IA64_OPND_TGT25)
5915 fix->code = BFD_RELOC_IA64_PCREL21F;
5916 else if (opnd == IA64_OPND_TGT25b)
5917 fix->code = BFD_RELOC_IA64_PCREL21M;
5918 else if (opnd == IA64_OPND_TGT25c)
5919 fix->code = BFD_RELOC_IA64_PCREL21B;
5920 else if (opnd == IA64_OPND_TGT64)
5921 fix->code = BFD_RELOC_IA64_PCREL60B;
5922 else
5923 abort ();
5924
5925 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5926 fix->opnd = idesc->operands[res_index];
5927 fix->expr = *e;
5928 fix->is_pcrel = 1;
5929 ++CURR_SLOT.num_fixups;
5930 return OPERAND_MATCH;
5931 }
5932 /* Fall through. */
5933 case IA64_OPND_TAG13:
5934 case IA64_OPND_TAG13b:
5935 switch (e->X_op)
5936 {
5937 case O_constant:
5938 return OPERAND_MATCH;
5939
5940 case O_symbol:
5941 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5942 /* There are no external relocs for TAG13/TAG13b fields, so we
5943 create a dummy reloc. This will not live past md_apply_fix. */
5944 fix->code = BFD_RELOC_UNUSED;
5945 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5946 fix->opnd = idesc->operands[res_index];
5947 fix->expr = *e;
5948 fix->is_pcrel = 1;
5949 ++CURR_SLOT.num_fixups;
5950 return OPERAND_MATCH;
5951
5952 default:
5953 break;
5954 }
5955 break;
5956
5957 case IA64_OPND_LDXMOV:
5958 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5959 fix->code = BFD_RELOC_IA64_LDXMOV;
5960 fix->opnd = idesc->operands[res_index];
5961 fix->expr = *e;
5962 fix->is_pcrel = 0;
5963 ++CURR_SLOT.num_fixups;
5964 return OPERAND_MATCH;
5965
5966 case IA64_OPND_STRD5b:
5967 if (e->X_op == O_constant)
5968 {
5969 /* 5-bit signed scaled by 64 */
5970 if ((e->X_add_number <= ( 0xf << 6 ))
5971 && (e->X_add_number >= -( 0x10 << 6 )))
5972 {
5973
5974 /* Must be a multiple of 64 */
5975 if ((e->X_add_number & 0x3f) != 0)
5976 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5977
5978 e->X_add_number &= ~ 0x3f;
5979 return OPERAND_MATCH;
5980 }
5981 else
5982 return OPERAND_OUT_OF_RANGE;
5983 }
5984 break;
5985 case IA64_OPND_CNT6a:
5986 if (e->X_op == O_constant)
5987 {
5988 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5989 if ((e->X_add_number <= 64)
5990 && (e->X_add_number > 0) )
5991 {
5992 return OPERAND_MATCH;
5993 }
5994 else
5995 return OPERAND_OUT_OF_RANGE;
5996 }
5997 break;
5998
5999 default:
6000 break;
6001 }
6002 return OPERAND_MISMATCH;
6003 }
6004
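/* Parse a single operand into E and return the character that
   terminated it; when MORE is nonzero, a ',' or MORE separator is
   consumed.  */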
6005 static int
6006 parse_operand (expressionS *e, int more)
6007 {
6008 int sep = '\0';
6009
6010 memset (e, 0, sizeof (*e));
6011 e->X_op = O_absent;
6012 SKIP_WHITESPACE ();
6013 expression (e);
6014 sep = *input_line_pointer;
6015 if (more && (sep == ',' || sep == more))
6016 ++input_line_pointer;
6017 return sep;
6018 }
6019
6020 static int
6021 parse_operand_and_eval (expressionS *e, int more)
6022 {
6023 int sep = parse_operand (e, more);
6024 resolve_expression (e);
6025 return sep;
6026 }
6027
6028 static int
6029 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6030 {
6031 int sep = parse_operand (e, more);
6032 switch (op)
6033 {
6034 case IA64_OPND_IMM14:
6035 case IA64_OPND_IMM22:
6036 case IA64_OPND_IMMU64:
6037 case IA64_OPND_TGT25:
6038 case IA64_OPND_TGT25b:
6039 case IA64_OPND_TGT25c:
6040 case IA64_OPND_TGT64:
6041 case IA64_OPND_TAG13:
6042 case IA64_OPND_TAG13b:
6043 case IA64_OPND_LDXMOV:
6044 break;
6045 default:
6046 resolve_expression (e);
6047 break;
6048 }
6049 return sep;
6050 }
6051
6052 /* Returns the next entry in the opcode table that matches the one in
6053 IDESC, and frees the entry in IDESC. If no matching entry is
6054 found, NULL is returned instead. */
6055
6056 static struct ia64_opcode *
6057 get_next_opcode (struct ia64_opcode *idesc)
6058 {
6059 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6060 ia64_free_opcode (idesc);
6061 return next;
6062 }
6063
6064 /* Parse the operands for the opcode and find the opcode variant that
6065 matches the specified operands, or NULL if no match is possible. */
6066
6067 static struct ia64_opcode *
6068 parse_operands (struct ia64_opcode *idesc)
6069 {
6070 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6071 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6072 int reg1, reg2;
6073 char reg_class;
6074 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6075 enum operand_match_result result;
6076 char mnemonic[129];
6077 char *first_arg = 0, *end, *saved_input_pointer;
6078 unsigned int sof;
6079
6080 gas_assert (strlen (idesc->name) <= 128);
6081
6082 strcpy (mnemonic, idesc->name);
6083 if (idesc->operands[2] == IA64_OPND_SOF
6084 || idesc->operands[1] == IA64_OPND_SOF)
6085 {
6086 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6087 can't parse the first operand until we have parsed the
6088 remaining operands of the "alloc" instruction. */
6089 SKIP_WHITESPACE ();
6090 first_arg = input_line_pointer;
6091 end = strchr (input_line_pointer, '=');
6092 if (!end)
6093 {
6094 as_bad (_("Expected separator `='"));
6095 return 0;
6096 }
6097 input_line_pointer = end + 1;
6098 ++i;
6099 ++num_outputs;
6100 }
6101
6102 for (; ; ++i)
6103 {
6104 if (i < NELEMS (CURR_SLOT.opnd))
6105 {
6106 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=',
6107 idesc->operands[i]);
6108 if (CURR_SLOT.opnd[i].X_op == O_absent)
6109 break;
6110 }
6111 else
6112 {
6113 expressionS dummy;
6114
6115 sep = parse_operand (&dummy, '=');
6116 if (dummy.X_op == O_absent)
6117 break;
6118 }
6119
6120 ++num_operands;
6121
6122 if (sep != '=' && sep != ',')
6123 break;
6124
6125 if (sep == '=')
6126 {
6127 if (num_outputs > 0)
6128 as_bad (_("Duplicate equal sign (=) in instruction"));
6129 else
6130 num_outputs = i + 1;
6131 }
6132 }
6133 if (sep != '\0')
6134 {
6135 as_bad (_("Illegal operand separator `%c'"), sep);
6136 return 0;
6137 }
6138
6139 if (idesc->operands[2] == IA64_OPND_SOF
6140 || idesc->operands[1] == IA64_OPND_SOF)
6141 {
6142 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6143 Note, however, that due to that mapping operand numbers in error
6144 messages for any of the constant operands will not be correct. */
6145 know (strcmp (idesc->name, "alloc") == 0);
6146 /* The first operand hasn't been parsed/initialized, yet (but
6147 num_operands intentionally doesn't account for that). */
6148 i = num_operands > 4 ? 2 : 1;
6149 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6150 ? CURR_SLOT.opnd[n].X_add_number \
6151 : 0)
6152 sof = set_regstack (FORCE_CONST(i),
6153 FORCE_CONST(i + 1),
6154 FORCE_CONST(i + 2),
6155 FORCE_CONST(i + 3));
6156 #undef FORCE_CONST
6157
6158 /* now we can parse the first arg: */
6159 saved_input_pointer = input_line_pointer;
6160 input_line_pointer = first_arg;
6161 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6162 idesc->operands[0]);
6163 if (sep != '=')
6164 --num_outputs; /* force error */
6165 input_line_pointer = saved_input_pointer;
6166
6167 CURR_SLOT.opnd[i].X_add_number = sof;
6168 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6169 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6170 CURR_SLOT.opnd[i + 1].X_add_number
6171 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6172 else
6173 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6174 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6175 }
6176
6177 highest_unmatched_operand = -4;
6178 curr_out_of_range_pos = -1;
6179 error_pos = 0;
6180 for (; idesc; idesc = get_next_opcode (idesc))
6181 {
6182 if (num_outputs != idesc->num_outputs)
6183 continue; /* mismatch in # of outputs */
6184 if (highest_unmatched_operand < 0)
6185 highest_unmatched_operand |= 1;
6186 if (num_operands > NELEMS (idesc->operands)
6187 || (num_operands < NELEMS (idesc->operands)
6188 && idesc->operands[num_operands])
6189 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6190 continue; /* mismatch in number of arguments */
6191 if (highest_unmatched_operand < 0)
6192 highest_unmatched_operand |= 2;
6193
6194 CURR_SLOT.num_fixups = 0;
6195
6196 /* Try to match all operands. If we see an out-of-range operand,
6197 then continue trying to match the rest of the operands, since if
6198 the rest match, then this idesc will give the best error message. */
6199
6200 out_of_range_pos = -1;
6201 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6202 {
6203 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6204 if (result != OPERAND_MATCH)
6205 {
6206 if (result != OPERAND_OUT_OF_RANGE)
6207 break;
6208 if (out_of_range_pos < 0)
6209 /* remember position of the first out-of-range operand: */
6210 out_of_range_pos = i;
6211 }
6212 }
6213
6214 /* If we did not match all operands, or if at least one operand was
6215 out-of-range, then this idesc does not match. Keep track of which
6216 idesc matched the most operands before failing. If we have two
6217 idescs that failed at the same position, and one had an out-of-range
6218 operand, then prefer the out-of-range operand. Thus if we have
6219 "add r0=0x1000000,r1" we get an error saying the constant is out
6220 of range instead of an error saying that the constant should have been
6221 a register. */
6222
6223 if (i != num_operands || out_of_range_pos >= 0)
6224 {
6225 if (i > highest_unmatched_operand
6226 || (i == highest_unmatched_operand
6227 && out_of_range_pos > curr_out_of_range_pos))
6228 {
6229 highest_unmatched_operand = i;
6230 if (out_of_range_pos >= 0)
6231 {
6232 expected_operand = idesc->operands[out_of_range_pos];
6233 error_pos = out_of_range_pos;
6234 }
6235 else
6236 {
6237 expected_operand = idesc->operands[i];
6238 error_pos = i;
6239 }
6240 curr_out_of_range_pos = out_of_range_pos;
6241 }
6242 continue;
6243 }
6244
6245 break;
6246 }
6247 if (!idesc)
6248 {
6249 if (expected_operand)
6250 as_bad (_("Operand %u of `%s' should be %s"),
6251 error_pos + 1, mnemonic,
6252 elf64_ia64_operands[expected_operand].desc);
6253 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6254 as_bad (_("Wrong number of output operands"));
6255 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6256 as_bad (_("Wrong number of input operands"));
6257 else
6258 as_bad (_("Operand mismatch"));
6259 return 0;
6260 }
6261
6262 /* Check that the instruction doesn't use
6263 - r0, f0, or f1 as output operands
6264 - the same predicate twice as output operands
6265 - r0 as address of a base update load or store
6266 - the same GR as output and address of a base update load
6267 - two even- or two odd-numbered FRs as output operands of a floating
6268 point parallel load.
6269 At most two (conflicting) output (or output-like) operands can exist
6270 (floating point parallel loads have three outputs, but the base register,
6271 if updated, cannot conflict with the actual outputs). */
6272 reg2 = reg1 = -1;
6273 for (i = 0; i < num_operands; ++i)
6274 {
6275 int regno = 0;
6276
6277 reg_class = 0;
6278 switch (idesc->operands[i])
6279 {
6280 case IA64_OPND_R1:
6281 case IA64_OPND_R2:
6282 case IA64_OPND_R3:
6283 if (i < num_outputs)
6284 {
6285 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6286 reg_class = 'r';
6287 else if (reg1 < 0)
6288 reg1 = CURR_SLOT.opnd[i].X_add_number;
6289 else if (reg2 < 0)
6290 reg2 = CURR_SLOT.opnd[i].X_add_number;
6291 }
6292 break;
6293 case IA64_OPND_P1:
6294 case IA64_OPND_P2:
6295 if (i < num_outputs)
6296 {
6297 if (reg1 < 0)
6298 reg1 = CURR_SLOT.opnd[i].X_add_number;
6299 else if (reg2 < 0)
6300 reg2 = CURR_SLOT.opnd[i].X_add_number;
6301 }
6302 break;
6303 case IA64_OPND_F1:
6304 case IA64_OPND_F2:
6305 case IA64_OPND_F3:
6306 case IA64_OPND_F4:
6307 if (i < num_outputs)
6308 {
6309 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6310 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6311 {
6312 reg_class = 'f';
6313 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6314 }
6315 else if (reg1 < 0)
6316 reg1 = CURR_SLOT.opnd[i].X_add_number;
6317 else if (reg2 < 0)
6318 reg2 = CURR_SLOT.opnd[i].X_add_number;
6319 }
6320 break;
6321 case IA64_OPND_MR3:
6322 if (idesc->flags & IA64_OPCODE_POSTINC)
6323 {
6324 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6325 reg_class = 'm';
6326 else if (reg1 < 0)
6327 reg1 = CURR_SLOT.opnd[i].X_add_number;
6328 else if (reg2 < 0)
6329 reg2 = CURR_SLOT.opnd[i].X_add_number;
6330 }
6331 break;
6332 default:
6333 break;
6334 }
6335 switch (reg_class)
6336 {
6337 case 0:
6338 break;
6339 default:
6340 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6341 break;
6342 case 'm':
6343 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6344 break;
6345 }
6346 }
6347 if (reg1 == reg2)
6348 {
6349 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6350 {
6351 reg1 -= REG_GR;
6352 reg_class = 'r';
6353 }
6354 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6355 {
6356 reg1 -= REG_P;
6357 reg_class = 'p';
6358 }
6359 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6360 {
6361 reg1 -= REG_FR;
6362 reg_class = 'f';
6363 }
6364 else
6365 reg_class = 0;
6366 if (reg_class)
6367 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6368 }
6369 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6370 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6371 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6372 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6373 && ! ((reg1 ^ reg2) & 1))
6374 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6375 reg1 - REG_FR, reg2 - REG_FR);
6376 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6377 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6378 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6379 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6380 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6381 reg1 - REG_FR, reg2 - REG_FR);
6382 return idesc;
6383 }
6384
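/* Encode the instruction described by SLOT into the 41-bit slot word(s)
   at INSNP: start from the opcode and the qualifying predicate, then
   insert each operand through its operand descriptor.  For the long
   immediate forms (IMMU64, IMMU62, TGT64) the extra 41 bits are written
   first, so the instruction word itself lands in the following slot.  */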
6385 static void
6386 build_insn (struct slot *slot, bfd_vma *insnp)
6387 {
6388 const struct ia64_operand *odesc, *o2desc;
6389 struct ia64_opcode *idesc = slot->idesc;
6390 bfd_vma insn;
6391 bfd_signed_vma val;
6392 const char *err;
6393 int i;
6394
6395 insn = idesc->opcode | slot->qp_regno;
6396
6397 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6398 {
6399 if (slot->opnd[i].X_op == O_register
6400 || slot->opnd[i].X_op == O_constant
6401 || slot->opnd[i].X_op == O_index)
6402 val = slot->opnd[i].X_add_number;
6403 else if (slot->opnd[i].X_op == O_big)
6404 {
6405 /* This must be the value 0x10000000000000000. */
6406 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6407 val = 0;
6408 }
6409 else
6410 val = 0;
6411
6412 switch (idesc->operands[i])
6413 {
6414 case IA64_OPND_IMMU64:
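	  /* 64-bit immediate (as used by movl): imm[62:22] goes into the
	     preceding L slot via *insnp++; the rest is scattered into the
	     X instruction word: imm[6:0] at bit 13, imm[15:7] at bit 27,
	     imm[20:16] at bit 22, imm[21] at bit 21, imm[63] at bit 36.  */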
6415 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6416 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6417 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6418 | (((val >> 63) & 0x1) << 36));
6419 continue;
6420
6421 case IA64_OPND_IMMU62:
6422 val &= 0x3fffffffffffffffULL;
6423 if (val != slot->opnd[i].X_add_number)
6424 as_warn (_("Value truncated to 62 bits"));
6425 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6426 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6427 continue;
6428
6429 case IA64_OPND_TGT64:
6430 val >>= 4;
6431 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6432 insn |= ((((val >> 59) & 0x1) << 36)
6433 | (((val >> 0) & 0xfffff) << 13));
6434 continue;
6435
6436 case IA64_OPND_AR3:
6437 val -= REG_AR;
6438 break;
6439
6440 case IA64_OPND_B1:
6441 case IA64_OPND_B2:
6442 val -= REG_BR;
6443 break;
6444
6445 case IA64_OPND_CR3:
6446 val -= REG_CR;
6447 break;
6448
6449 case IA64_OPND_DAHR3:
6450 val -= REG_DAHR;
6451 break;
6452
6453 case IA64_OPND_F1:
6454 case IA64_OPND_F2:
6455 case IA64_OPND_F3:
6456 case IA64_OPND_F4:
6457 val -= REG_FR;
6458 break;
6459
6460 case IA64_OPND_P1:
6461 case IA64_OPND_P2:
6462 val -= REG_P;
6463 break;
6464
6465 case IA64_OPND_R1:
6466 case IA64_OPND_R2:
6467 case IA64_OPND_R3:
6468 case IA64_OPND_R3_2:
6469 case IA64_OPND_CPUID_R3:
6470 case IA64_OPND_DBR_R3:
6471 case IA64_OPND_DTR_R3:
6472 case IA64_OPND_ITR_R3:
6473 case IA64_OPND_IBR_R3:
6474 case IA64_OPND_MR3:
6475 case IA64_OPND_MSR_R3:
6476 case IA64_OPND_PKR_R3:
6477 case IA64_OPND_PMC_R3:
6478 case IA64_OPND_PMD_R3:
6479 case IA64_OPND_DAHR_R3:
6480 case IA64_OPND_RR_R3:
6481 val -= REG_GR;
6482 break;
6483
6484 default:
6485 break;
6486 }
6487
6488 odesc = elf64_ia64_operands + idesc->operands[i];
6489 err = (*odesc->insert) (odesc, val, &insn);
6490 if (err)
6491 as_bad_where (slot->src_file, slot->src_line,
6492 _("Bad operand value: %s"), err);
6493 if (idesc->flags & IA64_OPCODE_PSEUDO)
6494 {
6495 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6496 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6497 {
6498 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6499 (*o2desc->insert) (o2desc, val, &insn);
6500 }
6501 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6502 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6503 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6504 {
6505 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6506 (*o2desc->insert) (o2desc, 64 - val, &insn);
6507 }
6508 }
6509 }
6510 *insnp = insn;
6511 }
6512
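/* Drain up to three queued slots into one 16-byte bundle: pick a
   template (the user's, if given, otherwise the precomputed best
   match), pre-fill all three slots with unit-appropriate NOPs, then
   place as many queued instructions as fit while honoring manual
   bundling, labels, stop bits and unwind records.  The finished bundle
   is emitted as two little-endian 64-bit words.  */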
6513 static void
6514 emit_one_bundle (void)
6515 {
6516 int manual_bundling_off = 0, manual_bundling = 0;
6517 enum ia64_unit required_unit, insn_unit = 0;
6518 enum ia64_insn_type type[3], insn_type;
6519 unsigned int template_val, orig_template;
6520 bfd_vma insn[3] = { -1, -1, -1 };
6521 struct ia64_opcode *idesc;
6522 int end_of_insn_group = 0, user_template = -1;
6523 int n, i, j, first, curr, last_slot;
6524 bfd_vma t0 = 0, t1 = 0;
6525 struct label_fix *lfix;
6526 bfd_boolean mark_label;
6527 struct insn_fix *ifix;
6528 char mnemonic[16];
6529 fixS *fix;
6530 char *f;
6531 int addr_mod;
6532
6533 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6534 know (first >= 0 && first < NUM_SLOTS);
6535 n = MIN (3, md.num_slots_in_use);
6536
6537 /* Determine template: use user_template if specified, best match
6538 otherwise: */
6539
6540 if (md.slot[first].user_template >= 0)
6541 user_template = template_val = md.slot[first].user_template;
6542 else
6543 {
6544 /* Auto select appropriate template. */
6545 memset (type, 0, sizeof (type));
6546 curr = first;
6547 for (i = 0; i < n; ++i)
6548 {
6549 if (md.slot[curr].label_fixups && i != 0)
6550 break;
6551 type[i] = md.slot[curr].idesc->type;
6552 curr = (curr + 1) % NUM_SLOTS;
6553 }
6554 template_val = best_template[type[0]][type[1]][type[2]];
6555 }
6556
6557 /* initialize instructions with appropriate nops: */
6558 for (i = 0; i < 3; ++i)
6559 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6560
6561 f = frag_more (16);
6562
6563 /* Check to see if this bundle is at an offset that is a multiple of 16 bytes
6564 from the start of the frag. */
6565 addr_mod = frag_now_fix () & 15;
6566 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6567 as_bad (_("instruction address is not a multiple of 16"));
6568 frag_now->insn_addr = addr_mod;
6569 frag_now->has_code = 1;
6570
6571 /* now fill in slots with as many insns as possible: */
6572 curr = first;
6573 idesc = md.slot[curr].idesc;
6574 end_of_insn_group = 0;
6575 last_slot = -1;
6576 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6577 {
6578 /* If we have unwind records, we may need to update some now. */
6579 unw_rec_list *ptr = md.slot[curr].unwind_record;
6580 unw_rec_list *end_ptr = NULL;
6581
6582 if (ptr)
6583 {
6584 /* Find the last prologue/body record in the list for the current
6585 insn, and set the slot number for all records up to that point.
6586 This needs to be done now, because prologue/body records refer to
6587 the current point, not the point after the instruction has been
6588 issued. This matters because there may have been nops emitted
6589 meanwhile. Any non-prologue non-body record followed by a
6590 prologue/body record must also refer to the current point. */
6591 unw_rec_list *last_ptr;
6592
6593 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6594 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6595 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6596 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6597 || ptr->r.type == body)
6598 last_ptr = ptr;
6599 if (last_ptr)
6600 {
6601 /* Make last_ptr point one after the last prologue/body
6602 record. */
6603 last_ptr = last_ptr->next;
6604 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6605 ptr = ptr->next)
6606 {
6607 ptr->slot_number = (unsigned long) f + i;
6608 ptr->slot_frag = frag_now;
6609 }
6610 /* Remove the initialized records, so that we won't accidentally
6611 update them again if we insert a nop and continue. */
6612 md.slot[curr].unwind_record = last_ptr;
6613 }
6614 }
6615
6616 manual_bundling_off = md.slot[curr].manual_bundling_off;
6617 if (md.slot[curr].manual_bundling_on)
6618 {
6619 if (curr == first)
6620 manual_bundling = 1;
6621 else
6622 break; /* Need to start a new bundle. */
6623 }
6624
6625 /* If this instruction specifies a template, then it must be the first
6626 instruction of a bundle. */
6627 if (curr != first && md.slot[curr].user_template >= 0)
6628 break;
6629
6630 if (idesc->flags & IA64_OPCODE_SLOT2)
6631 {
6632 if (manual_bundling && !manual_bundling_off)
6633 {
6634 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6635 _("`%s' must be last in bundle"), idesc->name);
6636 if (i < 2)
6637 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6638 }
6639 i = 2;
6640 }
6641 if (idesc->flags & IA64_OPCODE_LAST)
6642 {
6643 int required_slot;
6644 unsigned int required_template;
6645
6646 /* If we need a stop bit after an M slot, our only choice is
6647 template 5 (M;;MI). If we need a stop bit after a B
6648 slot, our only choice is to place it at the end of the
6649 bundle, because the only available templates are MIB,
6650 MBB, BBB, MMB, and MFB. We don't handle anything other
6651 than M and B slots because these are the only kind of
6652 instructions that can have the IA64_OPCODE_LAST bit set. */
6653 required_template = template_val;
6654 switch (idesc->type)
6655 {
6656 case IA64_TYPE_M:
6657 required_slot = 0;
6658 required_template = 5;
6659 break;
6660
6661 case IA64_TYPE_B:
6662 required_slot = 2;
6663 break;
6664
6665 default:
6666 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6667 _("Internal error: don't know how to force %s to end of instruction group"),
6668 idesc->name);
6669 required_slot = i;
6670 break;
6671 }
6672 if (manual_bundling
6673 && (i > required_slot
6674 || (required_slot == 2 && !manual_bundling_off)
6675 || (user_template >= 0
6676 /* Changing from MMI to M;MI is OK. */
6677 && (template_val ^ required_template) > 1)))
6678 {
6679 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6680 _("`%s' must be last in instruction group"),
6681 idesc->name);
6682 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6683 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6684 }
6685 if (required_slot < i)
6686 /* Can't fit this instruction. */
6687 break;
6688
6689 i = required_slot;
6690 if (required_template != template_val)
6691 {
6692 /* If we switch the template, we need to reset the NOPs
6693 after slot i. The slot-types of the instructions ahead
6694 of i never change, so we don't need to worry about
6695 changing NOPs in front of this slot. */
6696 for (j = i; j < 3; ++j)
6697 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6698
6699 /* We just picked a template that includes the stop bit in the
6700 middle, so we don't need another one emitted later. */
6701 md.slot[curr].end_of_insn_group = 0;
6702 }
6703 template_val = required_template;
6704 }
6705 if (curr != first && md.slot[curr].label_fixups)
6706 {
6707 if (manual_bundling)
6708 {
6709 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6710 _("Label must be first in a bundle"));
6711 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6712 }
6713 /* This insn must go into the first slot of a bundle. */
6714 break;
6715 }
6716
6717 if (end_of_insn_group && md.num_slots_in_use >= 1)
6718 {
6719 /* We need an instruction group boundary in the middle of a
6720 bundle. See if we can switch to an other template with
6721 an appropriate boundary. */
6722
6723 orig_template = template_val;
6724 if (i == 1 && (user_template == 4
6725 || (user_template < 0
6726 && (ia64_templ_desc[template_val].exec_unit[0]
6727 == IA64_UNIT_M))))
6728 {
6729 template_val = 5;
6730 end_of_insn_group = 0;
6731 }
6732 else if (i == 2 && (user_template == 0
6733 || (user_template < 0
6734 && (ia64_templ_desc[template_val].exec_unit[1]
6735 == IA64_UNIT_I)))
6736 /* This test makes sure we don't switch the template if
6737 the next instruction is one that needs to be first in
6738 an instruction group. Since all those instructions are
6739 in the M group, there is no way such an instruction can
6740 fit in this bundle even if we switch the template. The
6741 reason we have to check for this is that otherwise we
6742 may end up generating "MI;;I M.." which has the deadly
6743 effect that the second M instruction is no longer the
6744 first in the group! --davidm 99/12/16 */
6745 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6746 {
6747 template_val = 1;
6748 end_of_insn_group = 0;
6749 }
6750 else if (i == 1
6751 && user_template == 0
6752 && !(idesc->flags & IA64_OPCODE_FIRST))
6753 /* Use the next slot. */
6754 continue;
6755 else if (curr != first)
6756 /* can't fit this insn */
6757 break;
6758
6759 if (template_val != orig_template)
6760 /* if we switch the template, we need to reset the NOPs
6761 after slot i. The slot-types of the instructions ahead
6762 of i never change, so we don't need to worry about
6763 changing NOPs in front of this slot. */
6764 for (j = i; j < 3; ++j)
6765 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6766 }
6767 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6768
6769 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6770 if (idesc->type == IA64_TYPE_DYN)
6771 {
6772 enum ia64_opnd opnd1, opnd2;
6773
6774 if ((strcmp (idesc->name, "nop") == 0)
6775 || (strcmp (idesc->name, "break") == 0))
6776 insn_unit = required_unit;
6777 else if (strcmp (idesc->name, "hint") == 0)
6778 {
6779 insn_unit = required_unit;
6780 if (required_unit == IA64_UNIT_B)
6781 {
6782 switch (md.hint_b)
6783 {
6784 case hint_b_ok:
6785 break;
6786 case hint_b_warning:
6787 as_warn (_("hint in B unit may be treated as nop"));
6788 break;
6789 case hint_b_error:
6790 /* When manual bundling is off and there is no
6791 user template, we choose a different unit so
6792 that hint won't go into the current slot. We
6793 will fill the current bundle with nops and
6794 try to put hint into the next bundle. */
6795 if (!manual_bundling && user_template < 0)
6796 insn_unit = IA64_UNIT_I;
6797 else
6798 as_bad (_("hint in B unit can't be used"));
6799 break;
6800 }
6801 }
6802 }
6803 else if (strcmp (idesc->name, "chk.s") == 0
6804 || strcmp (idesc->name, "mov") == 0)
6805 {
6806 insn_unit = IA64_UNIT_M;
6807 if (required_unit == IA64_UNIT_I
6808 || (required_unit == IA64_UNIT_F && template_val == 6))
6809 insn_unit = IA64_UNIT_I;
6810 }
6811 else
6812 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6813
6814 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6815 idesc->name, "?imbfxx"[insn_unit]);
6816 opnd1 = idesc->operands[0];
6817 opnd2 = idesc->operands[1];
6818 ia64_free_opcode (idesc);
6819 idesc = ia64_find_opcode (mnemonic);
6820 /* moves to/from ARs have collisions */
6821 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6822 {
6823 while (idesc != NULL
6824 && (idesc->operands[0] != opnd1
6825 || idesc->operands[1] != opnd2))
6826 idesc = get_next_opcode (idesc);
6827 }
6828 md.slot[curr].idesc = idesc;
6829 }
6830 else
6831 {
6832 insn_type = idesc->type;
6833 insn_unit = IA64_UNIT_NIL;
6834 switch (insn_type)
6835 {
6836 case IA64_TYPE_A:
6837 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6838 insn_unit = required_unit;
6839 break;
6840 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6841 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6842 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6843 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6844 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6845 default: break;
6846 }
6847 }
6848
6849 if (insn_unit != required_unit)
6850 continue; /* Try next slot. */
6851
6852 /* Now is a good time to fix up the labels for this insn. */
6853 mark_label = FALSE;
6854 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6855 {
6856 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6857 symbol_set_frag (lfix->sym, frag_now);
6858 mark_label |= lfix->dw2_mark_labels;
6859 }
6860 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6861 {
6862 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6863 symbol_set_frag (lfix->sym, frag_now);
6864 }
6865
6866 if (debug_type == DEBUG_DWARF2
6867 || md.slot[curr].loc_directive_seen
6868 || mark_label)
6869 {
6870 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6871
6872 md.slot[curr].loc_directive_seen = 0;
6873 if (mark_label)
6874 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6875
6876 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6877 }
6878
6879 build_insn (md.slot + curr, insn + i);
6880
6881 ptr = md.slot[curr].unwind_record;
6882 if (ptr)
6883 {
6884 /* Set slot numbers for all remaining unwind records belonging to the
6885 current insn. There can not be any prologue/body unwind records
6886 here. */
6887 for (; ptr != end_ptr; ptr = ptr->next)
6888 {
6889 ptr->slot_number = (unsigned long) f + i;
6890 ptr->slot_frag = frag_now;
6891 }
6892 md.slot[curr].unwind_record = NULL;
6893 }
6894
6895 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6896 {
6897 ifix = md.slot[curr].fixup + j;
6898 fix = fix_new_exp (frag_now, frag_now_fix () - 16 + i, 8,
6899 &ifix->expr, ifix->is_pcrel, ifix->code);
6900 fix->tc_fix_data.opnd = ifix->opnd;
6901 fix->fx_file = md.slot[curr].src_file;
6902 fix->fx_line = md.slot[curr].src_line;
6903 }
6904
6905 end_of_insn_group = md.slot[curr].end_of_insn_group;
6906
6907 /* This adjustment to "i" must occur after the fix, otherwise the fix
6908 is assigned to the wrong slot, and the VMS linker complains. */
6909 if (required_unit == IA64_UNIT_L)
6910 {
6911 know (i == 1);
6912 /* skip one slot for long/X-unit instructions */
6913 ++i;
6914 }
6915 --md.num_slots_in_use;
6916 last_slot = i;
6917
6918 /* clear slot: */
6919 ia64_free_opcode (md.slot[curr].idesc);
6920 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6921 md.slot[curr].user_template = -1;
6922
6923 if (manual_bundling_off)
6924 {
6925 manual_bundling = 0;
6926 break;
6927 }
6928 curr = (curr + 1) % NUM_SLOTS;
6929 idesc = md.slot[curr].idesc;
6930 }
6931
6932 /* A user template was specified, but the first following instruction did
6933 not fit. This can happen with or without manual bundling. */
6934 if (md.num_slots_in_use > 0 && last_slot < 0)
6935 {
6936 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6937 _("`%s' does not fit into %s template"),
6938 idesc->name, ia64_templ_desc[template_val].name);
6939 /* Drop first insn so we don't livelock. */
6940 --md.num_slots_in_use;
6941 know (curr == first);
6942 ia64_free_opcode (md.slot[curr].idesc);
6943 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6944 md.slot[curr].user_template = -1;
6945 }
6946 else if (manual_bundling > 0)
6947 {
6948 if (md.num_slots_in_use > 0)
6949 {
6950 if (last_slot >= 2)
6951 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6952 _("`%s' does not fit into bundle"), idesc->name);
6953 else
6954 {
6955 const char *where;
6956
6957 if (template_val == 2)
6958 where = "X slot";
6959 else if (last_slot == 0)
6960 where = "slots 2 or 3";
6961 else
6962 where = "slot 3";
6963 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6964 _("`%s' can't go in %s of %s template"),
6965 idesc->name, where, ia64_templ_desc[template_val].name);
6966 }
6967 }
6968 else
6969 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6970 _("Missing '}' at end of file"));
6971 }
6972
6973 know (md.num_slots_in_use < NUM_SLOTS);
6974
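  /* Pack the 128-bit bundle: bit 0 is the trailing stop bit, bits 1-4
     the template number (this file's numbering, i.e. the architectural
     template value shifted right by one), followed by three 41-bit
     slots.  t0 carries bits 0-63 (template, slot 0, and the low 18 bits
     of slot 1); t1 carries bits 64-127 (the remaining 23 bits of slot 1
     and all of slot 2).  */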
6975 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6976 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6977
6978 number_to_chars_littleendian (f + 0, t0, 8);
6979 number_to_chars_littleendian (f + 8, t1, 8);
6980 }
6981
6982 int
6983 md_parse_option (int c, const char *arg)
6984 {
6985
6986 switch (c)
6987 {
6988 /* Switches from the Intel assembler. */
6989 case 'm':
6990 if (strcmp (arg, "ilp64") == 0
6991 || strcmp (arg, "lp64") == 0
6992 || strcmp (arg, "p64") == 0)
6993 {
6994 md.flags |= EF_IA_64_ABI64;
6995 }
6996 else if (strcmp (arg, "ilp32") == 0)
6997 {
6998 md.flags &= ~EF_IA_64_ABI64;
6999 }
7000 else if (strcmp (arg, "le") == 0)
7001 {
7002 md.flags &= ~EF_IA_64_BE;
7003 default_big_endian = 0;
7004 }
7005 else if (strcmp (arg, "be") == 0)
7006 {
7007 md.flags |= EF_IA_64_BE;
7008 default_big_endian = 1;
7009 }
7010 else if (strncmp (arg, "unwind-check=", 13) == 0)
7011 {
7012 arg += 13;
7013 if (strcmp (arg, "warning") == 0)
7014 md.unwind_check = unwind_check_warning;
7015 else if (strcmp (arg, "error") == 0)
7016 md.unwind_check = unwind_check_error;
7017 else
7018 return 0;
7019 }
7020 else if (strncmp (arg, "hint.b=", 7) == 0)
7021 {
7022 arg += 7;
7023 if (strcmp (arg, "ok") == 0)
7024 md.hint_b = hint_b_ok;
7025 else if (strcmp (arg, "warning") == 0)
7026 md.hint_b = hint_b_warning;
7027 else if (strcmp (arg, "error") == 0)
7028 md.hint_b = hint_b_error;
7029 else
7030 return 0;
7031 }
7032 else if (strncmp (arg, "tune=", 5) == 0)
7033 {
7034 arg += 5;
7035 if (strcmp (arg, "itanium1") == 0)
7036 md.tune = itanium1;
7037 else if (strcmp (arg, "itanium2") == 0)
7038 md.tune = itanium2;
7039 else
7040 return 0;
7041 }
7042 else
7043 return 0;
7044 break;
7045
7046 case 'N':
7047 if (strcmp (arg, "so") == 0)
7048 {
7049 /* Suppress signon message. */
7050 }
7051 else if (strcmp (arg, "pi") == 0)
7052 {
7053 /* Reject privileged instructions. FIXME */
7054 }
7055 else if (strcmp (arg, "us") == 0)
7056 {
7057 /* Allow union of signed and unsigned range. FIXME */
7058 }
7059 else if (strcmp (arg, "close_fcalls") == 0)
7060 {
7061 /* Do not resolve global function calls. */
7062 }
7063 else
7064 return 0;
7065 break;
7066
7067 case 'C':
7068 /* temp[="prefix"] Insert temporary labels into the object file
7069 symbol table prefixed by "prefix".
7070 Default prefix is ":temp:".
7071 */
7072 break;
7073
7074 case 'a':
7075 /* indirect=<tgt> Assume unannotated indirect branches behavior
7076 according to <tgt> --
7077 exit: branch out from the current context (default)
7078 labels: all labels in context may be branch targets
7079 */
7080 if (strncmp (arg, "indirect=", 9) != 0)
7081 return 0;
7082 break;
7083
7084 case 'x':
7085 /* -X conflicts with an ignored option, use -x instead */
7086 md.detect_dv = 1;
7087 if (!arg || strcmp (arg, "explicit") == 0)
7088 {
7089 /* set default mode to explicit */
7090 md.default_explicit_mode = 1;
7091 break;
7092 }
7093 else if (strcmp (arg, "auto") == 0)
7094 {
7095 md.default_explicit_mode = 0;
7096 }
7097 else if (strcmp (arg, "none") == 0)
7098 {
7099 md.detect_dv = 0;
7100 }
7101 else if (strcmp (arg, "debug") == 0)
7102 {
7103 md.debug_dv = 1;
7104 }
7105 else if (strcmp (arg, "debugx") == 0)
7106 {
7107 md.default_explicit_mode = 1;
7108 md.debug_dv = 1;
7109 }
7110 else if (strcmp (arg, "debugn") == 0)
7111 {
7112 md.debug_dv = 1;
7113 md.detect_dv = 0;
7114 }
7115 else
7116 {
7117 as_bad (_("Unrecognized option '-x%s'"), arg);
7118 }
7119 break;
7120
7121 case 'S':
7122 /* nops Print nops statistics. */
7123 break;
7124
7125 /* GNU specific switches for gcc. */
7126 case OPTION_MCONSTANT_GP:
7127 md.flags |= EF_IA_64_CONS_GP;
7128 break;
7129
7130 case OPTION_MAUTO_PIC:
7131 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7132 break;
7133
7134 default:
7135 return 0;
7136 }
7137
7138 return 1;
7139 }
7140
7141 void
7142 md_show_usage (FILE *stream)
7143 {
7144 fputs (_("\
7145 IA-64 options:\n\
7146 --mconstant-gp mark output file as using the constant-GP model\n\
7147 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7148 --mauto-pic mark output file as using the constant-GP model\n\
7149 without function descriptors (sets ELF header flag\n\
7150 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7151 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7152 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7153 -mtune=[itanium1|itanium2]\n\
7154 tune for a specific CPU (default -mtune=itanium2)\n\
7155 -munwind-check=[warning|error]\n\
7156 unwind directive check (default -munwind-check=warning)\n\
7157 -mhint.b=[ok|warning|error]\n\
7158 hint.b check (default -mhint.b=error)\n\
7159 -x | -xexplicit turn on dependency violation checking\n"), stream);
7160 /* Note for translators: "automagically" can be translated as "automatically" here. */
7161 fputs (_("\
7162 -xauto automagically remove dependency violations (default)\n\
7163 -xnone turn off dependency violation checking\n\
7164 -xdebug debug dependency violation checker\n\
7165 -xdebugn debug dependency violation checker but turn off\n\
7166 dependency violation checking\n\
7167 -xdebugx debug dependency violation checker and turn on\n\
7168 dependency violation checking\n"),
7169 stream);
7170 }
7171
7172 void
7173 ia64_after_parse_args (void)
7174 {
7175 if (debug_type == DEBUG_STABS)
7176 as_fatal (_("--gstabs is not supported for ia64"));
7177 }
7178
7179 /* Return true if TYPE fits in TEMPL at SLOT. */
7180
7181 static int
7182 match (int templ, int type, int slot)
7183 {
7184 enum ia64_unit unit;
7185 int result;
7186
7187 unit = ia64_templ_desc[templ].exec_unit[slot];
7188 switch (type)
7189 {
7190 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7191 case IA64_TYPE_A:
7192 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7193 break;
7194 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7195 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7196 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7197 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7198 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7199 default: result = 0; break;
7200 }
7201 return result;
7202 }
7203
7204 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7205 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7206 type M or I would fit in TEMPL at SLOT. */
7207
7208 static inline int
7209 extra_goodness (int templ, int slot)
7210 {
7211 switch (md.tune)
7212 {
7213 case itanium1:
7214 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7215 return 2;
7216 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7217 return 1;
7218 else
7219 return 0;
7220 break;
7221 case itanium2:
7222 if (match (templ, IA64_TYPE_M, slot)
7223 || match (templ, IA64_TYPE_I, slot))
7224 /* Favor M- and I-unit NOPs. We definitely want to avoid F-unit
7225 and B-unit NOPs, which may cause split-issue or less-than-optimal
7226 branch prediction. */
7227 return 2;
7228 else
7229 return 0;
7230 break;
7231 default:
7232 abort ();
7233 return 0;
7234 }
7235 }
7236
7237 /* This function is called once, at assembler startup time. It sets
7238 up all the tables, etc. that the MD part of the assembler will need
7239 that can be determined before arguments are parsed. */
7240 void
7241 md_begin (void)
7242 {
7243 int i, j, k, t, goodness, best, ok;
7244 const char *err;
7246
7247 md.auto_align = 1;
7248 md.explicit_mode = md.default_explicit_mode;
7249
7250 bfd_set_section_alignment (text_section, 4);
7251
7252 /* Make sure function pointers get initialized. */
7253 target_big_endian = -1;
7254 dot_byteorder (default_big_endian);
7255
7256 alias_hash = hash_new ();
7257 alias_name_hash = hash_new ();
7258 secalias_hash = hash_new ();
7259 secalias_name_hash = hash_new ();
7260
7261 pseudo_func[FUNC_DTP_MODULE].u.sym =
7262 symbol_new (".<dtpmod>", undefined_section, FUNC_DTP_MODULE,
7263 &zero_address_frag);
7264
7265 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7266 symbol_new (".<dtprel>", undefined_section, FUNC_DTP_RELATIVE,
7267 &zero_address_frag);
7268
7269 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7270 symbol_new (".<fptr>", undefined_section, FUNC_FPTR_RELATIVE,
7271 &zero_address_frag);
7272
7273 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7274 symbol_new (".<gprel>", undefined_section, FUNC_GP_RELATIVE,
7275 &zero_address_frag);
7276
7277 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7278 symbol_new (".<ltoff>", undefined_section, FUNC_LT_RELATIVE,
7279 &zero_address_frag);
7280
7281 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7282 symbol_new (".<ltoffx>", undefined_section, FUNC_LT_RELATIVE_X,
7283 &zero_address_frag);
7284
7285 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7286 symbol_new (".<pcrel>", undefined_section, FUNC_PC_RELATIVE,
7287 &zero_address_frag);
7288
7289 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7290 symbol_new (".<pltoff>", undefined_section, FUNC_PLT_RELATIVE,
7291 &zero_address_frag);
7292
7293 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7294 symbol_new (".<secrel>", undefined_section, FUNC_SEC_RELATIVE,
7295 &zero_address_frag);
7296
7297 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7298 symbol_new (".<segrel>", undefined_section, FUNC_SEG_RELATIVE,
7299 &zero_address_frag);
7300
7301 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7302 symbol_new (".<tprel>", undefined_section, FUNC_TP_RELATIVE,
7303 &zero_address_frag);
7304
7305 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7306 symbol_new (".<ltv>", undefined_section, FUNC_LTV_RELATIVE,
7307 &zero_address_frag);
7308
7309 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7310 symbol_new (".<ltoff.fptr>", undefined_section, FUNC_LT_FPTR_RELATIVE,
7311 &zero_address_frag);
7312
7313 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7314 symbol_new (".<ltoff.dtpmod>", undefined_section, FUNC_LT_DTP_MODULE,
7315 &zero_address_frag);
7316
7317 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7318 symbol_new (".<ltoff.dptrel>", undefined_section, FUNC_LT_DTP_RELATIVE,
7319 &zero_address_frag);
7320
7321 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7322 symbol_new (".<ltoff.tprel>", undefined_section, FUNC_LT_TP_RELATIVE,
7323 &zero_address_frag);
7324
7325 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7326 symbol_new (".<iplt>", undefined_section, FUNC_IPLT_RELOC,
7327 &zero_address_frag);
7328
7329 #ifdef TE_VMS
7330 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7331 symbol_new (".<slotcount>", undefined_section, FUNC_SLOTCOUNT_RELOC,
7332 &zero_address_frag);
7333 #endif
7334
7335 if (md.tune != itanium1)
7336 {
7337 /* Convert MFI NOPs bundles into MMI NOPs bundles. */
7338 le_nop[0] = 0x8;
7339 le_nop_stop[0] = 0x9;
7340 }
7341
7342 /* Compute the table of best templates. We compute goodness as a
7343 base 4 value, in which each match counts for 3. Match-failures
7344 result in NOPs and we use extra_goodness() to pick the execution
7345 units that are best suited for issuing the NOP. */
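  /* For example, a template matching all three types scores 3 + 3 + 3
     = 9, while one matching only the first type scores at most
     3 + 2 + 2 = 7 even with the extra_goodness () NOP bonuses, so a
     full match always wins.  */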
7346 for (i = 0; i < IA64_NUM_TYPES; ++i)
7347 for (j = 0; j < IA64_NUM_TYPES; ++j)
7348 for (k = 0; k < IA64_NUM_TYPES; ++k)
7349 {
7350 best = 0;
7351 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7352 {
7353 goodness = 0;
7354 if (match (t, i, 0))
7355 {
7356 if (match (t, j, 1))
7357 {
7358 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7359 goodness = 3 + 3 + 3;
7360 else
7361 goodness = 3 + 3 + extra_goodness (t, 2);
7362 }
7363 else if (match (t, j, 2))
7364 goodness = 3 + 3 + extra_goodness (t, 1);
7365 else
7366 {
7367 goodness = 3;
7368 goodness += extra_goodness (t, 1);
7369 goodness += extra_goodness (t, 2);
7370 }
7371 }
7372 else if (match (t, i, 1))
7373 {
7374 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7375 goodness = 3 + 3;
7376 else
7377 goodness = 3 + extra_goodness (t, 2);
7378 }
7379 else if (match (t, i, 2))
7380 goodness = 3 + extra_goodness (t, 1);
7381
7382 if (goodness > best)
7383 {
7384 best = goodness;
7385 best_template[i][j][k] = t;
7386 }
7387 }
7388 }
7389
7390 #ifdef DEBUG_TEMPLATES
7391 /* For debugging changes to the best_template calculations. We don't care
7392 about combinations with invalid instructions, so start the loops at 1. */
7393 for (i = 0; i < IA64_NUM_TYPES; ++i)
7394 for (j = 0; j < IA64_NUM_TYPES; ++j)
7395 for (k = 0; k < IA64_NUM_TYPES; ++k)
7396 {
7397 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7398 'x', 'd' };
7399 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7400 type_letter[k],
7401 ia64_templ_desc[best_template[i][j][k]].name);
7402 }
7403 #endif
7404
7405 for (i = 0; i < NUM_SLOTS; ++i)
7406 md.slot[i].user_template = -1;
7407
7408 md.pseudo_hash = hash_new ();
7409 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7410 {
7411 err = hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7412 (void *) (pseudo_opcode + i));
7413 if (err)
7414 as_fatal (_("ia64.md_begin: can't hash `%s': %s"),
7415 pseudo_opcode[i].name, err);
7416 }
7417
7418 md.reg_hash = hash_new ();
7419 md.dynreg_hash = hash_new ();
7420 md.const_hash = hash_new ();
7421 md.entry_hash = hash_new ();
7422
7423 /* general registers: */
7424 declare_register_set ("r", 128, REG_GR);
7425 declare_register ("gp", REG_GR + 1);
7426 declare_register ("sp", REG_GR + 12);
7427 declare_register ("tp", REG_GR + 13);
7428 declare_register_set ("ret", 4, REG_GR + 8);
7429
7430 /* floating point registers: */
7431 declare_register_set ("f", 128, REG_FR);
7432 declare_register_set ("farg", 8, REG_FR + 8);
7433 declare_register_set ("fret", 8, REG_FR + 8);
7434
7435 /* branch registers: */
7436 declare_register_set ("b", 8, REG_BR);
7437 declare_register ("rp", REG_BR + 0);
7438
7439 /* predicate registers: */
7440 declare_register_set ("p", 64, REG_P);
7441 declare_register ("pr", REG_PR);
7442 declare_register ("pr.rot", REG_PR_ROT);
7443
7444 /* application registers: */
7445 declare_register_set ("ar", 128, REG_AR);
7446 for (i = 0; i < NELEMS (ar); ++i)
7447 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7448
7449 /* control registers: */
7450 declare_register_set ("cr", 128, REG_CR);
7451 for (i = 0; i < NELEMS (cr); ++i)
7452 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7453
7454 /* dahr registers: */
7455 declare_register_set ("dahr", 8, REG_DAHR);
7456
7457 declare_register ("ip", REG_IP);
7458 declare_register ("cfm", REG_CFM);
7459 declare_register ("psr", REG_PSR);
7460 declare_register ("psr.l", REG_PSR_L);
7461 declare_register ("psr.um", REG_PSR_UM);
7462
7463 for (i = 0; i < NELEMS (indirect_reg); ++i)
7464 {
7465 unsigned int regnum = indirect_reg[i].regnum;
7466
7467 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7468 }
7469
7470 /* pseudo-registers used to specify unwind info: */
7471 declare_register ("psp", REG_PSP);
7472
7473 for (i = 0; i < NELEMS (const_bits); ++i)
7474 {
7475 err = hash_insert (md.const_hash, const_bits[i].name,
7476 (void *) (const_bits + i));
7477 if (err)
7478 as_fatal (_("Inserting \"%s\" into constant hash table failed: %s"),
7479 const_bits[i].name, err);
7480 }
7481
7482 /* Set the architecture and machine depending on defaults and command line
7483 options. */
7484 if (md.flags & EF_IA_64_ABI64)
7485 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7486 else
7487 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7488
7489 if (! ok)
7490 as_warn (_("Could not set architecture and machine"));
7491
7492 /* Set the pointer size and pointer shift size depending on md.flags */
7493
7494 if (md.flags & EF_IA_64_ABI64)
7495 {
7496 md.pointer_size = 8; /* pointers are 8 bytes */
7497 md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7498 }
7499 else
7500 {
7501 md.pointer_size = 4; /* pointers are 4 bytes */
7502 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7503 }
7504
7505 md.mem_offset.hint = 0;
7506 md.path = 0;
7507 md.maxpaths = 0;
7508 md.entry_labels = NULL;
7509 }
7510
7511 /* Set the default options in md. Cannot do this in md_begin because
7512 that is called after md_parse_option which is where we set the
7513 options in md based on command line options. */
7514
7515 void
7516 ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7517 {
7518 md.flags = MD_FLAGS_DEFAULT;
7519 #ifndef TE_VMS
7520 /* Don't turn on dependency checking for VMS; it doesn't work. */
7521 md.detect_dv = 1;
7522 #endif
7523 /* FIXME: We should change it to unwind_check_error someday. */
7524 md.unwind_check = unwind_check_warning;
7525 md.hint_b = hint_b_error;
7526 md.tune = itanium2;
7527 }
7528
7529 /* Return a string for the target object file format. */
7530
7531 const char *
7532 ia64_target_format (void)
7533 {
7534 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7535 {
7536 if (md.flags & EF_IA_64_BE)
7537 {
7538 if (md.flags & EF_IA_64_ABI64)
7539 #if defined(TE_AIX50)
7540 return "elf64-ia64-aix-big";
7541 #elif defined(TE_HPUX)
7542 return "elf64-ia64-hpux-big";
7543 #else
7544 return "elf64-ia64-big";
7545 #endif
7546 else
7547 #if defined(TE_AIX50)
7548 return "elf32-ia64-aix-big";
7549 #elif defined(TE_HPUX)
7550 return "elf32-ia64-hpux-big";
7551 #else
7552 return "elf32-ia64-big";
7553 #endif
7554 }
7555 else
7556 {
7557 if (md.flags & EF_IA_64_ABI64)
7558 #if defined (TE_AIX50)
7559 return "elf64-ia64-aix-little";
7560 #elif defined (TE_VMS)
7561 {
7562 md.flags |= EF_IA_64_ARCHVER_1;
7563 return "elf64-ia64-vms";
7564 }
7565 #else
7566 return "elf64-ia64-little";
7567 #endif
7568 else
7569 #ifdef TE_AIX50
7570 return "elf32-ia64-aix-little";
7571 #else
7572 return "elf32-ia64-little";
7573 #endif
7574 }
7575 }
7576 else
7577 return "unknown-format";
7578 }
7579
7580 void
7581 ia64_end_of_source (void)
7582 {
7583 /* terminate insn group upon reaching end of file: */
7584 insn_group_break (1, 0, 0);
7585
7586 /* emits slots we haven't written yet: */
7587 ia64_flush_insns ();
7588
7589 bfd_set_private_flags (stdoutput, md.flags);
7590
7591 md.mem_offset.hint = 0;
7592 }
7593
7594 void
7595 ia64_start_line (void)
7596 {
7597 static int first;
7598
7599 if (!first) {
7600 /* Make sure we don't reference input_line_pointer[-1] when that's
7601 not valid. */
7602 first = 1;
7603 return;
7604 }
7605
7606 if (md.qp.X_op == O_register)
7607 as_bad (_("qualifying predicate not followed by instruction"));
7608 md.qp.X_op = O_absent;
7609
7610 if (ignore_input ())
7611 return;
7612
7613 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7614 {
7615 if (md.detect_dv && !md.explicit_mode)
7616 {
7617 static int warned;
7618
7619 if (!warned)
7620 {
7621 warned = 1;
7622 as_warn (_("Explicit stops are ignored in auto mode"));
7623 }
7624 }
7625 else
7626 insn_group_break (1, 0, 0);
7627 }
7628 else if (input_line_pointer[-1] == '{')
7629 {
7630 if (md.manual_bundling)
7631 as_warn (_("Found '{' when manual bundling is already turned on"));
7632 else
7633 CURR_SLOT.manual_bundling_on = 1;
7634 md.manual_bundling = 1;
7635
7636 /* Bundling is only acceptable in explicit mode
7637 or when in default automatic mode. */
7638 if (md.detect_dv && !md.explicit_mode)
7639 {
7640 if (!md.mode_explicitly_set
7641 && !md.default_explicit_mode)
7642 dot_dv_mode ('E');
7643 else
7644 as_warn (_("Found '{' after explicit switch to automatic mode"));
7645 }
7646 }
7647 else if (input_line_pointer[-1] == '}')
7648 {
7649 if (!md.manual_bundling)
7650 as_warn (_("Found '}' when manual bundling is off"));
7651 else
7652 PREV_SLOT.manual_bundling_off = 1;
7653 md.manual_bundling = 0;
7654
7655 /* switch back to automatic mode, if applicable */
7656 if (md.detect_dv
7657 && md.explicit_mode
7658 && !md.mode_explicitly_set
7659 && !md.default_explicit_mode)
7660 dot_dv_mode ('A');
7661 }
7662 }
7663
7664 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7665 labels. */
7666 static int defining_tag = 0;
7667
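/* Handle line-leading syntax the generic reader does not know about:
   "(qp)" qualifying-predicate prefixes and "[tag:]" tag definitions.
   Returns nonzero if the construct was recognized.  */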
7668 int
7669 ia64_unrecognized_line (int ch)
7670 {
7671 switch (ch)
7672 {
7673 case '(':
7674 expression_and_evaluate (&md.qp);
7675 if (*input_line_pointer++ != ')')
7676 {
7677 as_bad (_("Expected ')'"));
7678 return 0;
7679 }
7680 if (md.qp.X_op != O_register)
7681 {
7682 as_bad (_("Qualifying predicate expected"));
7683 return 0;
7684 }
7685 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7686 {
7687 as_bad (_("Predicate register expected"));
7688 return 0;
7689 }
7690 return 1;
7691
7692 case '[':
7693 {
7694 char *s;
7695 char c;
7696 symbolS *tag;
7697 int temp;
7698
7699 if (md.qp.X_op == O_register)
7700 {
7701 as_bad (_("Tag must come before qualifying predicate."));
7702 return 0;
7703 }
7704
7705 /* This implements just enough of read_a_source_file in read.c to
7706 recognize labels. */
7707 if (is_name_beginner (*input_line_pointer))
7708 {
7709 c = get_symbol_name (&s);
7710 }
7711 else if (LOCAL_LABELS_FB
7712 && ISDIGIT (*input_line_pointer))
7713 {
7714 temp = 0;
7715 while (ISDIGIT (*input_line_pointer))
7716 temp = (temp * 10) + *input_line_pointer++ - '0';
7717 fb_label_instance_inc (temp);
7718 s = fb_label_name (temp, 0);
7719 c = *input_line_pointer;
7720 }
7721 else
7722 {
7723 s = NULL;
7724 c = '\0';
7725 }
7726 if (c != ':')
7727 {
7728 /* Put ':' back for error messages' sake. */
7729 *input_line_pointer++ = ':';
7730 as_bad (_("Expected ':'"));
7731 return 0;
7732 }
7733
7734 defining_tag = 1;
7735 tag = colon (s);
7736 defining_tag = 0;
7737 /* Put ':' back for error messages' sake. */
7738 *input_line_pointer++ = ':';
7739 if (*input_line_pointer++ != ']')
7740 {
7741 as_bad (_("Expected ']'"));
7742 return 0;
7743 }
7744 if (! tag)
7745 {
7746 as_bad (_("Tag name expected"));
7747 return 0;
7748 }
7749 return 1;
7750 }
7751
7752 default:
7753 break;
7754 }
7755
7756 /* Not a valid line. */
7757 return 0;
7758 }
7759
7760 void
7761 ia64_frob_label (struct symbol *sym)
7762 {
7763 struct label_fix *fix;
7764
7765 /* Tags need special handling since they are not bundle breaks like
7766 labels. */
7767 if (defining_tag)
7768 {
7769 fix = XOBNEW (&notes, struct label_fix);
7770 fix->sym = sym;
7771 fix->next = CURR_SLOT.tag_fixups;
7772 fix->dw2_mark_labels = FALSE;
7773 CURR_SLOT.tag_fixups = fix;
7774
7775 return;
7776 }
7777
7778 if (bfd_section_flags (now_seg) & SEC_CODE)
7779 {
7780 md.last_text_seg = now_seg;
7781 fix = XOBNEW (&notes, struct label_fix);
7782 fix->sym = sym;
7783 fix->next = CURR_SLOT.label_fixups;
7784 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7785 CURR_SLOT.label_fixups = fix;
7786
7787 /* Keep track of how many code entry points we've seen. */
7788 if (md.path == md.maxpaths)
7789 {
7790 md.maxpaths += 20;
7791 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7792 md.maxpaths);
7793 }
7794 md.entry_labels[md.path++] = S_GET_NAME (sym);
7795 }
7796 }
7797
7798 #ifdef TE_HPUX
7799 /* The HP-UX linker will give unresolved symbol errors for symbols
7800 that are declared but unused. This routine removes declared,
7801 unused symbols from an object. */
7802 int
7803 ia64_frob_symbol (struct symbol *sym)
7804 {
7805 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7806 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7807 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7808 && ! S_IS_EXTERNAL (sym)))
7809 return 1;
7810 return 0;
7811 }
7812 #endif
7813
7814 void
7815 ia64_flush_pending_output (void)
7816 {
7817 if (!md.keep_pending_output
7818 && bfd_section_flags (now_seg) & SEC_CODE)
7819 {
7820 /* ??? This causes many unnecessary stop bits to be emitted.
7821 Unfortunately, it isn't clear if it is safe to remove this. */
7822 insn_group_break (1, 0, 0);
7823 ia64_flush_insns ();
7824 }
7825 }
7826
7827 /* Do ia64-specific expression optimization. All that's done here is
7828 to transform index expressions that are either due to the indexing
7829 of rotating registers or due to the indexing of indirect register
7830 sets. */
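/* For instance (illustrative): after ".rotr t[4]", the operand t[2]
   simply folds to the GR two above the set's base register, while an
   indirect access such as pmd[r5] becomes an O_index expression whose
   op symbol names the pmd register file and whose value is the
   indexing GR.  */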
7831 int
7832 ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7833 {
7834 if (op != O_index)
7835 return 0;
7836 resolve_expression (l);
7837 if (l->X_op == O_register)
7838 {
7839 unsigned num_regs = l->X_add_number >> 16;
7840
7841 resolve_expression (r);
7842 if (num_regs)
7843 {
7844 /* Left side is a .rotX-allocated register. */
7845 if (r->X_op != O_constant)
7846 {
7847 as_bad (_("Rotating register index must be a non-negative constant"));
7848 r->X_add_number = 0;
7849 }
7850 else if ((valueT) r->X_add_number >= num_regs)
7851 {
7852 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7853 r->X_add_number = 0;
7854 }
7855 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7856 return 1;
7857 }
7858 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7859 {
7860 if (r->X_op != O_register
7861 || r->X_add_number < REG_GR
7862 || r->X_add_number > REG_GR + 127)
7863 {
7864 as_bad (_("Indirect register index must be a general register"));
7865 r->X_add_number = REG_GR;
7866 }
7867 l->X_op = O_index;
7868 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7869 l->X_add_number = r->X_add_number;
7870 return 1;
7871 }
7872 }
7873 as_bad (_("Index can only be applied to rotating or indirect registers"));
7874 /* Fall back to a register whose use has as few side effects as
7875 possible, to minimize subsequent error messages. */
7876 l->X_op = O_register;
7877 l->X_add_number = REG_GR + 3;
7878 return 1;
7879 }
7880
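/* Resolve NAME when the expression parser does not recognize it:
   "@" relocation pseudo-functions (e.g. @gprel (sym)), register and
   constant names, the stacked-register aliases inN/locN/outN, and the
   names of rotating register sets (e.g. those declared with .rotr).
   Returns nonzero and fills in E on success.  */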
7881 int
7882 ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7883 {
7884 struct const_desc *cdesc;
7885 struct dynreg *dr = 0;
7886 unsigned int idx;
7887 struct symbol *sym;
7888 char *end;
7889
7890 if (*name == '@')
7891 {
7892 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7893
7894 /* Find what relocation pseudo-function we're dealing with. */
7895 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7896 if (pseudo_func[idx].name
7897 && pseudo_func[idx].name[0] == name[1]
7898 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7899 {
7900 pseudo_type = pseudo_func[idx].type;
7901 break;
7902 }
7903 switch (pseudo_type)
7904 {
7905 case PSEUDO_FUNC_RELOC:
7906 end = input_line_pointer;
7907 if (*nextcharP != '(')
7908 {
7909 as_bad (_("Expected '('"));
7910 break;
7911 }
7912 /* Skip '('. */
7913 ++input_line_pointer;
7914 expression (e);
7915 if (*input_line_pointer != ')')
7916 {
7917 as_bad (_("Missing ')'"));
7918 goto done;
7919 }
7920 /* Skip ')'. */
7921 ++input_line_pointer;
7922 #ifdef TE_VMS
7923 if (idx == FUNC_SLOTCOUNT_RELOC)
7924 {
7925 /* @slotcount can accept any expression. Canonicalize. */
7926 e->X_add_symbol = make_expr_symbol (e);
7927 e->X_op = O_symbol;
7928 e->X_add_number = 0;
7929 }
7930 #endif
7931 if (e->X_op != O_symbol)
7932 {
7933 if (e->X_op != O_pseudo_fixup)
7934 {
7935 as_bad (_("Not a symbolic expression"));
7936 goto done;
7937 }
7938 if (idx != FUNC_LT_RELATIVE)
7939 {
7940 as_bad (_("Illegal combination of relocation functions"));
7941 goto done;
7942 }
7943 switch (S_GET_VALUE (e->X_op_symbol))
7944 {
7945 case FUNC_FPTR_RELATIVE:
7946 idx = FUNC_LT_FPTR_RELATIVE; break;
7947 case FUNC_DTP_MODULE:
7948 idx = FUNC_LT_DTP_MODULE; break;
7949 case FUNC_DTP_RELATIVE:
7950 idx = FUNC_LT_DTP_RELATIVE; break;
7951 case FUNC_TP_RELATIVE:
7952 idx = FUNC_LT_TP_RELATIVE; break;
7953 default:
7954 as_bad (_("Illegal combination of relocation functions"));
7955 goto done;
7956 }
7957 }
7958 /* Make sure gas doesn't get rid of local symbols that are used
7959 in relocs. */
7960 e->X_op = O_pseudo_fixup;
7961 e->X_op_symbol = pseudo_func[idx].u.sym;
7962 done:
7963 *nextcharP = *input_line_pointer;
7964 break;
7965
7966 case PSEUDO_FUNC_CONST:
7967 e->X_op = O_constant;
7968 e->X_add_number = pseudo_func[idx].u.ival;
7969 break;
7970
7971 case PSEUDO_FUNC_REG:
7972 e->X_op = O_register;
7973 e->X_add_number = pseudo_func[idx].u.ival;
7974 break;
7975
7976 default:
7977 return 0;
7978 }
7979 return 1;
7980 }
7981
7982 /* first see if NAME is a known register name: */
7983 sym = hash_find (md.reg_hash, name);
7984 if (sym)
7985 {
7986 e->X_op = O_register;
7987 e->X_add_number = S_GET_VALUE (sym);
7988 return 1;
7989 }
7990
7991 cdesc = hash_find (md.const_hash, name);
7992 if (cdesc)
7993 {
7994 e->X_op = O_constant;
7995 e->X_add_number = cdesc->value;
7996 return 1;
7997 }
7998
7999 /* check for inN, locN, or outN: */
8000 idx = 0;
8001 switch (name[0])
8002 {
8003 case 'i':
8004 if (name[1] == 'n' && ISDIGIT (name[2]))
8005 {
8006 dr = &md.in;
8007 idx = 2;
8008 }
8009 break;
8010
8011 case 'l':
8012 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8013 {
8014 dr = &md.loc;
8015 idx = 3;
8016 }
8017 break;
8018
8019 case 'o':
8020 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8021 {
8022 dr = &md.out;
8023 idx = 3;
8024 }
8025 break;
8026
8027 default:
8028 break;
8029 }
8030
8031 /* Ignore register numbers with leading zeroes, except zero itself. */
8032 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8033 {
8034 unsigned long regnum;
8035
8036 /* The name is inN, locN, or outN; parse the register number. */
8037 regnum = strtoul (name + idx, &end, 10);
8038 if (end > name + idx && *end == '\0' && regnum < 96)
8039 {
8040 if (regnum >= dr->num_regs)
8041 {
8042 if (!dr->num_regs)
8043 as_bad (_("No current frame"));
8044 else
8045 as_bad (_("Register number out of range 0..%u"),
8046 dr->num_regs - 1);
8047 regnum = 0;
8048 }
8049 e->X_op = O_register;
8050 e->X_add_number = dr->base + regnum;
8051 return 1;
8052 }
8053 }
8054
8055 end = xstrdup (name);
8056 name = ia64_canonicalize_symbol_name (end);
8057 if ((dr = hash_find (md.dynreg_hash, name)))
8058 {
8059 /* We've got ourselves the name of a rotating register set.
8060 Store the base register number in the low 16 bits of
8061 X_add_number and the size of the register set in the top 16
8062 bits. */
8063 e->X_op = O_register;
8064 e->X_add_number = dr->base | (dr->num_regs << 16);
8065 free (end);
8066 return 1;
8067 }
8068 free (end);
8069 return 0;
8070 }
8071
8072 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
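/* E.g. "foo#" and "foo" denote the same symbol, "foo##" additionally
   draws a redundant-suffix warning, and a bare "#" is rejected.  */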
8073
8074 char *
8075 ia64_canonicalize_symbol_name (char *name)
8076 {
8077 size_t len = strlen (name), full = len;
8078
8079 while (len > 0 && name[len - 1] == '#')
8080 --len;
8081 if (len <= 0)
8082 {
8083 if (full > 0)
8084 as_bad (_("Standalone `#' is illegal"));
8085 }
8086 else if (len < full - 1)
8087 as_warn (_("Redundant `#' suffix operators"));
8088 name[len] = '\0';
8089 return name;
8090 }
8091
8092 /* Return true if idesc is a conditional branch instruction. This excludes
8093 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8094 because they always read/write resources regardless of the value of the
8095 qualifying predicate. br.ia must always use p0, and hence is always
8096 taken. Thus this function returns true for branches which can fall
8097 through, and which use no resources if they do fall through. */
8098
8099 static int
8100 is_conditional_branch (struct ia64_opcode *idesc)
8101 {
8102 /* br is a conditional branch. Everything that starts with br. except
8103 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8104 Everything that starts with brl is a conditional branch. */
8105 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8106 && (idesc->name[2] == '\0'
8107 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8108 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8109 || idesc->name[2] == 'l'
8110 /* br.cond, br.call, br.clr */
8111 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8112 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8113 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8114 }
8115
8116 /* Return whether the given opcode is a taken branch. If there's any doubt,
8117 returns zero. */
8118
8119 static int
8120 is_taken_branch (struct ia64_opcode *idesc)
8121 {
8122 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8123 || strncmp (idesc->name, "br.ia", 5) == 0);
8124 }
8125
8126 /* Return whether the given opcode is an interruption or rfi. If there's any
8127 doubt, returns zero. */
8128
8129 static int
8130 is_interruption_or_rfi (struct ia64_opcode *idesc)
8131 {
8132 if (strcmp (idesc->name, "rfi") == 0)
8133 return 1;
8134 return 0;
8135 }
8136
8137 /* Returns the index of the given dependency in the opcode's list of chks, or
8138 -1 if there is no dependency. */
8139
8140 static int
8141 depends_on (int depind, struct ia64_opcode *idesc)
8142 {
8143 int i;
8144 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8145 for (i = 0; i < dep->nchks; i++)
8146 {
8147 if (depind == DEP (dep->chks[i]))
8148 return i;
8149 }
8150 return -1;
8151 }
8152
8153 /* Determine a set of specific resources used for a particular resource
8154 class. Returns the number of specific resources identified. For those
8155 cases which are not determinable statically, the resource returned is
8156 marked nonspecific.
8157
8158 Meanings of value in 'NOTE':
8159 1) only read/write when the register number is explicitly encoded in the
8160 insn.
8161 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8162 accesses CFM when qualifying predicate is in the rotating region.
8163 3) general register value is used to specify an indirect register; not
8164 determinable statically.
8165 4) only read the given resource when bits 7:0 of the indirect index
8166 register value does not match the register number of the resource; not
8167 determinable statically.
8168 5) all rules are implementation specific.
8169 6) only when both the index specified by the reader and the index specified
8170 by the writer have the same value in bits 63:61; not determinable
8171 statically.
8172 7) only access the specified resource when the corresponding mask bit is
8173 set
8174 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8175 only read when these insns reference FR2-31
8176 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8177 written when these insns write FR32-127
8178 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8179 instruction
8180 11) The target predicates are written independently of PR[qp], but source
8181 registers are only read if PR[qp] is true. Since the state of PR[qp]
8182 cannot statically be determined, all source registers are marked used.
8183 12) This insn only reads the specified predicate register when that
8184 register is the PR[qp].
8185 13) This reference to ld-c only applies to the GR whose value is loaded
8186 with data returned from memory, not the post-incremented address register.
8187 14) The RSE resource includes the implementation-specific RSE internal
8188 state resources. At least one (and possibly more) of these resources are
8189 read by each instruction listed in IC:rse-readers. At least one (and
8190 possibly more) of these resources are written by each insn listed in
8191 IC:rse-writers.
8192 15+16) Represents reserved instructions, which the assembler does not
8193 generate.
8194 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8195 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8196
8197 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8198 this code; there are no dependency violations based on memory access.
8199 */
8200
8201 #define MAX_SPECS 256
8202 #define DV_CHK 1
8203 #define DV_REG 0
8204
8205 static int
8206 specify_resource (const struct ia64_dependency *dep,
8207 struct ia64_opcode *idesc,
8208 /* is this a DV chk or a DV reg? */
8209 int type,
8210 /* returned specific resources */
8211 struct rsrc specs[MAX_SPECS],
8212 /* resource note for this insn's usage */
8213 int note,
8214 /* which execution path to examine */
8215 int path)
8216 {
8217 int count = 0;
8218 int i;
8219 int rsrc_write = 0;
8220 struct rsrc tmpl;
8221
8222 if (dep->mode == IA64_DV_WAW
8223 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8224 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8225 rsrc_write = 1;
8226
8227 /* template for any resources we identify */
8228 tmpl.dependency = dep;
8229 tmpl.note = note;
8230 tmpl.insn_srlz = tmpl.data_srlz = 0;
8231 tmpl.qp_regno = CURR_SLOT.qp_regno;
8232 tmpl.link_to_qp_branch = 1;
8233 tmpl.mem_offset.hint = 0;
8234 tmpl.mem_offset.offset = 0;
8235 tmpl.mem_offset.base = 0;
8236 tmpl.specific = 1;
8237 tmpl.index = -1;
8238 tmpl.cmp_type = CMP_NONE;
8239 tmpl.depind = 0;
8240 tmpl.file = NULL;
8241 tmpl.line = 0;
8242 tmpl.path = 0;
8243
8244 #define UNHANDLED \
8245 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8246 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8247 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8248
8249 /* we don't need to track these */
8250 if (dep->semantics == IA64_DVS_NONE)
8251 return 0;
8252
8253 switch (dep->specifier)
8254 {
8255 case IA64_RS_AR_K:
8256 if (note == 1)
8257 {
8258 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8259 {
8260 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8261 if (regno >= 0 && regno <= 7)
8262 {
8263 specs[count] = tmpl;
8264 specs[count++].index = regno;
8265 }
8266 }
8267 }
8268 else if (note == 0)
8269 {
8270 for (i = 0; i < 8; i++)
8271 {
8272 specs[count] = tmpl;
8273 specs[count++].index = i;
8274 }
8275 }
8276 else
8277 {
8278 UNHANDLED;
8279 }
8280 break;
8281
8282 case IA64_RS_AR_UNAT:
8283 /* This is a mov =AR or mov AR= instruction. */
8284 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8285 {
8286 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8287 if (regno == AR_UNAT)
8288 {
8289 specs[count++] = tmpl;
8290 }
8291 }
8292 else
8293 {
8294 /* This is a spill/fill, or other instruction that modifies the
8295 unat register. */
8296
8297 /* Unless we can determine the specific bits used, mark the whole
8298 thing; bits 8:3 of the memory address indicate the bit used in
8299 UNAT. The .mem.offset hint may be used to eliminate a small
8300 subset of conflicts. */
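	  /* Ed. note (illustration): two st8.spill insns whose .mem.offset
	     hints resolve to offsets 0 and 8 from the same base touch UNAT
	     bits 0 and 1 respectively, since the approximate bit index kept
	     below is (offset >> 3) & 0x3f; without hints, both spills are
	     treated as touching the whole UNAT.  */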
8301 specs[count] = tmpl;
8302 if (md.mem_offset.hint)
8303 {
8304 if (md.debug_dv)
8305 fprintf (stderr, " Using hint for spill/fill\n");
8306 /* The index isn't actually used, just set it to something
8307 approximating the bit index. */
8308 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8309 specs[count].mem_offset.hint = 1;
8310 specs[count].mem_offset.offset = md.mem_offset.offset;
8311 specs[count++].mem_offset.base = md.mem_offset.base;
8312 }
8313 else
8314 {
8315 specs[count++].specific = 0;
8316 }
8317 }
8318 break;
8319
8320 case IA64_RS_AR:
8321 if (note == 1)
8322 {
8323 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8324 {
8325 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8326 if ((regno >= 8 && regno <= 15)
8327 || (regno >= 20 && regno <= 23)
8328 || (regno >= 31 && regno <= 39)
8329 || (regno >= 41 && regno <= 47)
8330 || (regno >= 67 && regno <= 111))
8331 {
8332 specs[count] = tmpl;
8333 specs[count++].index = regno;
8334 }
8335 }
8336 }
8337 else
8338 {
8339 UNHANDLED;
8340 }
8341 break;
8342
8343 case IA64_RS_ARb:
8344 if (note == 1)
8345 {
8346 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8347 {
8348 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8349 if ((regno >= 48 && regno <= 63)
8350 || (regno >= 112 && regno <= 127))
8351 {
8352 specs[count] = tmpl;
8353 specs[count++].index = regno;
8354 }
8355 }
8356 }
8357 else if (note == 0)
8358 {
8359 for (i = 48; i < 64; i++)
8360 {
8361 specs[count] = tmpl;
8362 specs[count++].index = i;
8363 }
8364 for (i = 112; i < 128; i++)
8365 {
8366 specs[count] = tmpl;
8367 specs[count++].index = i;
8368 }
8369 }
8370 else
8371 {
8372 UNHANDLED;
8373 }
8374 break;
8375
8376 case IA64_RS_BR:
8377 if (note != 1)
8378 {
8379 UNHANDLED;
8380 }
8381 else
8382 {
8383 if (rsrc_write)
8384 {
8385 for (i = 0; i < idesc->num_outputs; i++)
8386 if (idesc->operands[i] == IA64_OPND_B1
8387 || idesc->operands[i] == IA64_OPND_B2)
8388 {
8389 specs[count] = tmpl;
8390 specs[count++].index =
8391 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8392 }
8393 }
8394 else
8395 {
8396 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8397 if (idesc->operands[i] == IA64_OPND_B1
8398 || idesc->operands[i] == IA64_OPND_B2)
8399 {
8400 specs[count] = tmpl;
8401 specs[count++].index =
8402 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8403 }
8404 }
8405 }
8406 break;
8407
8408 case IA64_RS_CPUID: /* four or more registers */
8409 if (note == 3)
8410 {
8411 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8412 {
8413 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8414 if (regno >= 0 && regno < NELEMS (gr_values)
8415 && KNOWN (regno))
8416 {
8417 specs[count] = tmpl;
8418 specs[count++].index = gr_values[regno].value & 0xFF;
8419 }
8420 else
8421 {
8422 specs[count] = tmpl;
8423 specs[count++].specific = 0;
8424 }
8425 }
8426 }
8427 else
8428 {
8429 UNHANDLED;
8430 }
8431 break;
8432
8433 case IA64_RS_DBR: /* four or more registers */
8434 if (note == 3)
8435 {
8436 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8437 {
8438 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8439 if (regno >= 0 && regno < NELEMS (gr_values)
8440 && KNOWN (regno))
8441 {
8442 specs[count] = tmpl;
8443 specs[count++].index = gr_values[regno].value & 0xFF;
8444 }
8445 else
8446 {
8447 specs[count] = tmpl;
8448 specs[count++].specific = 0;
8449 }
8450 }
8451 }
8452 else if (note == 0 && !rsrc_write)
8453 {
8454 specs[count] = tmpl;
8455 specs[count++].specific = 0;
8456 }
8457 else
8458 {
8459 UNHANDLED;
8460 }
8461 break;
8462
8463 case IA64_RS_IBR: /* four or more registers */
8464 if (note == 3)
8465 {
8466 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8467 {
8468 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8469 if (regno >= 0 && regno < NELEMS (gr_values)
8470 && KNOWN (regno))
8471 {
8472 specs[count] = tmpl;
8473 specs[count++].index = gr_values[regno].value & 0xFF;
8474 }
8475 else
8476 {
8477 specs[count] = tmpl;
8478 specs[count++].specific = 0;
8479 }
8480 }
8481 }
8482 else
8483 {
8484 UNHANDLED;
8485 }
8486 break;
8487
8488 case IA64_RS_MSR:
8489 if (note == 5)
8490 {
8491 /* These are implementation specific. Force all references to
8492 conflict with all other references. */
8493 specs[count] = tmpl;
8494 specs[count++].specific = 0;
8495 }
8496 else
8497 {
8498 UNHANDLED;
8499 }
8500 break;
8501
8502 case IA64_RS_PKR: /* 16 or more registers */
8503 if (note == 3 || note == 4)
8504 {
8505 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8506 {
8507 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8508 if (regno >= 0 && regno < NELEMS (gr_values)
8509 && KNOWN (regno))
8510 {
8511 if (note == 3)
8512 {
8513 specs[count] = tmpl;
8514 specs[count++].index = gr_values[regno].value & 0xFF;
8515 }
8516 else
8517 for (i = 0; i < NELEMS (gr_values); i++)
8518 {
8519 /* Uses all registers *except* the one in R3. */
8520 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8521 {
8522 specs[count] = tmpl;
8523 specs[count++].index = i;
8524 }
8525 }
8526 }
8527 else
8528 {
8529 specs[count] = tmpl;
8530 specs[count++].specific = 0;
8531 }
8532 }
8533 }
8534 else if (note == 0)
8535 {
8536 /* probe et al. */
8537 specs[count] = tmpl;
8538 specs[count++].specific = 0;
8539 }
8540 break;
8541
8542 case IA64_RS_PMC: /* four or more registers */
8543 if (note == 3)
8544 {
8545 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8546 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8548 {
8549 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8550 ? 1 : !rsrc_write);
8551 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8552 if (regno >= 0 && regno < NELEMS (gr_values)
8553 && KNOWN (regno))
8554 {
8555 specs[count] = tmpl;
8556 specs[count++].index = gr_values[regno].value & 0xFF;
8557 }
8558 else
8559 {
8560 specs[count] = tmpl;
8561 specs[count++].specific = 0;
8562 }
8563 }
8564 }
8565 else
8566 {
8567 UNHANDLED;
8568 }
8569 break;
8570
8571 case IA64_RS_PMD: /* four or more registers */
8572 if (note == 3)
8573 {
8574 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8575 {
8576 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8577 if (regno >= 0 && regno < NELEMS (gr_values)
8578 && KNOWN (regno))
8579 {
8580 specs[count] = tmpl;
8581 specs[count++].index = gr_values[regno].value & 0xFF;
8582 }
8583 else
8584 {
8585 specs[count] = tmpl;
8586 specs[count++].specific = 0;
8587 }
8588 }
8589 }
8590 else
8591 {
8592 UNHANDLED;
8593 }
8594 break;
8595
8596 case IA64_RS_RR: /* eight registers */
8597 if (note == 6)
8598 {
8599 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8600 {
8601 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8602 if (regno >= 0 && regno < NELEMS (gr_values)
8603 && KNOWN (regno))
8604 {
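		  /* Ed. note: region registers are selected by the top three
		     bits of the virtual address (VA{63:61}), which is what
		     the shift below extracts from the known GR value.  */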
8605 specs[count] = tmpl;
8606 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8607 }
8608 else
8609 {
8610 specs[count] = tmpl;
8611 specs[count++].specific = 0;
8612 }
8613 }
8614 }
8615 else if (note == 0 && !rsrc_write)
8616 {
8617 specs[count] = tmpl;
8618 specs[count++].specific = 0;
8619 }
8620 else
8621 {
8622 UNHANDLED;
8623 }
8624 break;
8625
8626 case IA64_RS_CR_IRR:
8627 if (note == 0)
8628 {
8629 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8630 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8631 if (rsrc_write
8632 && idesc->operands[1] == IA64_OPND_CR3
8633 && regno == CR_IVR)
8634 {
8635 for (i = 0; i < 4; i++)
8636 {
8637 specs[count] = tmpl;
8638 specs[count++].index = CR_IRR0 + i;
8639 }
8640 }
8641 }
8642 else if (note == 1)
8643 {
8644 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8645 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8646 && regno >= CR_IRR0
8647 && regno <= CR_IRR3)
8648 {
8649 specs[count] = tmpl;
8650 specs[count++].index = regno;
8651 }
8652 }
8653 else
8654 {
8655 UNHANDLED;
8656 }
8657 break;
8658
8659 case IA64_RS_CR_IIB:
8660 if (note != 0)
8661 {
8662 UNHANDLED;
8663 }
8664 else
8665 {
8666 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8667 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8668 && (regno == CR_IIB0 || regno == CR_IIB1))
8669 {
8670 specs[count] = tmpl;
8671 specs[count++].index = regno;
8672 }
8673 }
8674 break;
8675
8676 case IA64_RS_CR_LRR:
8677 if (note != 1)
8678 {
8679 UNHANDLED;
8680 }
8681 else
8682 {
8683 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8684 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8685 && (regno == CR_LRR0 || regno == CR_LRR1))
8686 {
8687 specs[count] = tmpl;
8688 specs[count++].index = regno;
8689 }
8690 }
8691 break;
8692
8693 case IA64_RS_CR:
8694 if (note == 1)
8695 {
8696 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8697 {
8698 specs[count] = tmpl;
8699 specs[count++].index =
8700 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8701 }
8702 }
8703 else
8704 {
8705 UNHANDLED;
8706 }
8707 break;
8708
8709 case IA64_RS_DAHR:
8710 if (note == 0)
8711 {
8712 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8713 {
8714 specs[count] = tmpl;
8715 specs[count++].index =
8716 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8717 }
8718 }
8719 else
8720 {
8721 UNHANDLED;
8722 }
8723 break;
8724
8725 case IA64_RS_FR:
8726 case IA64_RS_FRb:
8727 if (note != 1)
8728 {
8729 UNHANDLED;
8730 }
8731 else if (rsrc_write)
8732 {
8733 if (dep->specifier == IA64_RS_FRb
8734 && idesc->operands[0] == IA64_OPND_F1)
8735 {
8736 specs[count] = tmpl;
8737 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8738 }
8739 }
8740 else
8741 {
8742 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8743 {
8744 if (idesc->operands[i] == IA64_OPND_F2
8745 || idesc->operands[i] == IA64_OPND_F3
8746 || idesc->operands[i] == IA64_OPND_F4)
8747 {
8748 specs[count] = tmpl;
8749 specs[count++].index =
8750 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8751 }
8752 }
8753 }
8754 break;
8755
8756 case IA64_RS_GR:
8757 if (note == 13)
8758 {
8759 /* This reference applies only to the GR whose value is loaded with
8760 data returned from memory. */
8761 specs[count] = tmpl;
8762 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8763 }
8764 else if (note == 1)
8765 {
8766 if (rsrc_write)
8767 {
8768 for (i = 0; i < idesc->num_outputs; i++)
8769 if (idesc->operands[i] == IA64_OPND_R1
8770 || idesc->operands[i] == IA64_OPND_R2
8771 || idesc->operands[i] == IA64_OPND_R3)
8772 {
8773 specs[count] = tmpl;
8774 specs[count++].index =
8775 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8776 }
8777 if (idesc->flags & IA64_OPCODE_POSTINC)
8778 for (i = 0; i < NELEMS (idesc->operands); i++)
8779 if (idesc->operands[i] == IA64_OPND_MR3)
8780 {
8781 specs[count] = tmpl;
8782 specs[count++].index =
8783 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8784 }
8785 }
8786 else
8787 {
8788 /* Look for anything that reads a GR. */
8789 for (i = 0; i < NELEMS (idesc->operands); i++)
8790 {
8791 if (idesc->operands[i] == IA64_OPND_MR3
8792 || idesc->operands[i] == IA64_OPND_CPUID_R3
8793 || idesc->operands[i] == IA64_OPND_DBR_R3
8794 || idesc->operands[i] == IA64_OPND_IBR_R3
8795 || idesc->operands[i] == IA64_OPND_MSR_R3
8796 || idesc->operands[i] == IA64_OPND_PKR_R3
8797 || idesc->operands[i] == IA64_OPND_PMC_R3
8798 || idesc->operands[i] == IA64_OPND_PMD_R3
8799 || idesc->operands[i] == IA64_OPND_DAHR_R3
8800 || idesc->operands[i] == IA64_OPND_RR_R3
8801 || ((i >= idesc->num_outputs)
8802 && (idesc->operands[i] == IA64_OPND_R1
8803 || idesc->operands[i] == IA64_OPND_R2
8804 || idesc->operands[i] == IA64_OPND_R3
8805 /* addl source register. */
8806 || idesc->operands[i] == IA64_OPND_R3_2)))
8807 {
8808 specs[count] = tmpl;
8809 specs[count++].index =
8810 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8811 }
8812 }
8813 }
8814 }
8815 else
8816 {
8817 UNHANDLED;
8818 }
8819 break;
8820
8821 /* This is the same as IA64_RS_PRr, except that the register range is
8822 from 1 - 15, and there are no rotating register reads/writes here. */
8823 case IA64_RS_PR:
8824 if (note == 0)
8825 {
8826 for (i = 1; i < 16; i++)
8827 {
8828 specs[count] = tmpl;
8829 specs[count++].index = i;
8830 }
8831 }
8832 else if (note == 7)
8833 {
8834 valueT mask = 0;
8835 /* Mark only those registers indicated by the mask. */
8836 if (rsrc_write)
8837 {
8838 mask = CURR_SLOT.opnd[2].X_add_number;
8839 for (i = 1; i < 16; i++)
8840 if (mask & ((valueT) 1 << i))
8841 {
8842 specs[count] = tmpl;
8843 specs[count++].index = i;
8844 }
8845 }
8846 else
8847 {
8848 UNHANDLED;
8849 }
8850 }
8851 else if (note == 11) /* note 11 implies note 1 as well */
8852 {
8853 if (rsrc_write)
8854 {
8855 for (i = 0; i < idesc->num_outputs; i++)
8856 {
8857 if (idesc->operands[i] == IA64_OPND_P1
8858 || idesc->operands[i] == IA64_OPND_P2)
8859 {
8860 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8861 if (regno >= 1 && regno < 16)
8862 {
8863 specs[count] = tmpl;
8864 specs[count++].index = regno;
8865 }
8866 }
8867 }
8868 }
8869 else
8870 {
8871 UNHANDLED;
8872 }
8873 }
8874 else if (note == 12)
8875 {
8876 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8877 {
8878 specs[count] = tmpl;
8879 specs[count++].index = CURR_SLOT.qp_regno;
8880 }
8881 }
8882 else if (note == 1)
8883 {
8884 if (rsrc_write)
8885 {
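	      /* Ed. note: cmp_type is recorded so that resources_match can
		 later ignore apparent WAW conflicts between two "or"-form
		 (or two "and"-form) parallel compares targeting the same
		 predicate, which the architecture permits; the .andcm/.orcm
		 forms invert the sense for the second target.  */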
8886 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8887 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8888 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8889 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8890
8891 if ((idesc->operands[0] == IA64_OPND_P1
8892 || idesc->operands[0] == IA64_OPND_P2)
8893 && p1 >= 1 && p1 < 16)
8894 {
8895 specs[count] = tmpl;
8896 specs[count].cmp_type =
8897 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8898 specs[count++].index = p1;
8899 }
8900 if ((idesc->operands[1] == IA64_OPND_P1
8901 || idesc->operands[1] == IA64_OPND_P2)
8902 && p2 >= 1 && p2 < 16)
8903 {
8904 specs[count] = tmpl;
8905 specs[count].cmp_type =
8906 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8907 specs[count++].index = p2;
8908 }
8909 }
8910 else
8911 {
8912 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8913 {
8914 specs[count] = tmpl;
8915 specs[count++].index = CURR_SLOT.qp_regno;
8916 }
8917 if (idesc->operands[1] == IA64_OPND_PR)
8918 {
8919 for (i = 1; i < 16; i++)
8920 {
8921 specs[count] = tmpl;
8922 specs[count++].index = i;
8923 }
8924 }
8925 }
8926 }
8927 else
8928 {
8929 UNHANDLED;
8930 }
8931 break;
8932
8933 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8934 simplified cases of this. */
8935 case IA64_RS_PRr:
8936 if (note == 0)
8937 {
8938 for (i = 16; i < 63; i++)
8939 {
8940 specs[count] = tmpl;
8941 specs[count++].index = i;
8942 }
8943 }
8944 else if (note == 7)
8945 {
8946 valueT mask = 0;
8947 /* Mark only those registers indicated by the mask. */
8948 if (rsrc_write
8949 && idesc->operands[0] == IA64_OPND_PR)
8950 {
8951 mask = CURR_SLOT.opnd[2].X_add_number;
8952 if (mask & ((valueT) 1 << 16))
8953 for (i = 16; i < 63; i++)
8954 {
8955 specs[count] = tmpl;
8956 specs[count++].index = i;
8957 }
8958 }
8959 else if (rsrc_write
8960 && idesc->operands[0] == IA64_OPND_PR_ROT)
8961 {
8962 for (i = 16; i < 63; i++)
8963 {
8964 specs[count] = tmpl;
8965 specs[count++].index = i;
8966 }
8967 }
8968 else
8969 {
8970 UNHANDLED;
8971 }
8972 }
8973 else if (note == 11) /* note 11 implies note 1 as well */
8974 {
8975 if (rsrc_write)
8976 {
8977 for (i = 0; i < idesc->num_outputs; i++)
8978 {
8979 if (idesc->operands[i] == IA64_OPND_P1
8980 || idesc->operands[i] == IA64_OPND_P2)
8981 {
8982 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8983 if (regno >= 16 && regno < 63)
8984 {
8985 specs[count] = tmpl;
8986 specs[count++].index = regno;
8987 }
8988 }
8989 }
8990 }
8991 else
8992 {
8993 UNHANDLED;
8994 }
8995 }
8996 else if (note == 12)
8997 {
8998 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8999 {
9000 specs[count] = tmpl;
9001 specs[count++].index = CURR_SLOT.qp_regno;
9002 }
9003 }
9004 else if (note == 1)
9005 {
9006 if (rsrc_write)
9007 {
9008 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9009 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9010 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9011 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9012
9013 if ((idesc->operands[0] == IA64_OPND_P1
9014 || idesc->operands[0] == IA64_OPND_P2)
9015 && p1 >= 16 && p1 < 63)
9016 {
9017 specs[count] = tmpl;
9018 specs[count].cmp_type =
9019 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9020 specs[count++].index = p1;
9021 }
9022 if ((idesc->operands[1] == IA64_OPND_P1
9023 || idesc->operands[1] == IA64_OPND_P2)
9024 && p2 >= 16 && p2 < 63)
9025 {
9026 specs[count] = tmpl;
9027 specs[count].cmp_type =
9028 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9029 specs[count++].index = p2;
9030 }
9031 }
9032 else
9033 {
9034 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9035 {
9036 specs[count] = tmpl;
9037 specs[count++].index = CURR_SLOT.qp_regno;
9038 }
9039 if (idesc->operands[1] == IA64_OPND_PR)
9040 {
9041 for (i = 16; i < 63; i++)
9042 {
9043 specs[count] = tmpl;
9044 specs[count++].index = i;
9045 }
9046 }
9047 }
9048 }
9049 else
9050 {
9051 UNHANDLED;
9052 }
9053 break;
9054
9055 case IA64_RS_PSR:
9056 /* Verify that the instruction is using the PSR bit indicated in
9057 dep->regindex. */
9058 if (note == 0)
9059 {
9060 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9061 {
9062 if (dep->regindex < 6)
9063 {
9064 specs[count++] = tmpl;
9065 }
9066 }
9067 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9068 {
9069 if (dep->regindex < 32
9070 || dep->regindex == 35
9071 || dep->regindex == 36
9072 || (!rsrc_write && dep->regindex == PSR_CPL))
9073 {
9074 specs[count++] = tmpl;
9075 }
9076 }
9077 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9078 {
9079 if (dep->regindex < 32
9080 || dep->regindex == 35
9081 || dep->regindex == 36
9082 || (rsrc_write && dep->regindex == PSR_CPL))
9083 {
9084 specs[count++] = tmpl;
9085 }
9086 }
9087 else
9088 {
9089 /* Several PSR bits have very specific dependencies. */
9090 switch (dep->regindex)
9091 {
9092 default:
9093 specs[count++] = tmpl;
9094 break;
9095 case PSR_IC:
9096 if (rsrc_write)
9097 {
9098 specs[count++] = tmpl;
9099 }
9100 else
9101 {
9102 /* Only certain CR accesses use PSR.ic */
9103 if (idesc->operands[0] == IA64_OPND_CR3
9104 || idesc->operands[1] == IA64_OPND_CR3)
9105 {
9106 int reg_index =
9107 ((idesc->operands[0] == IA64_OPND_CR3)
9108 ? 0 : 1);
9109 int regno =
9110 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9111
9112 switch (regno)
9113 {
9114 default:
9115 break;
9116 case CR_ITIR:
9117 case CR_IFS:
9118 case CR_IIM:
9119 case CR_IIP:
9120 case CR_IPSR:
9121 case CR_ISR:
9122 case CR_IFA:
9123 case CR_IHA:
9124 case CR_IIB0:
9125 case CR_IIB1:
9126 case CR_IIPA:
9127 specs[count++] = tmpl;
9128 break;
9129 }
9130 }
9131 }
9132 break;
9133 case PSR_CPL:
9134 if (rsrc_write)
9135 {
9136 specs[count++] = tmpl;
9137 }
9138 else
9139 {
9140 /* Only some AR accesses use cpl */
9141 if (idesc->operands[0] == IA64_OPND_AR3
9142 || idesc->operands[1] == IA64_OPND_AR3)
9143 {
9144 int reg_index =
9145 ((idesc->operands[0] == IA64_OPND_AR3)
9146 ? 0 : 1);
9147 int regno =
9148 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9149
9150 if (regno == AR_ITC
9151 || regno == AR_RUC
9152 || (reg_index == 0
9153 && (regno == AR_RSC
9154 || (regno >= AR_K0
9155 && regno <= AR_K7))))
9156 {
9157 specs[count++] = tmpl;
9158 }
9159 }
9160 else
9161 {
9162 specs[count++] = tmpl;
9163 }
9164 break;
9165 }
9166 }
9167 }
9168 }
9169 else if (note == 7)
9170 {
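	  /* Ed. note: ssm/rsm carry a 24-bit immediate mask of PSR bits, so
	     this PSR resource is involved only when bit dep->regindex is set
	     in that mask; e.g. "ssm 0x4000" (assuming PSR.i is bit 14) would
	     touch only the PSR.i resource.  */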
9171 valueT mask = 0;
9172 if (idesc->operands[0] == IA64_OPND_IMMU24)
9173 {
9174 mask = CURR_SLOT.opnd[0].X_add_number;
9175 }
9176 else
9177 {
9178 UNHANDLED;
9179 }
9180 if (mask & ((valueT) 1 << dep->regindex))
9181 {
9182 specs[count++] = tmpl;
9183 }
9184 }
9185 else if (note == 8)
9186 {
9187 int min = dep->regindex == PSR_DFL ? 2 : 32;
9188 int max = dep->regindex == PSR_DFL ? 31 : 127;
9189 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9190 for (i = 0; i < NELEMS (idesc->operands); i++)
9191 {
9192 if (idesc->operands[i] == IA64_OPND_F1
9193 || idesc->operands[i] == IA64_OPND_F2
9194 || idesc->operands[i] == IA64_OPND_F3
9195 || idesc->operands[i] == IA64_OPND_F4)
9196 {
9197 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9198 if (reg >= min && reg <= max)
9199 {
9200 specs[count++] = tmpl;
9201 }
9202 }
9203 }
9204 }
9205 else if (note == 9)
9206 {
9207 int min = dep->regindex == PSR_MFL ? 2 : 32;
9208 int max = dep->regindex == PSR_MFL ? 31 : 127;
9209	  /* PSR.mfh is written on writes to FR32-127; PSR.mfl is written on
9210	     writes to FR2-31.  */
9211 for (i = 0; i < idesc->num_outputs; i++)
9212 {
9213 if (idesc->operands[i] == IA64_OPND_F1)
9214 {
9215 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9216 if (reg >= min && reg <= max)
9217 {
9218 specs[count++] = tmpl;
9219 }
9220 }
9221 }
9222 }
9223 else if (note == 10)
9224 {
9225 for (i = 0; i < NELEMS (idesc->operands); i++)
9226 {
9227 if (idesc->operands[i] == IA64_OPND_R1
9228 || idesc->operands[i] == IA64_OPND_R2
9229 || idesc->operands[i] == IA64_OPND_R3)
9230 {
9231 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9232 if (regno >= 16 && regno <= 31)
9233 {
9234 specs[count++] = tmpl;
9235 }
9236 }
9237 }
9238 }
9239 else
9240 {
9241 UNHANDLED;
9242 }
9243 break;
9244
9245 case IA64_RS_AR_FPSR:
9246 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9247 {
9248 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9249 if (regno == AR_FPSR)
9250 {
9251 specs[count++] = tmpl;
9252 }
9253 }
9254 else
9255 {
9256 specs[count++] = tmpl;
9257 }
9258 break;
9259
9260 case IA64_RS_ARX:
9261 /* Handle all AR[REG] resources */
9262 if (note == 0 || note == 1)
9263 {
9264 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9265 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9266 && regno == dep->regindex)
9267 {
9268 specs[count++] = tmpl;
9269 }
9270 /* other AR[REG] resources may be affected by AR accesses */
9271 else if (idesc->operands[0] == IA64_OPND_AR3)
9272 {
9273 /* AR[] writes */
9274 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9275 switch (dep->regindex)
9276 {
9277 default:
9278 break;
9279 case AR_BSP:
9280 case AR_RNAT:
9281 if (regno == AR_BSPSTORE)
9282 {
9283 specs[count++] = tmpl;
9284 }
9285 /* Fall through. */
9286 case AR_RSC:
9287 if (!rsrc_write &&
9288 (regno == AR_BSPSTORE
9289 || regno == AR_RNAT))
9290 {
9291 specs[count++] = tmpl;
9292 }
9293 break;
9294 }
9295 }
9296 else if (idesc->operands[1] == IA64_OPND_AR3)
9297 {
9298 /* AR[] reads */
9299 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9300 switch (dep->regindex)
9301 {
9302 default:
9303 break;
9304 case AR_RSC:
9305 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9306 {
9307 specs[count++] = tmpl;
9308 }
9309 break;
9310 }
9311 }
9312 else
9313 {
9314 specs[count++] = tmpl;
9315 }
9316 }
9317 else
9318 {
9319 UNHANDLED;
9320 }
9321 break;
9322
9323 case IA64_RS_CRX:
9324 /* Handle all CR[REG] resources.
9325	 ??? FIXME: Rule 17 isn't really handled correctly.  */
9326 if (note == 0 || note == 1 || note == 17)
9327 {
9328 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9329 {
9330 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9331 if (regno == dep->regindex)
9332 {
9333 specs[count++] = tmpl;
9334 }
9335 else if (!rsrc_write)
9336 {
9337 /* Reads from CR[IVR] affect other resources. */
9338 if (regno == CR_IVR)
9339 {
9340 if ((dep->regindex >= CR_IRR0
9341 && dep->regindex <= CR_IRR3)
9342 || dep->regindex == CR_TPR)
9343 {
9344 specs[count++] = tmpl;
9345 }
9346 }
9347 }
9348 }
9349 else
9350 {
9351 specs[count++] = tmpl;
9352 }
9353 }
9354 else
9355 {
9356 UNHANDLED;
9357 }
9358 break;
9359
9360 case IA64_RS_INSERVICE:
9361 /* look for write of EOI (67) or read of IVR (65) */
9362 if ((idesc->operands[0] == IA64_OPND_CR3
9363 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9364 || (idesc->operands[1] == IA64_OPND_CR3
9365 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9366 {
9367 specs[count++] = tmpl;
9368 }
9369 break;
9370
9371 case IA64_RS_GR0:
9372 if (note == 1)
9373 {
9374 specs[count++] = tmpl;
9375 }
9376 else
9377 {
9378 UNHANDLED;
9379 }
9380 break;
9381
9382 case IA64_RS_CFM:
9383 if (note != 2)
9384 {
9385 specs[count++] = tmpl;
9386 }
9387 else
9388 {
9389 /* Check if any of the registers accessed are in the rotating region.
9390 mov to/from pr accesses CFM only when qp_regno is in the rotating
9391 region */
9392 for (i = 0; i < NELEMS (idesc->operands); i++)
9393 {
9394 if (idesc->operands[i] == IA64_OPND_R1
9395 || idesc->operands[i] == IA64_OPND_R2
9396 || idesc->operands[i] == IA64_OPND_R3)
9397 {
9398 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9399 /* Assumes that md.rot.num_regs is always valid */
9400 if (md.rot.num_regs > 0
9401 && num > 31
9402 && num < 31 + md.rot.num_regs)
9403 {
9404 specs[count] = tmpl;
9405 specs[count++].specific = 0;
9406 }
9407 }
9408 else if (idesc->operands[i] == IA64_OPND_F1
9409 || idesc->operands[i] == IA64_OPND_F2
9410 || idesc->operands[i] == IA64_OPND_F3
9411 || idesc->operands[i] == IA64_OPND_F4)
9412 {
9413 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9414 if (num > 31)
9415 {
9416 specs[count] = tmpl;
9417 specs[count++].specific = 0;
9418 }
9419 }
9420 else if (idesc->operands[i] == IA64_OPND_P1
9421 || idesc->operands[i] == IA64_OPND_P2)
9422 {
9423 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9424 if (num > 15)
9425 {
9426 specs[count] = tmpl;
9427 specs[count++].specific = 0;
9428 }
9429 }
9430 }
9431 if (CURR_SLOT.qp_regno > 15)
9432 {
9433 specs[count] = tmpl;
9434 specs[count++].specific = 0;
9435 }
9436 }
9437 break;
9438
9439 /* This is the same as IA64_RS_PRr, except simplified to account for
9440 the fact that there is only one register. */
9441 case IA64_RS_PR63:
9442 if (note == 0)
9443 {
9444 specs[count++] = tmpl;
9445 }
9446 else if (note == 7)
9447 {
9448 valueT mask = 0;
9449 if (idesc->operands[2] == IA64_OPND_IMM17)
9450 mask = CURR_SLOT.opnd[2].X_add_number;
9451 if (mask & ((valueT) 1 << 63))
9452 specs[count++] = tmpl;
9453 }
9454 else if (note == 11)
9455 {
9456 if ((idesc->operands[0] == IA64_OPND_P1
9457 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9458 || (idesc->operands[1] == IA64_OPND_P2
9459 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9460 {
9461 specs[count++] = tmpl;
9462 }
9463 }
9464 else if (note == 12)
9465 {
9466 if (CURR_SLOT.qp_regno == 63)
9467 {
9468 specs[count++] = tmpl;
9469 }
9470 }
9471 else if (note == 1)
9472 {
9473 if (rsrc_write)
9474 {
9475 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9476 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9477 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9478 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9479
9480 if (p1 == 63
9481 && (idesc->operands[0] == IA64_OPND_P1
9482 || idesc->operands[0] == IA64_OPND_P2))
9483 {
9484 specs[count] = tmpl;
9485 specs[count++].cmp_type =
9486 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9487 }
9488 if (p2 == 63
9489 && (idesc->operands[1] == IA64_OPND_P1
9490 || idesc->operands[1] == IA64_OPND_P2))
9491 {
9492 specs[count] = tmpl;
9493 specs[count++].cmp_type =
9494 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9495 }
9496 }
9497 else
9498 {
9499 if (CURR_SLOT.qp_regno == 63)
9500 {
9501 specs[count++] = tmpl;
9502 }
9503 }
9504 }
9505 else
9506 {
9507 UNHANDLED;
9508 }
9509 break;
9510
9511 case IA64_RS_RSE:
9512 /* FIXME we can identify some individual RSE written resources, but RSE
9513 read resources have not yet been completely identified, so for now
9514 treat RSE as a single resource */
9515 if (strncmp (idesc->name, "mov", 3) == 0)
9516 {
9517 if (rsrc_write)
9518 {
9519 if (idesc->operands[0] == IA64_OPND_AR3
9520 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9521 {
9522 specs[count++] = tmpl;
9523 }
9524 }
9525 else
9526 {
9527 if (idesc->operands[0] == IA64_OPND_AR3)
9528 {
9529 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9530 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9531 {
9532 specs[count++] = tmpl;
9533 }
9534 }
9535 else if (idesc->operands[1] == IA64_OPND_AR3)
9536 {
9537 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9538 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9539 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9540 {
9541 specs[count++] = tmpl;
9542 }
9543 }
9544 }
9545 }
9546 else
9547 {
9548 specs[count++] = tmpl;
9549 }
9550 break;
9551
9552 case IA64_RS_ANY:
9553 /* FIXME -- do any of these need to be non-specific? */
9554 specs[count++] = tmpl;
9555 break;
9556
9557 default:
9558 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9559 break;
9560 }
9561
9562 return count;
9563 }
9564
9565 /* Clear branch flags on marked resources. This breaks the link between the
9566 QP of the marking instruction and a subsequent branch on the same QP. */
9567
9568 static void
9569 clear_qp_branch_flag (valueT mask)
9570 {
9571 int i;
9572 for (i = 0; i < regdepslen; i++)
9573 {
9574 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9575 if ((bit & mask) != 0)
9576 {
9577 regdeps[i].link_to_qp_branch = 0;
9578 }
9579 }
9580 }
9581
9582 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9583 any mutexes which contain one of the PRs and create new ones when
9584 needed. */
9585
9586 static int
9587 update_qp_mutex (valueT mask)
9588 {
9589 int i;
9590 int add = 0;
9591
9592 i = 0;
9593 while (i < qp_mutexeslen)
9594 {
9595 if ((qp_mutexes[i].prmask & mask) != 0)
9596 {
9597 /* If it destroys and creates the same mutex, do nothing. */
9598 if (qp_mutexes[i].prmask == mask
9599 && qp_mutexes[i].path == md.path)
9600 {
9601 i++;
9602 add = -1;
9603 }
9604 else
9605 {
9606 int keep = 0;
9607
9608 if (md.debug_dv)
9609 {
9610 fprintf (stderr, " Clearing mutex relation");
9611 print_prmask (qp_mutexes[i].prmask);
9612 fprintf (stderr, "\n");
9613 }
9614
9615		  /* Deal with an old mutex containing three or more PRs only if
9616		     the new mutex is on the same execution path as it.
9617
9618 FIXME: The 3+ mutex support is incomplete.
9619 dot_pred_rel () may be a better place to fix it. */
9620 if (qp_mutexes[i].path == md.path)
9621 {
9622 /* If it is a proper subset of the mutex, create a
9623 new mutex. */
9624 if (add == 0
9625 && (qp_mutexes[i].prmask & mask) == mask)
9626 add = 1;
9627
9628 qp_mutexes[i].prmask &= ~mask;
9629 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9630 {
9631 /* Modify the mutex if there are more than one
9632 PR left. */
9633 keep = 1;
9634 i++;
9635 }
9636 }
9637
9638 if (keep == 0)
9639 /* Remove the mutex. */
9640 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9641 }
9642 }
9643 else
9644 ++i;
9645 }
9646
9647 if (add == 1)
9648 add_qp_mutex (mask);
9649
9650 return add;
9651 }
9652
9653 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9654
9655 Any changes to a PR clears the mutex relations which include that PR. */
9656
9657 static void
9658 clear_qp_mutex (valueT mask)
9659 {
9660 int i;
9661
9662 i = 0;
9663 while (i < qp_mutexeslen)
9664 {
9665 if ((qp_mutexes[i].prmask & mask) != 0)
9666 {
9667 if (md.debug_dv)
9668 {
9669 fprintf (stderr, " Clearing mutex relation");
9670 print_prmask (qp_mutexes[i].prmask);
9671 fprintf (stderr, "\n");
9672 }
9673 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9674 }
9675 else
9676 ++i;
9677 }
9678 }
9679
9680 /* Clear implies relations which contain PRs in the given masks.
9681 P1_MASK indicates the source of the implies relation, while P2_MASK
9682 indicates the implied PR. */
9683
9684 static void
9685 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9686 {
9687 int i;
9688
9689 i = 0;
9690 while (i < qp_implieslen)
9691 {
9692 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9693 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9694 {
9695 if (md.debug_dv)
9696 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9697 qp_implies[i].p1, qp_implies[i].p2);
9698 qp_implies[i] = qp_implies[--qp_implieslen];
9699 }
9700 else
9701 ++i;
9702 }
9703 }
9704
9705 /* Add the PRs specified to the list of implied relations. */
9706
9707 static void
9708 add_qp_imply (int p1, int p2)
9709 {
9710 valueT mask;
9711 valueT bit;
9712 int i;
9713
9714 /* p0 is not meaningful here. */
9715 if (p1 == 0 || p2 == 0)
9716 abort ();
9717
9718 if (p1 == p2)
9719 return;
9720
9721 /* If it exists already, ignore it. */
9722 for (i = 0; i < qp_implieslen; i++)
9723 {
9724 if (qp_implies[i].p1 == p1
9725 && qp_implies[i].p2 == p2
9726 && qp_implies[i].path == md.path
9727 && !qp_implies[i].p2_branched)
9728 return;
9729 }
9730
9731 if (qp_implieslen == qp_impliestotlen)
9732 {
9733 qp_impliestotlen += 20;
9734 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9735 }
9736 if (md.debug_dv)
9737 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9738 qp_implies[qp_implieslen].p1 = p1;
9739 qp_implies[qp_implieslen].p2 = p2;
9740 qp_implies[qp_implieslen].path = md.path;
9741 qp_implies[qp_implieslen++].p2_branched = 0;
9742
9743 /* Add in the implied transitive relations; for everything that p2 implies,
9744 make p1 imply that, too; for everything that implies p1, make it imply p2
9745 as well. */
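  /* Ed. note (example): if PR6->PR7 is already recorded and PR5->PR6 is
     being added, the recursion below also records PR5->PR7; and any mutex
     containing PR6 is duplicated with PR6 replaced by PR5, since PR5 being
     true forces PR6 true and so excludes PR6's mutex partners.  */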
9746 for (i = 0; i < qp_implieslen; i++)
9747 {
9748 if (qp_implies[i].p1 == p2)
9749 add_qp_imply (p1, qp_implies[i].p2);
9750 if (qp_implies[i].p2 == p1)
9751 add_qp_imply (qp_implies[i].p1, p2);
9752 }
9753 /* Add in mutex relations implied by this implies relation; for each mutex
9754 relation containing p2, duplicate it and replace p2 with p1. */
9755 bit = (valueT) 1 << p1;
9756 mask = (valueT) 1 << p2;
9757 for (i = 0; i < qp_mutexeslen; i++)
9758 {
9759 if (qp_mutexes[i].prmask & mask)
9760 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9761 }
9762 }
9763
9764 /* Add the PRs specified in the mask to the mutex list; this means that only
9765 one of the PRs can be true at any time. PR0 should never be included in
9766 the mask. */
9767
9768 static void
9769 add_qp_mutex (valueT mask)
9770 {
9771 if (mask & 0x1)
9772 abort ();
9773
9774 if (qp_mutexeslen == qp_mutexestotlen)
9775 {
9776 qp_mutexestotlen += 20;
9777 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9778 }
9779 if (md.debug_dv)
9780 {
9781 fprintf (stderr, " Registering mutex on");
9782 print_prmask (mask);
9783 fprintf (stderr, "\n");
9784 }
9785 qp_mutexes[qp_mutexeslen].path = md.path;
9786 qp_mutexes[qp_mutexeslen++].prmask = mask;
9787 }
9788
9789 static int
9790 has_suffix_p (const char *name, const char *suffix)
9791 {
9792 size_t namelen = strlen (name);
9793 size_t sufflen = strlen (suffix);
9794
9795 if (namelen <= sufflen)
9796 return 0;
9797 return strcmp (name + namelen - sufflen, suffix) == 0;
9798 }
9799
9800 static void
9801 clear_register_values (void)
9802 {
9803 int i;
9804 if (md.debug_dv)
9805 fprintf (stderr, " Clearing register values\n");
9806 for (i = 1; i < NELEMS (gr_values); i++)
9807 gr_values[i].known = 0;
9808 }
9809
9810 /* Keep track of register values/changes which affect DV tracking.
9811
9812 optimization note: should add a flag to classes of insns where otherwise we
9813 have to examine a group of strings to identify them. */
9814
9815 static void
9816 note_register_values (struct ia64_opcode *idesc)
9817 {
9818 valueT qp_changemask = 0;
9819 int i;
9820
9821 /* Invalidate values for registers being written to. */
9822 for (i = 0; i < idesc->num_outputs; i++)
9823 {
9824 if (idesc->operands[i] == IA64_OPND_R1
9825 || idesc->operands[i] == IA64_OPND_R2
9826 || idesc->operands[i] == IA64_OPND_R3)
9827 {
9828 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9829 if (regno > 0 && regno < NELEMS (gr_values))
9830 gr_values[regno].known = 0;
9831 }
9832 else if (idesc->operands[i] == IA64_OPND_R3_2)
9833 {
9834 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9835 if (regno > 0 && regno < 4)
9836 gr_values[regno].known = 0;
9837 }
9838 else if (idesc->operands[i] == IA64_OPND_P1
9839 || idesc->operands[i] == IA64_OPND_P2)
9840 {
9841 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9842 qp_changemask |= (valueT) 1 << regno;
9843 }
9844 else if (idesc->operands[i] == IA64_OPND_PR)
9845 {
9846 if (idesc->operands[2] & (valueT) 0x10000)
9847 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9848 else
9849 qp_changemask = idesc->operands[2];
9850 break;
9851 }
9852 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9853 {
9854 if (idesc->operands[1] & ((valueT) 1 << 43))
9855 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9856 else
9857 qp_changemask = idesc->operands[1];
9858 qp_changemask &= ~(valueT) 0xFFFF;
9859 break;
9860 }
9861 }
9862
9863 /* Always clear qp branch flags on any PR change. */
9864 /* FIXME there may be exceptions for certain compares. */
9865 clear_qp_branch_flag (qp_changemask);
9866
9867 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9868 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9869 {
9870 qp_changemask |= ~(valueT) 0xFFFF;
9871 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9872 {
9873 for (i = 32; i < 32 + md.rot.num_regs; i++)
9874 gr_values[i].known = 0;
9875 }
9876 clear_qp_mutex (qp_changemask);
9877 clear_qp_implies (qp_changemask, qp_changemask);
9878 }
9879 /* After a call, all register values are undefined, except those marked
9880 as "safe". */
9881 else if (strncmp (idesc->name, "br.call", 6) == 0
9882 || strncmp (idesc->name, "brl.call", 7) == 0)
9883 {
9884 /* FIXME keep GR values which are marked as "safe_across_calls" */
9885 clear_register_values ();
9886 clear_qp_mutex (~qp_safe_across_calls);
9887 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9888 clear_qp_branch_flag (~qp_safe_across_calls);
9889 }
9890 else if (is_interruption_or_rfi (idesc)
9891 || is_taken_branch (idesc))
9892 {
9893 clear_register_values ();
9894 clear_qp_mutex (~(valueT) 0);
9895 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9896 }
9897 /* Look for mutex and implies relations. */
9898 else if ((idesc->operands[0] == IA64_OPND_P1
9899 || idesc->operands[0] == IA64_OPND_P2)
9900 && (idesc->operands[1] == IA64_OPND_P1
9901 || idesc->operands[1] == IA64_OPND_P2))
9902 {
9903 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9904 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9905 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9906 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9907
9908 /* If both PRs are PR0, we can't really do anything. */
9909 if (p1 == 0 && p2 == 0)
9910 {
9911 if (md.debug_dv)
9912 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9913 }
9914 /* In general, clear mutexes and implies which include P1 or P2,
9915 with the following exceptions. */
9916 else if (has_suffix_p (idesc->name, ".or.andcm")
9917 || has_suffix_p (idesc->name, ".and.orcm"))
9918 {
9919 clear_qp_implies (p2mask, p1mask);
9920 }
9921 else if (has_suffix_p (idesc->name, ".andcm")
9922 || has_suffix_p (idesc->name, ".and"))
9923 {
9924 clear_qp_implies (0, p1mask | p2mask);
9925 }
9926 else if (has_suffix_p (idesc->name, ".orcm")
9927 || has_suffix_p (idesc->name, ".or"))
9928 {
9929 clear_qp_mutex (p1mask | p2mask);
9930 clear_qp_implies (p1mask | p2mask, 0);
9931 }
9932 else
9933 {
9934 int added = 0;
9935
9936 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9937
9938 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9939 if (p1 == 0 || p2 == 0)
9940 clear_qp_mutex (p1mask | p2mask);
9941 else
9942 added = update_qp_mutex (p1mask | p2mask);
9943
9944 if (CURR_SLOT.qp_regno == 0
9945 || has_suffix_p (idesc->name, ".unc"))
9946 {
9947 if (added == 0 && p1 && p2)
9948 add_qp_mutex (p1mask | p2mask);
9949 if (CURR_SLOT.qp_regno != 0)
9950 {
9951 if (p1)
9952 add_qp_imply (p1, CURR_SLOT.qp_regno);
9953 if (p2)
9954 add_qp_imply (p2, CURR_SLOT.qp_regno);
9955 }
9956 }
9957 }
9958 }
9959 /* Look for mov imm insns into GRs. */
9960 else if (idesc->operands[0] == IA64_OPND_R1
9961 && (idesc->operands[1] == IA64_OPND_IMM22
9962 || idesc->operands[1] == IA64_OPND_IMMU64)
9963 && CURR_SLOT.opnd[1].X_op == O_constant
9964 && (strcmp (idesc->name, "mov") == 0
9965 || strcmp (idesc->name, "movl") == 0))
9966 {
9967 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9968 if (regno > 0 && regno < NELEMS (gr_values))
9969 {
9970 gr_values[regno].known = 1;
9971 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9972 gr_values[regno].path = md.path;
9973 if (md.debug_dv)
9974 {
9975 fprintf (stderr, " Know gr%d = ", regno);
9976 fprintf_vma (stderr, gr_values[regno].value);
9977 fputs ("\n", stderr);
9978 }
9979 }
9980 }
9981 /* Look for dep.z imm insns. */
9982 else if (idesc->operands[0] == IA64_OPND_R1
9983 && idesc->operands[1] == IA64_OPND_IMM8
9984 && strcmp (idesc->name, "dep.z") == 0)
9985 {
9986 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9987 if (regno > 0 && regno < NELEMS (gr_values))
9988 {
9989 valueT value = CURR_SLOT.opnd[1].X_add_number;
9990
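	  /* Ed. note (illustrative example): "dep.z r4 = 0x3f, 4, 3" keeps
	     the low 3 bits of the immediate (0x7) and shifts them left by 4,
	     so r4 is tracked as 0x70.  */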
9991 if (CURR_SLOT.opnd[3].X_add_number < 64)
9992 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9993 value <<= CURR_SLOT.opnd[2].X_add_number;
9994 gr_values[regno].known = 1;
9995 gr_values[regno].value = value;
9996 gr_values[regno].path = md.path;
9997 if (md.debug_dv)
9998 {
9999 fprintf (stderr, " Know gr%d = ", regno);
10000 fprintf_vma (stderr, gr_values[regno].value);
10001 fputs ("\n", stderr);
10002 }
10003 }
10004 }
10005 else
10006 {
10007 clear_qp_mutex (qp_changemask);
10008 clear_qp_implies (qp_changemask, qp_changemask);
10009 }
10010 }
10011
10012 /* Return whether the given predicate registers are currently mutex. */
10013
10014 static int
10015 qp_mutex (int p1, int p2, int path)
10016 {
10017 int i;
10018 valueT mask;
10019
10020 if (p1 != p2)
10021 {
10022 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10023 for (i = 0; i < qp_mutexeslen; i++)
10024 {
10025 if (qp_mutexes[i].path >= path
10026 && (qp_mutexes[i].prmask & mask) == mask)
10027 return 1;
10028 }
10029 }
10030 return 0;
10031 }
10032
10033	/* Return whether the given resource is in the given insn's list of chks.
10034 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10035 conflict. */
10036
10037 static int
10038 resources_match (struct rsrc *rs,
10039 struct ia64_opcode *idesc,
10040 int note,
10041 int qp_regno,
10042 int path)
10043 {
10044 struct rsrc specs[MAX_SPECS];
10045 int count;
10046
10047 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10048 we don't need to check. One exception is note 11, which indicates that
10049 target predicates are written regardless of PR[qp]. */
10050 if (qp_mutex (rs->qp_regno, qp_regno, path)
10051 && note != 11)
10052 return 0;
10053
10054 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10055 while (count-- > 0)
10056 {
10057	      /* UNAT checking is a bit more specific than for other resources.  */
10058 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10059 && specs[count].mem_offset.hint
10060 && rs->mem_offset.hint)
10061 {
10062 if (rs->mem_offset.base == specs[count].mem_offset.base)
10063 {
10064 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10065 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10066 return 1;
10067 else
10068 continue;
10069 }
10070 }
10071
10072 /* Skip apparent PR write conflicts where both writes are an AND or both
10073 writes are an OR. */
10074 if (rs->dependency->specifier == IA64_RS_PR
10075 || rs->dependency->specifier == IA64_RS_PRr
10076 || rs->dependency->specifier == IA64_RS_PR63)
10077 {
10078 if (specs[count].cmp_type != CMP_NONE
10079 && specs[count].cmp_type == rs->cmp_type)
10080 {
10081 if (md.debug_dv)
10082 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10083 dv_mode[rs->dependency->mode],
10084 rs->dependency->specifier != IA64_RS_PR63 ?
10085 specs[count].index : 63);
10086 continue;
10087 }
10088 if (md.debug_dv)
10089 fprintf (stderr,
10090 " %s on parallel compare conflict %s vs %s on PR%d\n",
10091 dv_mode[rs->dependency->mode],
10092 dv_cmp_type[rs->cmp_type],
10093 dv_cmp_type[specs[count].cmp_type],
10094 rs->dependency->specifier != IA64_RS_PR63 ?
10095 specs[count].index : 63);
10096
10097 }
10098
10099 /* If either resource is not specific, conservatively assume a conflict
10100 */
10101 if (!specs[count].specific || !rs->specific)
10102 return 2;
10103 else if (specs[count].index == rs->index)
10104 return 1;
10105 }
10106
10107 return 0;
10108 }
10109
10110 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10111 insert a stop to create the break. Update all resource dependencies
10112 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10113 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10114 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10115 instruction. */
10116
10117 static void
10118 insn_group_break (int insert_stop, int qp_regno, int save_current)
10119 {
10120 int i;
10121
10122 if (insert_stop && md.num_slots_in_use > 0)
10123 PREV_SLOT.end_of_insn_group = 1;
10124
10125 if (md.debug_dv)
10126 {
10127 fprintf (stderr, " Insn group break%s",
10128 (insert_stop ? " (w/stop)" : ""));
10129 if (qp_regno != 0)
10130 fprintf (stderr, " effective for QP=%d", qp_regno);
10131 fprintf (stderr, "\n");
10132 }
10133
10134 i = 0;
10135 while (i < regdepslen)
10136 {
10137 const struct ia64_dependency *dep = regdeps[i].dependency;
10138
10139 if (qp_regno != 0
10140 && regdeps[i].qp_regno != qp_regno)
10141 {
10142 ++i;
10143 continue;
10144 }
10145
10146 if (save_current
10147 && CURR_SLOT.src_file == regdeps[i].file
10148 && CURR_SLOT.src_line == regdeps[i].line)
10149 {
10150 ++i;
10151 continue;
10152 }
10153
10154 /* clear dependencies which are automatically cleared by a stop, or
10155 those that have reached the appropriate state of insn serialization */
10156 if (dep->semantics == IA64_DVS_IMPLIED
10157 || dep->semantics == IA64_DVS_IMPLIEDF
10158 || regdeps[i].insn_srlz == STATE_SRLZ)
10159 {
10160 print_dependency ("Removing", i);
10161 regdeps[i] = regdeps[--regdepslen];
10162 }
10163 else
10164 {
10165 if (dep->semantics == IA64_DVS_DATA
10166 || dep->semantics == IA64_DVS_INSTR
10167 || dep->semantics == IA64_DVS_SPECIFIC)
10168 {
10169 if (regdeps[i].insn_srlz == STATE_NONE)
10170 regdeps[i].insn_srlz = STATE_STOP;
10171 if (regdeps[i].data_srlz == STATE_NONE)
10172 regdeps[i].data_srlz = STATE_STOP;
10173 }
10174 ++i;
10175 }
10176 }
10177 }
10178
10179 /* Add the given resource usage spec to the list of active dependencies. */
10180
10181 static void
10182 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10183 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10184 struct rsrc *spec,
10185 int depind,
10186 int path)
10187 {
10188 if (regdepslen == regdepstotlen)
10189 {
10190 regdepstotlen += 20;
10191 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10192 }
10193
10194 regdeps[regdepslen] = *spec;
10195 regdeps[regdepslen].depind = depind;
10196 regdeps[regdepslen].path = path;
10197 regdeps[regdepslen].file = CURR_SLOT.src_file;
10198 regdeps[regdepslen].line = CURR_SLOT.src_line;
10199
10200 print_dependency ("Adding", regdepslen);
10201
10202 ++regdepslen;
10203 }
10204
10205 static void
10206 print_dependency (const char *action, int depind)
10207 {
10208 if (md.debug_dv)
10209 {
10210 fprintf (stderr, " %s %s '%s'",
10211 action, dv_mode[(regdeps[depind].dependency)->mode],
10212 (regdeps[depind].dependency)->name);
10213 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10214 fprintf (stderr, " (%d)", regdeps[depind].index);
10215 if (regdeps[depind].mem_offset.hint)
10216 {
10217 fputs (" ", stderr);
10218 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10219 fputs ("+", stderr);
10220 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10221 }
10222 fprintf (stderr, "\n");
10223 }
10224 }
10225
10226 static void
10227 instruction_serialization (void)
10228 {
10229 int i;
10230 if (md.debug_dv)
10231 fprintf (stderr, " Instruction serialization\n");
10232 for (i = 0; i < regdepslen; i++)
10233 if (regdeps[i].insn_srlz == STATE_STOP)
10234 regdeps[i].insn_srlz = STATE_SRLZ;
10235 }
10236
10237 static void
10238 data_serialization (void)
10239 {
10240 int i = 0;
10241 if (md.debug_dv)
10242 fprintf (stderr, " Data serialization\n");
10243 while (i < regdepslen)
10244 {
10245 if (regdeps[i].data_srlz == STATE_STOP
10246 /* Note: as of 991210, all "other" dependencies are cleared by a
10247 data serialization. This might change with new tables */
10248 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10249 {
10250 print_dependency ("Removing", i);
10251 regdeps[i] = regdeps[--regdepslen];
10252 }
10253 else
10254 ++i;
10255 }
10256 }
10257
10258 /* Insert stops and serializations as needed to avoid DVs. */
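/* Ed. note (summary of the cases below): an IA64_DVS_INSTR or
   IA64_DVS_SPECIFIC violation is resolved by inserting a stop, a srlz.i and
   another stop, as needed; IA64_DVS_DATA and IA64_DVS_OTHER get a stop
   followed by a srlz.d; IA64_DVS_IMPLIED and IA64_DVS_IMPLIEDF need only
   the stop.  */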
10259
10260 static void
10261 remove_marked_resource (struct rsrc *rs)
10262 {
10263 switch (rs->dependency->semantics)
10264 {
10265 case IA64_DVS_SPECIFIC:
10266 if (md.debug_dv)
10267 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10268 /* Fall through. */
10269 case IA64_DVS_INSTR:
10270 if (md.debug_dv)
10271 fprintf (stderr, "Inserting instr serialization\n");
10272 if (rs->insn_srlz < STATE_STOP)
10273 insn_group_break (1, 0, 0);
10274 if (rs->insn_srlz < STATE_SRLZ)
10275 {
10276 struct slot oldslot = CURR_SLOT;
10277 /* Manually jam a srlz.i insn into the stream */
10278 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10279 CURR_SLOT.user_template = -1;
10280 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10281 instruction_serialization ();
10282 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10283 if (++md.num_slots_in_use >= NUM_SLOTS)
10284 emit_one_bundle ();
10285 CURR_SLOT = oldslot;
10286 }
10287 insn_group_break (1, 0, 0);
10288 break;
10289 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10290 "other" types of DV are eliminated
10291 by a data serialization */
10292 case IA64_DVS_DATA:
10293 if (md.debug_dv)
10294 fprintf (stderr, "Inserting data serialization\n");
10295 if (rs->data_srlz < STATE_STOP)
10296 insn_group_break (1, 0, 0);
10297 {
10298 struct slot oldslot = CURR_SLOT;
10299 /* Manually jam a srlz.d insn into the stream */
10300 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10301 CURR_SLOT.user_template = -1;
10302 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10303 data_serialization ();
10304 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10305 if (++md.num_slots_in_use >= NUM_SLOTS)
10306 emit_one_bundle ();
10307 CURR_SLOT = oldslot;
10308 }
10309 break;
10310 case IA64_DVS_IMPLIED:
10311 case IA64_DVS_IMPLIEDF:
10312 if (md.debug_dv)
10313 fprintf (stderr, "Inserting stop\n");
10314 insn_group_break (1, 0, 0);
10315 break;
10316 default:
10317 break;
10318 }
10319 }
10320
10321 /* Check the resources used by the given opcode against the current dependency
10322 list.
10323
10324 The check is run once for each execution path encountered. In this case,
10325 a unique execution path is the sequence of instructions following a code
10326 entry point, e.g. the following has three execution paths, one starting
10327 at L0, one at L1, and one at L2.
10328
10329 L0: nop
10330 L1: add
10331 L2: add
10332 br.ret
10333 */
10334
10335 static void
10336 check_dependencies (struct ia64_opcode *idesc)
10337 {
10338 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10339 int path;
10340 int i;
10341
10342 /* Note that the number of marked resources may change within the
10343 loop if in auto mode. */
10344 i = 0;
10345 while (i < regdepslen)
10346 {
10347 struct rsrc *rs = &regdeps[i];
10348 const struct ia64_dependency *dep = rs->dependency;
10349 int chkind;
10350 int note;
10351 int start_over = 0;
10352
10353 if (dep->semantics == IA64_DVS_NONE
10354 || (chkind = depends_on (rs->depind, idesc)) == -1)
10355 {
10356 ++i;
10357 continue;
10358 }
10359
10360 note = NOTE (opdeps->chks[chkind]);
10361
10362 /* Check this resource against each execution path seen thus far. */
10363 for (path = 0; path <= md.path; path++)
10364 {
10365 int matchtype;
10366
10367 /* If the dependency wasn't on the path being checked, ignore it. */
10368 if (rs->path < path)
10369 continue;
10370
10371 /* If the QP for this insn implies a QP which has branched, don't
10372 bother checking. Ed. NOTE: I don't think this check is terribly
10373 useful; what's the point of generating code which will only be
10374 reached if its QP is zero?
10375 This code was specifically inserted to handle the following code,
10376 based on notes from Intel's DV checking code, where p1 implies p2.
10377
10378 mov r4 = 2
10379 (p2) br.cond L
10380 (p1) mov r4 = 7
10381 */
10382 if (CURR_SLOT.qp_regno != 0)
10383 {
10384 int skip = 0;
10385 int implies;
10386 for (implies = 0; implies < qp_implieslen; implies++)
10387 {
10388 if (qp_implies[implies].path >= path
10389 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10390 && qp_implies[implies].p2_branched)
10391 {
10392 skip = 1;
10393 break;
10394 }
10395 }
10396 if (skip)
10397 continue;
10398 }
10399
10400 if ((matchtype = resources_match (rs, idesc, note,
10401 CURR_SLOT.qp_regno, path)) != 0)
10402 {
10403 char msg[1024];
10404 char pathmsg[256] = "";
10405 char indexmsg[256] = "";
10406 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10407
10408 if (path != 0)
10409 snprintf (pathmsg, sizeof (pathmsg),
10410 " when entry is at label '%s'",
10411 md.entry_labels[path - 1]);
10412 if (matchtype == 1 && rs->index >= 0)
10413 snprintf (indexmsg, sizeof (indexmsg),
10414 ", specific resource number is %d",
10415 rs->index);
10416 snprintf (msg, sizeof (msg),
10417 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10418 idesc->name,
10419 (certain ? "violates" : "may violate"),
10420 dv_mode[dep->mode], dep->name,
10421 dv_sem[dep->semantics],
10422 pathmsg, indexmsg);
10423
10424 if (md.explicit_mode)
10425 {
10426 as_warn ("%s", msg);
10427 if (path < md.path)
10428 as_warn (_("Only the first path encountering the conflict is reported"));
10429 as_warn_where (rs->file, rs->line,
10430 _("This is the location of the conflicting usage"));
10431 /* Don't bother checking other paths, to avoid duplicating
10432 the same warning */
10433 break;
10434 }
10435 else
10436 {
10437 if (md.debug_dv)
10438 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10439
10440 remove_marked_resource (rs);
10441
10442 /* since the set of dependencies has changed, start over */
10443 /* FIXME -- since we're removing dvs as we go, we
10444 probably don't really need to start over... */
10445 start_over = 1;
10446 break;
10447 }
10448 }
10449 }
10450 if (start_over)
10451 i = 0;
10452 else
10453 ++i;
10454 }
10455 }
10456
10457 /* Register new dependencies based on the given opcode. */
10458
10459 static void
10460 mark_resources (struct ia64_opcode *idesc)
10461 {
10462 int i;
10463 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10464 int add_only_qp_reads = 0;
10465
10466	  /* A conditional branch only uses its resources if it is taken; if it is
10467	     taken, we stop following that path, so when it is not taken we register
10468	     only its QP reads.  The other branch types effectively *always* write
10469	     their resources.  */
10470 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10471 {
10472 add_only_qp_reads = 1;
10473 }
10474
10475 if (md.debug_dv)
10476 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10477
10478 for (i = 0; i < opdeps->nregs; i++)
10479 {
10480 const struct ia64_dependency *dep;
10481 struct rsrc specs[MAX_SPECS];
10482 int note;
10483 int path;
10484 int count;
10485
10486 dep = ia64_find_dependency (opdeps->regs[i]);
10487 note = NOTE (opdeps->regs[i]);
10488
10489 if (add_only_qp_reads
10490 && !(dep->mode == IA64_DV_WAR
10491 && (dep->specifier == IA64_RS_PR
10492 || dep->specifier == IA64_RS_PRr
10493 || dep->specifier == IA64_RS_PR63)))
10494 continue;
10495
10496 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10497
10498 while (count-- > 0)
10499 {
10500 mark_resource (idesc, dep, &specs[count],
10501 DEP (opdeps->regs[i]), md.path);
10502 }
10503
10504 /* The execution path may affect register values, which may in turn
10505 affect which indirect-access resources are accessed. */
10506 switch (dep->specifier)
10507 {
10508 default:
10509 break;
10510 case IA64_RS_CPUID:
10511 case IA64_RS_DBR:
10512 case IA64_RS_IBR:
10513 case IA64_RS_MSR:
10514 case IA64_RS_PKR:
10515 case IA64_RS_PMC:
10516 case IA64_RS_PMD:
10517 case IA64_RS_RR:
10518 for (path = 0; path < md.path; path++)
10519 {
10520 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10521 while (count-- > 0)
10522 mark_resource (idesc, dep, &specs[count],
10523 DEP (opdeps->regs[i]), path);
10524 }
10525 break;
10526 }
10527 }
10528 }
10529
10530 /* Remove dependencies when they no longer apply. */
10531
10532 static void
10533 update_dependencies (struct ia64_opcode *idesc)
10534 {
10535 int i;
10536
10537 if (strcmp (idesc->name, "srlz.i") == 0)
10538 {
10539 instruction_serialization ();
10540 }
10541 else if (strcmp (idesc->name, "srlz.d") == 0)
10542 {
10543 data_serialization ();
10544 }
10545 else if (is_interruption_or_rfi (idesc)
10546 || is_taken_branch (idesc))
10547 {
10548 /* Although technically the taken branch doesn't clear dependencies
10549 which require a srlz.[id], we don't follow the branch; the next
10550 instruction is assumed to start with a clean slate. */
10551 regdepslen = 0;
10552 md.path = 0;
10553 }
10554 else if (is_conditional_branch (idesc)
10555 && CURR_SLOT.qp_regno != 0)
10556 {
10557 int is_call = strstr (idesc->name, ".call") != NULL;
10558
10559 for (i = 0; i < qp_implieslen; i++)
10560 {
10561 /* If the conditional branch's predicate is implied by the predicate
10562 in an existing dependency, remove that dependency. */
10563 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10564 {
10565 int depind = 0;
10566 /* Note that this implied predicate takes a branch so that if
10567 a later insn generates a DV but its predicate implies this
10568 one, we can avoid the false DV warning. */
10569 qp_implies[i].p2_branched = 1;
10570 while (depind < regdepslen)
10571 {
10572 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10573 {
10574 print_dependency ("Removing", depind);
10575 regdeps[depind] = regdeps[--regdepslen];
10576 }
10577 else
10578 ++depind;
10579 }
10580 }
10581 }
10582 /* Any marked resources which have this same predicate should be
10583 cleared, provided that the QP hasn't been modified between the
10584 marking instruction and the branch. */
10585 if (is_call)
10586 {
10587 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10588 }
10589 else
10590 {
10591 i = 0;
10592 while (i < regdepslen)
10593 {
10594 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10595 && regdeps[i].link_to_qp_branch
10596 && (regdeps[i].file != CURR_SLOT.src_file
10597 || regdeps[i].line != CURR_SLOT.src_line))
10598 {
10599 /* Treat like a taken branch */
10600 print_dependency ("Removing", i);
10601 regdeps[i] = regdeps[--regdepslen];
10602 }
10603 else
10604 ++i;
10605 }
10606 }
10607 }
10608 }
10609
10610 /* Examine the current instruction for dependency violations. */
10611
10612 static int
10613 check_dv (struct ia64_opcode *idesc)
10614 {
10615 if (md.debug_dv)
10616 {
10617 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10618 idesc->name, CURR_SLOT.src_line,
10619 idesc->dependencies->nchks,
10620 idesc->dependencies->nregs);
10621 }
10622
10623 /* Look through the list of currently marked resources; if the current
10624 instruction has the dependency in its chks list which uses that resource,
10625 check against the specific resources used. */
10626 check_dependencies (idesc);
10627
10628 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10629 then add them to the list of marked resources. */
10630 mark_resources (idesc);
10631
10632 /* There are several types of dependency semantics, and each has its own
10633      requirements for being cleared:
10634
10635 Instruction serialization (insns separated by interruption, rfi, or
10636 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10637
10638 Data serialization (instruction serialization, or writer + srlz.d +
10639 reader, where writer and srlz.d are in separate groups) clears
10640 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10641 always be the case).
10642
10643 Instruction group break (groups separated by stop, taken branch,
10644 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10645 */
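  /* Purely as an illustration of the cases above (a hypothetical sketch,
     not taken from the DV tables): a sequence of the form

	 <writer> ;;
	 srlz.i ;;
	 <reader>

     with each part in its own instruction group provides instruction
     serialization (clearing DVS_INSTR); substituting srlz.d provides only
     data serialization (DVS_DATA); and a bare stop bit ";;" between two
     groups is what clears DVS_IMPLIED/DVS_IMPLIEDF.  */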
10646 update_dependencies (idesc);
10647
10648 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10649 warning. Keep track of as many as possible that are useful. */
10650 note_register_values (idesc);
10651
10652 /* We don't need or want this anymore. */
10653 md.mem_offset.hint = 0;
10654
10655 return 0;
10656 }
10657
10658 /* Translate one line of assembly.  Pseudo ops and labels do not show up
10659 here. */
10660 void
10661 md_assemble (char *str)
10662 {
10663 char *saved_input_line_pointer, *temp;
10664 const char *mnemonic;
10665 const struct pseudo_opcode *pdesc;
10666 struct ia64_opcode *idesc;
10667 unsigned char qp_regno;
10668 unsigned int flags;
10669 int ch;
10670
10671 saved_input_line_pointer = input_line_pointer;
10672 input_line_pointer = str;
10673
10674 /* extract the opcode (mnemonic): */
10675
10676 ch = get_symbol_name (&temp);
10677 mnemonic = temp;
10678 pdesc = (struct pseudo_opcode *) hash_find (md.pseudo_hash, mnemonic);
10679 if (pdesc)
10680 {
10681 (void) restore_line_pointer (ch);
10682 (*pdesc->handler) (pdesc->arg);
10683 goto done;
10684 }
10685
10686 /* Find the instruction descriptor matching the arguments. */
10687
10688 idesc = ia64_find_opcode (mnemonic);
10689 (void) restore_line_pointer (ch);
10690 if (!idesc)
10691 {
10692 as_bad (_("Unknown opcode `%s'"), mnemonic);
10693 goto done;
10694 }
10695
10696 idesc = parse_operands (idesc);
10697 if (!idesc)
10698 goto done;
10699
10700 /* Handle the dynamic ops we can handle now: */
10701 if (idesc->type == IA64_TYPE_DYN)
10702 {
10703 if (strcmp (idesc->name, "add") == 0)
10704 {
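	  /* Note (illustrative, not from the original source): "addl" accepts
	     a 22-bit immediate but only r0-r3 as its register operand, while
	     "adds" accepts any GR but only a 14-bit immediate.  So, e.g.,
	     "add r4 = 200000, r0" must become addl, and "add r4 = 16, r32"
	     must become adds; the register test below picks between them.  */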
10705 if (CURR_SLOT.opnd[2].X_op == O_register
10706 && CURR_SLOT.opnd[2].X_add_number < 4)
10707 mnemonic = "addl";
10708 else
10709 mnemonic = "adds";
10710 ia64_free_opcode (idesc);
10711 idesc = ia64_find_opcode (mnemonic);
10712 }
10713 else if (strcmp (idesc->name, "mov") == 0)
10714 {
10715 enum ia64_opnd opnd1, opnd2;
10716 int rop;
10717
10718 opnd1 = idesc->operands[0];
10719 opnd2 = idesc->operands[1];
10720 if (opnd1 == IA64_OPND_AR3)
10721 rop = 0;
10722 else if (opnd2 == IA64_OPND_AR3)
10723 rop = 1;
10724 else
10725 abort ();
10726 if (CURR_SLOT.opnd[rop].X_op == O_register)
10727 {
10728 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10729 mnemonic = "mov.i";
10730 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10731 mnemonic = "mov.m";
10732 else
10733 rop = -1;
10734 }
10735 else
10736 abort ();
10737 if (rop >= 0)
10738 {
10739 ia64_free_opcode (idesc);
10740 idesc = ia64_find_opcode (mnemonic);
10741 while (idesc != NULL
10742 && (idesc->operands[0] != opnd1
10743 || idesc->operands[1] != opnd2))
10744 idesc = get_next_opcode (idesc);
10745 }
10746 }
10747 }
10748 else if (strcmp (idesc->name, "mov.i") == 0
10749 || strcmp (idesc->name, "mov.m") == 0)
10750 {
10751 enum ia64_opnd opnd1, opnd2;
10752 int rop;
10753
10754 opnd1 = idesc->operands[0];
10755 opnd2 = idesc->operands[1];
10756 if (opnd1 == IA64_OPND_AR3)
10757 rop = 0;
10758 else if (opnd2 == IA64_OPND_AR3)
10759 rop = 1;
10760 else
10761 abort ();
10762 if (CURR_SLOT.opnd[rop].X_op == O_register)
10763 {
10764 char unit = 'a';
10765 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10766 unit = 'i';
10767 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10768 unit = 'm';
10769 if (unit != 'a' && unit != idesc->name [4])
10770 as_bad (_("AR %d can only be accessed by %c-unit"),
10771 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10772 TOUPPER (unit));
10773 }
10774 }
10775 else if (strcmp (idesc->name, "hint.b") == 0)
10776 {
10777 switch (md.hint_b)
10778 {
10779 case hint_b_ok:
10780 break;
10781 case hint_b_warning:
10782 as_warn (_("hint.b may be treated as nop"));
10783 break;
10784 case hint_b_error:
10785 as_bad (_("hint.b shouldn't be used"));
10786 break;
10787 }
10788 }
10789
10790 qp_regno = 0;
10791 if (md.qp.X_op == O_register)
10792 {
10793 qp_regno = md.qp.X_add_number - REG_P;
10794 md.qp.X_op = O_absent;
10795 }
10796
10797 flags = idesc->flags;
10798
10799 if ((flags & IA64_OPCODE_FIRST) != 0)
10800 {
10801 /* The alignment frag has to end with a stop bit only if the
10802 next instruction after the alignment directive has to be
10803 the first instruction in an instruction group. */
10804 if (align_frag)
10805 {
10806 while (align_frag->fr_type != rs_align_code)
10807 {
10808 align_frag = align_frag->fr_next;
10809 if (!align_frag)
10810 break;
10811 }
10812 /* align_frag can be NULL if there are directives in
10813 between. */
10814 if (align_frag && align_frag->fr_next == frag_now)
10815 align_frag->tc_frag_data = 1;
10816 }
10817
10818 insn_group_break (1, 0, 0);
10819 }
10820 align_frag = NULL;
10821
10822 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10823 {
10824 as_bad (_("`%s' cannot be predicated"), idesc->name);
10825 goto done;
10826 }
10827
10828 /* Build the instruction. */
10829 CURR_SLOT.qp_regno = qp_regno;
10830 CURR_SLOT.idesc = idesc;
10831 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10832 dwarf2_where (&CURR_SLOT.debug_line);
10833 dwarf2_consume_line_info ();
10834
10835 /* Add unwind entries, if there are any. */
10836 if (unwind.current_entry)
10837 {
10838 CURR_SLOT.unwind_record = unwind.current_entry;
10839 unwind.current_entry = NULL;
10840 }
10841 if (unwind.pending_saves)
10842 {
10843 if (unwind.pending_saves->next)
10844 {
10845 /* Attach the next pending save to the next slot so that its
10846 slot number will get set correctly. */
10847 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10848 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10849 }
10850 else
10851 unwind.pending_saves = NULL;
10852 }
10853 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10854 unwind.insn = 1;
10855
10856 /* Check for dependency violations. */
10857 if (md.detect_dv)
10858 check_dv (idesc);
10859
10860 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10861 if (++md.num_slots_in_use >= NUM_SLOTS)
10862 emit_one_bundle ();
10863
10864 if ((flags & IA64_OPCODE_LAST) != 0)
10865 insn_group_break (1, 0, 0);
10866
10867 md.last_text_seg = now_seg;
10868
10869 done:
10870 input_line_pointer = saved_input_line_pointer;
10871 }
10872
10873 /* Called when symbol NAME cannot be found in the symbol table.
10874 Should be used for dynamic valued symbols only. */
10875
10876 symbolS *
10877 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10878 {
10879 return 0;
10880 }
10881
10882 /* Called for any expression that can not be recognized. When the
10883 function is called, `input_line_pointer' will point to the start of
10884 the expression. */
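/* For example (an assumed, illustrative case): the bracketed index of an
   indirect register file access such as "mov r4 = pmc[r5]" is expected to
   reach this function as "[r5]" and is converted into an O_index
   expression below.  */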
10885
10886 void
10887 md_operand (expressionS *e)
10888 {
10889 switch (*input_line_pointer)
10890 {
10891 case '[':
10892 ++input_line_pointer;
10893 expression_and_evaluate (e);
10894 if (*input_line_pointer != ']')
10895 {
10896 as_bad (_("Closing bracket missing"));
10897 goto err;
10898 }
10899 else
10900 {
10901 if (e->X_op != O_register
10902 || e->X_add_number < REG_GR
10903 || e->X_add_number > REG_GR + 127)
10904 {
10905 as_bad (_("Index must be a general register"));
10906 e->X_add_number = REG_GR;
10907 }
10908
10909 ++input_line_pointer;
10910 e->X_op = O_index;
10911 }
10912 break;
10913
10914 default:
10915 break;
10916 }
10917 return;
10918
10919 err:
10920 ignore_rest_of_line ();
10921 }
10922
10923 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10924    a section symbol plus some offset.  For relocs involving @fptr()
10925    directives, we don't want such adjustments since we need to have the
10926 original symbol's name in the reloc. */
10927 int
10928 ia64_fix_adjustable (fixS *fix)
10929 {
10930 /* Prevent all adjustments to global symbols */
10931 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10932 return 0;
10933
10934 switch (fix->fx_r_type)
10935 {
10936 case BFD_RELOC_IA64_FPTR64I:
10937 case BFD_RELOC_IA64_FPTR32MSB:
10938 case BFD_RELOC_IA64_FPTR32LSB:
10939 case BFD_RELOC_IA64_FPTR64MSB:
10940 case BFD_RELOC_IA64_FPTR64LSB:
10941 case BFD_RELOC_IA64_LTOFF_FPTR22:
10942 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10943 return 0;
10944 default:
10945 break;
10946 }
10947
10948 return 1;
10949 }
10950
10951 int
10952 ia64_force_relocation (fixS *fix)
10953 {
10954 switch (fix->fx_r_type)
10955 {
10956 case BFD_RELOC_IA64_FPTR64I:
10957 case BFD_RELOC_IA64_FPTR32MSB:
10958 case BFD_RELOC_IA64_FPTR32LSB:
10959 case BFD_RELOC_IA64_FPTR64MSB:
10960 case BFD_RELOC_IA64_FPTR64LSB:
10961
10962 case BFD_RELOC_IA64_LTOFF22:
10963 case BFD_RELOC_IA64_LTOFF64I:
10964 case BFD_RELOC_IA64_LTOFF_FPTR22:
10965 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10966 case BFD_RELOC_IA64_PLTOFF22:
10967 case BFD_RELOC_IA64_PLTOFF64I:
10968 case BFD_RELOC_IA64_PLTOFF64MSB:
10969 case BFD_RELOC_IA64_PLTOFF64LSB:
10970
10971 case BFD_RELOC_IA64_LTOFF22X:
10972 case BFD_RELOC_IA64_LDXMOV:
10973 return 1;
10974
10975 default:
10976 break;
10977 }
10978
10979 return generic_force_reloc (fix);
10980 }
10981
10982 /* Decide from what point a pc-relative relocation is relative to,
10983 relative to the pc-relative fixup. Er, relatively speaking. */
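/* Descriptive note (added for clarity): in code sections the low four bits
   of the fixup address (the slot number within its 16-byte bundle) are
   masked off below, since IA-64 IP-relative displacements are measured
   from the start of the bundle containing the fixup.  */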
10984 long
10985 ia64_pcrel_from_section (fixS *fix, segT sec)
10986 {
10987 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10988
10989 if (bfd_section_flags (sec) & SEC_CODE)
10990 off &= ~0xfUL;
10991
10992 return off;
10993 }
10994
10995
10996 /* Used to emit section-relative relocs for the dwarf2 debug data. */
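/* Illustrative sketch (assumed equivalence): this is roughly what
   assembling "data4 @secrel(sym)" would do; marking the expression with
   the FUNC_SEC_RELATIVE pseudo-function lets ia64_gen_real_reloc_type
   map the plain DIR32/DIR64 reloc to the corresponding SECREL one.  */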
10997 void
10998 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10999 {
11000 expressionS exp;
11001
11002 exp.X_op = O_pseudo_fixup;
11003 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
11004 exp.X_add_number = 0;
11005 exp.X_add_symbol = symbol;
11006 emit_expr (&exp, size);
11007 }
11008
11009 /* This is called whenever some data item (not an instruction) needs a
11010 fixup. We pick the right reloc code depending on the byteorder
11011 currently in effect. */
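/* For example (illustrative, assumed directives): a plain "data4 sym"
   gets BFD_RELOC_IA64_DIR32LSB or ..._DIR32MSB depending on the byte
   order in effect, while "data8 @iplt(fn)" under the 32-bit ABI, or
   "data16 @iplt(fn)", is turned into an IPLT function descriptor
   relocation below.  */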
11012 void
11013 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11014 bfd_reloc_code_real_type code)
11015 {
11016 fixS *fix;
11017
11018 switch (nbytes)
11019 {
11020     /* There are no relocs for 8 and 16 bit quantities, but we allow
11021 them here since they will work fine as long as the expression
11022 is fully defined at the end of the pass over the source file. */
11023 case 1: code = BFD_RELOC_8; break;
11024 case 2: code = BFD_RELOC_16; break;
11025 case 4:
11026 if (target_big_endian)
11027 code = BFD_RELOC_IA64_DIR32MSB;
11028 else
11029 code = BFD_RELOC_IA64_DIR32LSB;
11030 break;
11031
11032 case 8:
11033 /* In 32-bit mode, data8 could mean function descriptors too. */
11034 if (exp->X_op == O_pseudo_fixup
11035 && exp->X_op_symbol
11036 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11037 && !(md.flags & EF_IA_64_ABI64))
11038 {
11039 if (target_big_endian)
11040 code = BFD_RELOC_IA64_IPLTMSB;
11041 else
11042 code = BFD_RELOC_IA64_IPLTLSB;
11043 exp->X_op = O_symbol;
11044 break;
11045 }
11046 else
11047 {
11048 if (target_big_endian)
11049 code = BFD_RELOC_IA64_DIR64MSB;
11050 else
11051 code = BFD_RELOC_IA64_DIR64LSB;
11052 break;
11053 }
11054
11055 case 16:
11056 if (exp->X_op == O_pseudo_fixup
11057 && exp->X_op_symbol
11058 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11059 {
11060 if (target_big_endian)
11061 code = BFD_RELOC_IA64_IPLTMSB;
11062 else
11063 code = BFD_RELOC_IA64_IPLTLSB;
11064 exp->X_op = O_symbol;
11065 break;
11066 }
11067 /* FALLTHRU */
11068
11069 default:
11070 as_bad (_("Unsupported fixup size %d"), nbytes);
11071 ignore_rest_of_line ();
11072 return;
11073 }
11074
11075 if (exp->X_op == O_pseudo_fixup)
11076 {
11077 exp->X_op = O_symbol;
11078 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11079 /* ??? If code unchanged, unsupported. */
11080 }
11081
11082 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11083 /* We need to store the byte order in effect in case we're going
11084      to fix an 8 or 16 bit relocation (for which there are no real
11085 relocs available). See md_apply_fix(). */
11086 fix->tc_fix_data.bigendian = target_big_endian;
11087 }
11088
11089 /* Return the actual relocation we wish to associate with the pseudo
11090 reloc described by SYM and R_TYPE. SYM should be one of the
11091 symbols in the pseudo_func array, or NULL. */
11092
11093 static bfd_reloc_code_real_type
11094 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11095 {
11096 bfd_reloc_code_real_type newr = 0;
11097 const char *type = NULL, *suffix = "";
11098
11099 if (sym == NULL)
11100 {
11101 return r_type;
11102 }
11103
11104 switch (S_GET_VALUE (sym))
11105 {
11106 case FUNC_FPTR_RELATIVE:
11107 switch (r_type)
11108 {
11109 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11110 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11111 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11112 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11113 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11114 default: type = "FPTR"; break;
11115 }
11116 break;
11117
11118 case FUNC_GP_RELATIVE:
11119 switch (r_type)
11120 {
11121 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11122 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11123 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11124 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11125 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11126 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11127 default: type = "GPREL"; break;
11128 }
11129 break;
11130
11131 case FUNC_LT_RELATIVE:
11132 switch (r_type)
11133 {
11134 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11135 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11136 default: type = "LTOFF"; break;
11137 }
11138 break;
11139
11140 case FUNC_LT_RELATIVE_X:
11141 switch (r_type)
11142 {
11143 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11144 default: type = "LTOFF"; suffix = "X"; break;
11145 }
11146 break;
11147
11148 case FUNC_PC_RELATIVE:
11149 switch (r_type)
11150 {
11151 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11152 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11153 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11154 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11155 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11156 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11157 default: type = "PCREL"; break;
11158 }
11159 break;
11160
11161 case FUNC_PLT_RELATIVE:
11162 switch (r_type)
11163 {
11164 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11165 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11166 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11167 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11168 default: type = "PLTOFF"; break;
11169 }
11170 break;
11171
11172 case FUNC_SEC_RELATIVE:
11173 switch (r_type)
11174 {
11175 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11176 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11177 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11178 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11179 default: type = "SECREL"; break;
11180 }
11181 break;
11182
11183 case FUNC_SEG_RELATIVE:
11184 switch (r_type)
11185 {
11186 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11187 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11188 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11189 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11190 default: type = "SEGREL"; break;
11191 }
11192 break;
11193
11194 case FUNC_LTV_RELATIVE:
11195 switch (r_type)
11196 {
11197 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11198 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11199 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11200 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11201 default: type = "LTV"; break;
11202 }
11203 break;
11204
11205 case FUNC_LT_FPTR_RELATIVE:
11206 switch (r_type)
11207 {
11208 case BFD_RELOC_IA64_IMM22:
11209 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11210 case BFD_RELOC_IA64_IMM64:
11211 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11212 case BFD_RELOC_IA64_DIR32MSB:
11213 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11214 case BFD_RELOC_IA64_DIR32LSB:
11215 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11216 case BFD_RELOC_IA64_DIR64MSB:
11217 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11218 case BFD_RELOC_IA64_DIR64LSB:
11219 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11220 default:
11221 type = "LTOFF_FPTR"; break;
11222 }
11223 break;
11224
11225 case FUNC_TP_RELATIVE:
11226 switch (r_type)
11227 {
11228 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11229 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11230 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11231 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11232 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11233 default: type = "TPREL"; break;
11234 }
11235 break;
11236
11237 case FUNC_LT_TP_RELATIVE:
11238 switch (r_type)
11239 {
11240 case BFD_RELOC_IA64_IMM22:
11241 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11242 default:
11243 type = "LTOFF_TPREL"; break;
11244 }
11245 break;
11246
11247 case FUNC_DTP_MODULE:
11248 switch (r_type)
11249 {
11250 case BFD_RELOC_IA64_DIR64MSB:
11251 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11252 case BFD_RELOC_IA64_DIR64LSB:
11253 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11254 default:
11255 type = "DTPMOD"; break;
11256 }
11257 break;
11258
11259 case FUNC_LT_DTP_MODULE:
11260 switch (r_type)
11261 {
11262 case BFD_RELOC_IA64_IMM22:
11263 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11264 default:
11265 type = "LTOFF_DTPMOD"; break;
11266 }
11267 break;
11268
11269 case FUNC_DTP_RELATIVE:
11270 switch (r_type)
11271 {
11272 case BFD_RELOC_IA64_DIR32MSB:
11273 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11274 case BFD_RELOC_IA64_DIR32LSB:
11275 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11276 case BFD_RELOC_IA64_DIR64MSB:
11277 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11278 case BFD_RELOC_IA64_DIR64LSB:
11279 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11280 case BFD_RELOC_IA64_IMM14:
11281 newr = BFD_RELOC_IA64_DTPREL14; break;
11282 case BFD_RELOC_IA64_IMM22:
11283 newr = BFD_RELOC_IA64_DTPREL22; break;
11284 case BFD_RELOC_IA64_IMM64:
11285 newr = BFD_RELOC_IA64_DTPREL64I; break;
11286 default:
11287 type = "DTPREL"; break;
11288 }
11289 break;
11290
11291 case FUNC_LT_DTP_RELATIVE:
11292 switch (r_type)
11293 {
11294 case BFD_RELOC_IA64_IMM22:
11295 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11296 default:
11297 type = "LTOFF_DTPREL"; break;
11298 }
11299 break;
11300
11301 case FUNC_IPLT_RELOC:
11302 switch (r_type)
11303 {
11304 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11305 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11306 default: type = "IPLT"; break;
11307 }
11308 break;
11309
11310 #ifdef TE_VMS
11311 case FUNC_SLOTCOUNT_RELOC:
11312 return DUMMY_RELOC_IA64_SLOTCOUNT;
11313 #endif
11314
11315 default:
11316 abort ();
11317 }
11318
11319 if (newr)
11320 return newr;
11321 else
11322 {
11323 int width;
11324
11325 if (!type)
11326 abort ();
11327 switch (r_type)
11328 {
11329 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11330 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11331 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11332 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11333 case BFD_RELOC_UNUSED: width = 13; break;
11334 case BFD_RELOC_IA64_IMM14: width = 14; break;
11335 case BFD_RELOC_IA64_IMM22: width = 22; break;
11336 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11337 default: abort ();
11338 }
11339
11340 /* This should be an error, but since previously there wasn't any
11341 diagnostic here, don't make it fail because of this for now. */
11342 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11343 return r_type;
11344 }
11345 }
11346
11347 /* Here is where we generate the appropriate reloc for pseudo relocation
11348 functions. */
11349 void
11350 ia64_validate_fix (fixS *fix)
11351 {
11352 switch (fix->fx_r_type)
11353 {
11354 case BFD_RELOC_IA64_FPTR64I:
11355 case BFD_RELOC_IA64_FPTR32MSB:
11356 case BFD_RELOC_IA64_FPTR64LSB:
11357 case BFD_RELOC_IA64_LTOFF_FPTR22:
11358 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11359 if (fix->fx_offset != 0)
11360 as_bad_where (fix->fx_file, fix->fx_line,
11361 _("No addend allowed in @fptr() relocation"));
11362 break;
11363 default:
11364 break;
11365 }
11366 }
11367
11368 static void
11369 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11370 {
11371 bfd_vma insn[3], t0, t1, control_bits;
11372 const char *err;
11373 char *fixpos;
11374 long slot;
11375
11376 slot = fix->fx_where & 0x3;
11377 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11378
11379   /* Bundles are always in little-endian byte order.  */
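  /* Descriptive note (added for clarity): a bundle is 128 bits, a 5-bit
     template followed by three 41-bit instruction slots.  The two 64-bit
     halves are unpacked below as

	 template = t0 bits 0..4
	 slot 0   = t0 bits 5..45
	 slot 1   = t0 bits 46..63 concatenated with t1 bits 0..22
	 slot 2   = t1 bits 23..63

     and repacked the same way at the end of this function.  */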
11380 t0 = bfd_getl64 (fixpos);
11381 t1 = bfd_getl64 (fixpos + 8);
11382 control_bits = t0 & 0x1f;
11383 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11384 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11385 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
11386
11387 err = NULL;
11388 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11389 {
11390 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11391 insn[2] |= (((value & 0x7f) << 13)
11392 | (((value >> 7) & 0x1ff) << 27)
11393 | (((value >> 16) & 0x1f) << 22)
11394 | (((value >> 21) & 0x1) << 21)
11395 | (((value >> 63) & 0x1) << 36));
11396 }
11397 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11398 {
11399 if (value & ~0x3fffffffffffffffULL)
11400 err = _("integer operand out of range");
11401 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11402 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11403 }
11404 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11405 {
11406 value >>= 4;
11407 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11408 insn[2] |= ((((value >> 59) & 0x1) << 36)
11409 | (((value >> 0) & 0xfffff) << 13));
11410 }
11411 else
11412 err = (*odesc->insert) (odesc, value, insn + slot);
11413
11414 if (err)
11415 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11416
11417 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11418 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11419 number_to_chars_littleendian (fixpos + 0, t0, 8);
11420 number_to_chars_littleendian (fixpos + 8, t1, 8);
11421 }
11422
11423 /* Attempt to simplify or even eliminate a fixup. The return value is
11424 ignored; perhaps it was once meaningful, but now it is historical.
11425 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11426
11427 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11428 (if possible). */
11429
11430 void
11431 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11432 {
11433 char *fixpos;
11434 valueT value = *valP;
11435
11436 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11437
11438 if (fix->fx_pcrel)
11439 {
11440 switch (fix->fx_r_type)
11441 {
11442 case BFD_RELOC_IA64_PCREL21B: break;
11443 case BFD_RELOC_IA64_PCREL21BI: break;
11444 case BFD_RELOC_IA64_PCREL21F: break;
11445 case BFD_RELOC_IA64_PCREL21M: break;
11446 case BFD_RELOC_IA64_PCREL60B: break;
11447 case BFD_RELOC_IA64_PCREL22: break;
11448 case BFD_RELOC_IA64_PCREL64I: break;
11449 case BFD_RELOC_IA64_PCREL32MSB: break;
11450 case BFD_RELOC_IA64_PCREL32LSB: break;
11451 case BFD_RELOC_IA64_PCREL64MSB: break;
11452 case BFD_RELOC_IA64_PCREL64LSB: break;
11453 default:
11454 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11455 fix->fx_r_type);
11456 break;
11457 }
11458 }
11459 if (fix->fx_addsy)
11460 {
11461 switch ((unsigned) fix->fx_r_type)
11462 {
11463 case BFD_RELOC_UNUSED:
11464 /* This must be a TAG13 or TAG13b operand. There are no external
11465 relocs defined for them, so we must give an error. */
11466 as_bad_where (fix->fx_file, fix->fx_line,
11467 _("%s must have a constant value"),
11468 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11469 fix->fx_done = 1;
11470 return;
11471
11472 case BFD_RELOC_IA64_TPREL14:
11473 case BFD_RELOC_IA64_TPREL22:
11474 case BFD_RELOC_IA64_TPREL64I:
11475 case BFD_RELOC_IA64_LTOFF_TPREL22:
11476 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11477 case BFD_RELOC_IA64_DTPREL14:
11478 case BFD_RELOC_IA64_DTPREL22:
11479 case BFD_RELOC_IA64_DTPREL64I:
11480 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11481 S_SET_THREAD_LOCAL (fix->fx_addsy);
11482 break;
11483
11484 #ifdef TE_VMS
11485 case DUMMY_RELOC_IA64_SLOTCOUNT:
11486 as_bad_where (fix->fx_file, fix->fx_line,
11487 _("cannot resolve @slotcount parameter"));
11488 fix->fx_done = 1;
11489 return;
11490 #endif
11491
11492 default:
11493 break;
11494 }
11495 }
11496 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11497 {
11498 #ifdef TE_VMS
11499 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11500 {
11501 	  /* For @slotcount, convert an address difference into a
11502 	     difference in instruction slots.  */
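	  /* Worked example (illustrative): a byte difference of 0x21, i.e.
	     two whole bundles (2 * 16 bytes) plus slot 1, yields
	     v = 2 * 3 + 1 = 7 slots.  The 0x0e/0x0f cases cover differences
	     starting in slot 1 or 2 of a bundle; e.g. from slot 2 to slot 0
	     of the next bundle the byte difference is 0x0e and the slot
	     count is 0 * 3 + 1 = 1.  */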
11503 valueT v;
11504
11505 v = (value >> 4) * 3;
11506 switch (value & 0x0f)
11507 {
11508 case 0:
11509 case 1:
11510 case 2:
11511 v += value & 0x0f;
11512 break;
11513 case 0x0f:
11514 v += 2;
11515 break;
11516 case 0x0e:
11517 v += 1;
11518 break;
11519 default:
11520 as_bad (_("invalid @slotcount value"));
11521 }
11522 value = v;
11523 }
11524 #endif
11525
11526 if (fix->tc_fix_data.bigendian)
11527 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11528 else
11529 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11530 fix->fx_done = 1;
11531 }
11532 else
11533 {
11534 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11535 fix->fx_done = 1;
11536 }
11537 }
11538
11539 /* Generate the BFD reloc to be stuck in the object file from the
11540 fixup used internally in the assembler. */
11541
11542 arelent *
11543 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11544 {
11545 arelent *reloc;
11546
11547 reloc = XNEW (arelent);
11548 reloc->sym_ptr_ptr = XNEW (asymbol *);
11549 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11550 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11551 reloc->addend = fixp->fx_offset;
11552 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11553
11554 if (!reloc->howto)
11555 {
11556 as_bad_where (fixp->fx_file, fixp->fx_line,
11557 _("Cannot represent %s relocation in object file"),
11558 bfd_get_reloc_code_name (fixp->fx_r_type));
11559 free (reloc);
11560 return NULL;
11561 }
11562 return reloc;
11563 }
11564
11565 /* Turn a string in input_line_pointer into a floating point constant
11566 of type TYPE, and store the appropriate bytes in *LIT. The number
11567 of LITTLENUMS emitted is stored in *SIZE. An error message is
11568 returned, or NULL on OK. */
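/* Summary of the cases below (descriptive, derived from the code):
   'f'/'F'/'s'/'S' produce a 4-byte single, 'd'/'D'/'r'/'R' an 8-byte
   double, and 'x'/'X'/'p'/'P' an 80-bit extended value; 'X' additionally
   pads the result out to 16 bytes.  */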
11569
11570 const char *
11571 md_atof (int type, char *lit, int *size)
11572 {
11573 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11574 char *t;
11575 int prec;
11576
11577 switch (type)
11578 {
11579 /* IEEE floats */
11580 case 'f':
11581 case 'F':
11582 case 's':
11583 case 'S':
11584 prec = 2;
11585 break;
11586
11587 case 'd':
11588 case 'D':
11589 case 'r':
11590 case 'R':
11591 prec = 4;
11592 break;
11593
11594 case 'x':
11595 case 'X':
11596 case 'p':
11597 case 'P':
11598 prec = 5;
11599 break;
11600
11601 default:
11602 *size = 0;
11603 return _("Unrecognized or unsupported floating point constant");
11604 }
11605 t = atof_ieee (input_line_pointer, type, words);
11606 if (t)
11607 input_line_pointer = t;
11608
11609 (*ia64_float_to_chars) (lit, words, prec);
11610
11611 if (type == 'X')
11612 {
11613       /* It is a 10-byte floating point value with 6 bytes of padding.  */
11614 memset (&lit [10], 0, 6);
11615 *size = 8 * sizeof (LITTLENUM_TYPE);
11616 }
11617 else
11618 *size = prec * sizeof (LITTLENUM_TYPE);
11619
11620 return NULL;
11621 }
11622
11623 /* Handle ia64 specific semantics of the align directive. */
11624
11625 void
11626 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11627 const char *fill ATTRIBUTE_UNUSED,
11628 int len ATTRIBUTE_UNUSED,
11629 int max ATTRIBUTE_UNUSED)
11630 {
11631 if (subseg_text_p (now_seg))
11632 ia64_flush_insns ();
11633 }
11634
11635 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11636 of an rs_align_code fragment. */
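/* Descriptive note (added for clarity): the padding emitted here consists
   of whole 16-byte nop bundles; le_nop_stop is used instead of le_nop when
   the fragment was flagged (via tc_frag_data) as having to end its
   instruction group with a stop bit.  */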
11637
11638 void
11639 ia64_handle_align (fragS *fragp)
11640 {
11641 int bytes;
11642 char *p;
11643 const unsigned char *nop_type;
11644
11645 if (fragp->fr_type != rs_align_code)
11646 return;
11647
11648 /* Check if this frag has to end with a stop bit. */
11649 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11650
11651 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11652 p = fragp->fr_literal + fragp->fr_fix;
11653
11654   /* If no padding is needed, check whether we need a stop bit.  */
11655 if (!bytes && fragp->tc_frag_data)
11656 {
11657 if (fragp->fr_fix < 16)
11658 #if 1
11659 /* FIXME: It won't work with
11660 .align 16
11661 alloc r32=ar.pfs,1,2,4,0
11662 */
11663 ;
11664 #else
11665 as_bad_where (fragp->fr_file, fragp->fr_line,
11666 _("Can't add stop bit to mark end of instruction group"));
11667 #endif
11668 else
11669 /* Bundles are always in little-endian byte order. Make sure
11670 the previous bundle has the stop bit. */
11671 *(p - 16) |= 1;
11672 }
11673
11674 /* Make sure we are on a 16-byte boundary, in case someone has been
11675 putting data into a text section. */
11676 if (bytes & 15)
11677 {
11678 int fix = bytes & 15;
11679 memset (p, 0, fix);
11680 p += fix;
11681 bytes -= fix;
11682 fragp->fr_fix += fix;
11683 }
11684
11685 /* Instruction bundles are always little-endian. */
11686 memcpy (p, nop_type, 16);
11687 fragp->fr_var = 16;
11688 }
11689
11690 static void
11691 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11692 int prec)
11693 {
11694 while (prec--)
11695 {
11696 number_to_chars_bigendian (lit, (long) (*words++),
11697 sizeof (LITTLENUM_TYPE));
11698 lit += sizeof (LITTLENUM_TYPE);
11699 }
11700 }
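/* Descriptive note (added for clarity): the little-endian variant below
   emits the LITTLENUMs in reverse order, since atof_ieee produces them
   most significant first.  */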
11701
11702 static void
11703 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11704 int prec)
11705 {
11706 while (prec--)
11707 {
11708 number_to_chars_littleendian (lit, (long) (words[prec]),
11709 sizeof (LITTLENUM_TYPE));
11710 lit += sizeof (LITTLENUM_TYPE);
11711 }
11712 }
11713
11714 void
11715 ia64_elf_section_change_hook (void)
11716 {
11717 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11718 && elf_linked_to_section (now_seg) == NULL)
11719 elf_linked_to_section (now_seg) = text_section;
11720 dot_byteorder (-1);
11721 }
11722
11723 /* Check if a label should be made global. */
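/* Descriptive note (an assumption based on the check below): a label
   written with a double colon, e.g. "foo::", is made global here; the
   second ':' is still pending in the input when this hook is called.  */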
11724 void
11725 ia64_check_label (symbolS *label)
11726 {
11727 if (*input_line_pointer == ':')
11728 {
11729 S_SET_EXTERNAL (label);
11730 input_line_pointer++;
11731 }
11732 }
11733
11734 /* Used to remember where .alias and .secalias directives are seen. We
11735 will rename symbol and section names when we are about to output
11736 the relocatable file. */
11737 struct alias
11738 {
11739 const char *file; /* The file where the directive is seen. */
11740 unsigned int line; /* The line number the directive is at. */
11741 const char *name; /* The original name of the symbol. */
11742 };
11743
11744 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11745 .secalias. Otherwise, it is .alias. */
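/* For example (illustrative, hypothetical names):

	.alias	  foo, "FOO$BAR"
	.secalias .mysec, "MY$SECTION"

   record that symbol "foo" and section ".mysec" are to be renamed to the
   quoted names when the object file is written out.  */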
11746 static void
11747 dot_alias (int section)
11748 {
11749 char *name, *alias;
11750 char delim;
11751 char *end_name;
11752 int len;
11753 const char *error_string;
11754 struct alias *h;
11755 const char *a;
11756 struct hash_control *ahash, *nhash;
11757 const char *kind;
11758
11759 delim = get_symbol_name (&name);
11760 end_name = input_line_pointer;
11761 *end_name = delim;
11762
11763 if (name == end_name)
11764 {
11765 as_bad (_("expected symbol name"));
11766 ignore_rest_of_line ();
11767 return;
11768 }
11769
11770 SKIP_WHITESPACE_AFTER_NAME ();
11771
11772 if (*input_line_pointer != ',')
11773 {
11774 *end_name = 0;
11775 as_bad (_("expected comma after \"%s\""), name);
11776 *end_name = delim;
11777 ignore_rest_of_line ();
11778 return;
11779 }
11780
11781 input_line_pointer++;
11782 *end_name = 0;
11783 ia64_canonicalize_symbol_name (name);
11784
11785   /* We call demand_copy_C_string to check if the alias string is valid.
11786 There should be a closing `"' and no `\0' in the string. */
11787 alias = demand_copy_C_string (&len);
11788 if (alias == NULL)
11789 {
11790 ignore_rest_of_line ();
11791 return;
11792 }
11793
11794 /* Make a copy of name string. */
11795 len = strlen (name) + 1;
11796 obstack_grow (&notes, name, len);
11797 name = obstack_finish (&notes);
11798
11799 if (section)
11800 {
11801 kind = "section";
11802 ahash = secalias_hash;
11803 nhash = secalias_name_hash;
11804 }
11805 else
11806 {
11807 kind = "symbol";
11808 ahash = alias_hash;
11809 nhash = alias_name_hash;
11810 }
11811
11812 /* Check if alias has been used before. */
11813 h = (struct alias *) hash_find (ahash, alias);
11814 if (h)
11815 {
11816 if (strcmp (h->name, name))
11817 as_bad (_("`%s' is already the alias of %s `%s'"),
11818 alias, kind, h->name);
11819 goto out;
11820 }
11821
11822 /* Check if name already has an alias. */
11823 a = (const char *) hash_find (nhash, name);
11824 if (a)
11825 {
11826 if (strcmp (a, alias))
11827 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11828 goto out;
11829 }
11830
11831 h = XNEW (struct alias);
11832 h->file = as_where (&h->line);
11833 h->name = name;
11834
11835 error_string = hash_jam (ahash, alias, (void *) h);
11836 if (error_string)
11837 {
11838 as_fatal (_("inserting \"%s\" into %s alias hash table failed: %s"),
11839 alias, kind, error_string);
11840 goto out;
11841 }
11842
11843 error_string = hash_jam (nhash, name, (void *) alias);
11844 if (error_string)
11845 {
11846 as_fatal (_("inserting \"%s\" into %s name hash table failed: %s"),
11847 alias, kind, error_string);
11848 out:
11849 obstack_free (&notes, name);
11850 obstack_free (&notes, alias);
11851 }
11852
11853 demand_empty_rest_of_line ();
11854 }
11855
11856 /* Rename the original symbol name to its alias.  */
11857 static void
11858 do_alias (const char *alias, void *value)
11859 {
11860 struct alias *h = (struct alias *) value;
11861 symbolS *sym = symbol_find (h->name);
11862
11863 if (sym == NULL)
11864 {
11865 #ifdef TE_VMS
11866       /* VMS uses .alias extensively to alias CRTL functions to the same
11867          name with a decc$ prefix.  Sometimes the function gets optimized
11868          away and a warning results, which should be suppressed.  */
11869 if (strncmp (alias, "decc$", 5) != 0)
11870 #endif
11871 as_warn_where (h->file, h->line,
11872 _("symbol `%s' aliased to `%s' is not used"),
11873 h->name, alias);
11874 }
11875 else
11876 S_SET_NAME (sym, (char *) alias);
11877 }
11878
11879 /* Called from write_object_file. */
11880 void
11881 ia64_adjust_symtab (void)
11882 {
11883 hash_traverse (alias_hash, do_alias);
11884 }
11885
11886 /* Rename the original section name to its alias.  */
11887 static void
11888 do_secalias (const char *alias, void *value)
11889 {
11890 struct alias *h = (struct alias *) value;
11891 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11892
11893 if (sec == NULL)
11894 as_warn_where (h->file, h->line,
11895 _("section `%s' aliased to `%s' is not used"),
11896 h->name, alias);
11897 else
11898 sec->name = alias;
11899 }
11900
11901 /* Called from write_object_file. */
11902 void
11903 ia64_frob_file (void)
11904 {
11905 hash_traverse (secalias_hash, do_secalias);
11906 }
11907
11908 #ifdef TE_VMS
11909 #define NT_VMS_MHD 1
11910 #define NT_VMS_LNM 2
11911
11912 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11913 .note section. */
11914
11915 /* Manufacture a VMS-like time string. */
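/* E.g. (descriptive, derived from the code below) ctime()'s
   "Wed Jun 30 21:49:08 1993" is rearranged into "30-Jun-1993 21:49".  */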
11916 static void
11917 get_vms_time (char *Now)
11918 {
11919 char *pnt;
11920 time_t timeb;
11921
11922 time (&timeb);
11923 pnt = ctime (&timeb);
11924 pnt[3] = 0;
11925 pnt[7] = 0;
11926 pnt[10] = 0;
11927 pnt[16] = 0;
11928 pnt[24] = 0;
11929 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11930 }
11931
11932 void
11933 ia64_vms_note (void)
11934 {
11935 char *p;
11936 asection *seg = now_seg;
11937 subsegT subseg = now_subseg;
11938 asection *secp = NULL;
11939 char *bname;
11940 char buf [256];
11941 symbolS *sym;
11942
11943 /* Create the .note section. */
11944
11945 secp = subseg_new (".note", 0);
11946 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11947
11948 /* Module header note (MHD). */
11949 bname = xstrdup (lbasename (out_file_name));
11950 if ((p = strrchr (bname, '.')))
11951 *p = '\0';
11952
11953 /* VMS note header is 24 bytes long. */
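  /* Descriptive note (added for clarity): the three 8-byte fields below
     are the note's namesz (8, for "IPF/VMS"), descsz and type, analogous
     to the standard ELF note header but with 64-bit fields.  */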
11954 p = frag_more (8 + 8 + 8);
11955 number_to_chars_littleendian (p + 0, 8, 8);
11956 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11957 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11958
11959 p = frag_more (8);
11960 strcpy (p, "IPF/VMS");
11961
11962 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11963 get_vms_time (p);
11964 strcpy (p + 17, "24-FEB-2005 15:00");
11965 p += 17 + 17;
11966 strcpy (p, bname);
11967 p += strlen (bname) + 1;
11968 free (bname);
11969 strcpy (p, "V1.0");
11970
11971 frag_align (3, 0, 0);
11972
11973 /* Language processor name note. */
11974 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11975 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11976
11977 p = frag_more (8 + 8 + 8);
11978 number_to_chars_littleendian (p + 0, 8, 8);
11979 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11980 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11981
11982 p = frag_more (8);
11983 strcpy (p, "IPF/VMS");
11984
11985 p = frag_more (strlen (buf) + 1);
11986 strcpy (p, buf);
11987
11988 frag_align (3, 0, 0);
11989
11990 secp = subseg_new (".vms_display_name_info", 0);
11991 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11992
11993   /* This symbol should be passed on the command line and should vary
11994      according to the language.  */
11995 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11996 absolute_section, 0, &zero_address_frag);
11997 symbol_table_insert (sym);
11998 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
11999
12000 p = frag_more (4);
12001 /* Format 3 of VMS demangler Spec. */
12002 number_to_chars_littleendian (p, 3, 4);
12003
12004 p = frag_more (4);
12005   /* Placeholder for the symbol table index of the above symbol.  */
12006 number_to_chars_littleendian (p, -1, 4);
12007
12008 frag_align (3, 0, 0);
12009
12010 /* We probably can't restore the current segment, for there likely
12011 isn't one yet... */
12012 if (seg && subseg)
12013 subseg_set (seg, subseg);
12014 }
12015
12016 #endif /* TE_VMS */