gdb/ia64-tdep.c
1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libcs
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72 static gdb::optional<gdb::byte_vector> ktab_buf;
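/* Editor's illustrative sketch (not part of the original file): the kind of
   range check described in the comment above, by which ia64_access_mem
   redirects reads in the KERNEL_START..KERNEL_START+ktab_size window to the
   buffered copy of the kernel's unwind table.  The helper and its parameter
   names are hypothetical; it only demonstrates the redirection idea.  */
static int
ktab_redirect_sketch (CORE_ADDR addr, gdb_byte *buf, size_t len,
		      const gdb_byte *ktab_bytes, size_t ktab_bytes_len)
{
  if (addr >= KERNEL_START && addr - KERNEL_START + len <= ktab_bytes_len)
    {
      /* Serve the access from the in-memory copy of the kernel's table
	 instead of reading target memory.  */
      memcpy (buf, ktab_bytes + (addr - KERNEL_START), len);
      return 0;
    }
  return -1;	/* Outside the projected range; use normal memory access.  */
}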
73
74 #endif
75
76 /* An enumeration of the different IA-64 instruction types. */
77
78 typedef enum instruction_type
79 {
80 A, /* Integer ALU ; I-unit or M-unit */
81 I, /* Non-ALU integer; I-unit */
82 M, /* Memory ; M-unit */
83 F, /* Floating-point ; F-unit */
84 B, /* Branch ; B-unit */
85 L, /* Extended (L+X) ; I-unit */
86 X, /* Extended (L+X) ; I-unit */
87 undefined /* undefined or reserved */
88 } instruction_type;
89
90 /* We represent IA-64 PC addresses as the value of the instruction
91 pointer or'd with some bit combination in the low nibble which
92 represents the slot number in the bundle addressed by the
93 instruction pointer. The problem is that the Linux kernel
94 multiplies its slot numbers (for exceptions) by one while the
95 disassembler multiplies its slot numbers by 6. In addition, I've
96 heard it said that the simulator uses 1 as the multiplier.
97
98 I've fixed the disassembler so that the bytes_per_line field will
99 be the slot multiplier. If bytes_per_line comes in as zero, it
100 is set to six (which is how it was set up initially). -- objdump
101 displays pretty disassembly dumps with this value. For our purposes,
102 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
103 never want to also display the raw bytes the way objdump does. */
104
105 #define SLOT_MULTIPLIER 1
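/* Editor's illustrative sketch (not part of the original file): how a
   GDB-style ia64 PC value splits into a bundle address and a slot number
   under the SLOT_MULTIPLIER above.  With a multiplier of 1, 0x40000a02
   names slot 2 of the bundle at 0x40000a00; slot values above 2 are
   handled (with a warning) by fetch_instruction below.  */
static void
decompose_pc_sketch (CORE_ADDR pc, CORE_ADDR *bundle_addr, int *slotnum)
{
  *bundle_addr = pc & ~0x0fULL;			     /* 16-byte aligned bundle.  */
  *slotnum = (int) (pc & 0x0f) / SLOT_MULTIPLIER;    /* Normally 0, 1 or 2.  */
}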
106
107 /* Length in bytes of an instruction bundle. */
108
109 #define BUNDLE_LEN 16
110
111 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
112
113 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
114 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
115 #endif
116
117 static gdbarch_init_ftype ia64_gdbarch_init;
118
119 static gdbarch_register_name_ftype ia64_register_name;
120 static gdbarch_register_type_ftype ia64_register_type;
121 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
122 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
123 static struct type *is_float_or_hfa_type (struct type *t);
124 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
125 CORE_ADDR faddr);
126
127 #define NUM_IA64_RAW_REGS 462
128
129 /* Size in bytes large enough to hold an FP register. */
130 #define IA64_FP_REGISTER_SIZE 16
131
132 static int sp_regnum = IA64_GR12_REGNUM;
133
134 /* NOTE: we treat the register stack registers r32-r127 as
135 pseudo-registers because they may not be accessible via the ptrace
136 register get/set interfaces. */
137
138 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
139 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
140 V127_REGNUM = V32_REGNUM + 95,
141 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
142 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
143
144 /* Array of register names; there should be ia64_num_regs strings in
145 the initializer. */
146
147 static const char * const ia64_register_names[] =
148 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
149 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
150 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
151 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164
165 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
166 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
167 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
168 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
169 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
170 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
171 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
172 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
173 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
174 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
175 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
176 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
177 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
178 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
179 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
180 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
181
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190
191 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
192
193 "vfp", "vrap",
194
195 "pr", "ip", "psr", "cfm",
196
197 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
198 "", "", "", "", "", "", "", "",
199 "rsc", "bsp", "bspstore", "rnat",
200 "", "fcr", "", "",
201 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
202 "ccv", "", "", "", "unat", "", "", "",
203 "fpsr", "", "", "", "itc",
204 "", "", "", "", "", "", "", "", "", "",
205 "", "", "", "", "", "", "", "", "",
206 "pfs", "lc", "ec",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "",
214 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
215 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
216 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
217 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
218 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
219 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
220 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
221 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
222 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
223 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
224 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
225 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
226 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
227 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
228 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
229 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
230
231 "bof",
232
233 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
234 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
235 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
236 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
237 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
238 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
239 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
240 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
241 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
242 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
243 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
244 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
245
246 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
247 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
248 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
249 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
250 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
251 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
252 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
253 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
254 };
255
256 struct ia64_frame_cache
257 {
258 CORE_ADDR base; /* frame pointer base for frame */
259 CORE_ADDR pc; /* function start pc for frame */
260 CORE_ADDR saved_sp; /* stack pointer for frame */
261 CORE_ADDR bsp; /* points at r32 for the current frame */
262 CORE_ADDR cfm; /* cfm value for current frame */
263 CORE_ADDR prev_cfm; /* cfm value for previous frame */
264 int frameless;
265 int sof; /* Size of frame (decoded from cfm value). */
266 int sol; /* Size of locals (decoded from cfm value). */
267 int sor; /* Number of rotating registers (decoded from
268 cfm value). */
269 CORE_ADDR after_prologue;
270 /* Address of first instruction after the last
271 prologue instruction; Note that there may
272 be instructions from the function's body
273 intermingled with the prologue. */
274 int mem_stack_frame_size;
275 /* Size of the memory stack frame (may be zero),
276 or -1 if it has not been determined yet. */
277 int fp_reg; /* Register number (if any) used as a frame pointer
278 for this frame. 0 if no register is being used
279 as the frame pointer. */
280
281 /* Saved registers. */
282 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
283
284 };
285
286 static int
287 floatformat_valid (const struct floatformat *fmt, const void *from)
288 {
289 return 1;
290 }
291
292 static const struct floatformat floatformat_ia64_ext_little =
293 {
294 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
295 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
296 };
297
298 static const struct floatformat floatformat_ia64_ext_big =
299 {
300 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
301 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
302 };
303
304 static const struct floatformat *floatformats_ia64_ext[2] =
305 {
306 &floatformat_ia64_ext_big,
307 &floatformat_ia64_ext_little
308 };
309
310 static struct type *
311 ia64_ext_type (struct gdbarch *gdbarch)
312 {
313 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
314
315 if (!tdep->ia64_ext_type)
316 tdep->ia64_ext_type
317 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
318 floatformats_ia64_ext);
319
320 return tdep->ia64_ext_type;
321 }
322
323 static int
324 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
325 struct reggroup *group)
326 {
327 int vector_p;
328 int float_p;
329 int raw_p;
330 if (group == all_reggroup)
331 return 1;
332 vector_p = register_type (gdbarch, regnum)->is_vector ();
333 float_p = register_type (gdbarch, regnum)->code () == TYPE_CODE_FLT;
334 raw_p = regnum < NUM_IA64_RAW_REGS;
335 if (group == float_reggroup)
336 return float_p;
337 if (group == vector_reggroup)
338 return vector_p;
339 if (group == general_reggroup)
340 return (!vector_p && !float_p);
341 if (group == save_reggroup || group == restore_reggroup)
342 return raw_p;
343 return 0;
344 }
345
346 static const char *
347 ia64_register_name (struct gdbarch *gdbarch, int reg)
348 {
349 return ia64_register_names[reg];
350 }
351
352 struct type *
353 ia64_register_type (struct gdbarch *arch, int reg)
354 {
355 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
356 return ia64_ext_type (arch);
357 else
358 return builtin_type (arch)->builtin_long;
359 }
360
361 static int
362 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
363 {
364 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
365 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
366 return reg;
367 }
368
369
370 /* Extract ``len'' bits from an instruction bundle starting at
371 bit ``from''. */
372
373 static long long
374 extract_bit_field (const gdb_byte *bundle, int from, int len)
375 {
376 long long result = 0LL;
377 int to = from + len;
378 int from_byte = from / 8;
379 int to_byte = to / 8;
380 unsigned char *b = (unsigned char *) bundle;
381 unsigned char c;
382 int lshift;
383 int i;
384
385 c = b[from_byte];
386 if (from_byte == to_byte)
387 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
388 result = c >> (from % 8);
389 lshift = 8 - (from % 8);
390
391 for (i = from_byte+1; i < to_byte; i++)
392 {
393 result |= ((long long) b[i]) << lshift;
394 lshift += 8;
395 }
396
397 if (from_byte < to_byte && (to % 8 != 0))
398 {
399 c = b[to_byte];
400 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
401 result |= ((long long) c) << lshift;
402 }
403
404 return result;
405 }
406
407 /* Replace the specified bits in an instruction bundle. */
408
409 static void
410 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
411 {
412 int to = from + len;
413 int from_byte = from / 8;
414 int to_byte = to / 8;
415 unsigned char *b = (unsigned char *) bundle;
416 unsigned char c;
417
418 if (from_byte == to_byte)
419 {
420 unsigned char left, right;
421 c = b[from_byte];
422 left = (c >> (to % 8)) << (to % 8);
423 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
424 c = (unsigned char) (val & 0xff);
425 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
426 c |= right | left;
427 b[from_byte] = c;
428 }
429 else
430 {
431 int i;
432 c = b[from_byte];
433 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
434 c = c | (val << (from % 8));
435 b[from_byte] = c;
436 val >>= 8 - from % 8;
437
438 for (i = from_byte+1; i < to_byte; i++)
439 {
440 c = val & 0xff;
441 val >>= 8;
442 b[i] = c;
443 }
444
445 if (to % 8 != 0)
446 {
447 unsigned char cv = (unsigned char) val;
448 c = b[to_byte];
449 c = c >> (to % 8) << (to % 8);
450 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
451 b[to_byte] = c;
452 }
453 }
454 }
455
456 /* Return the contents of slot N (for N = 0, 1, or 2) in
457 an instruction bundle. */
458
459 static long long
460 slotN_contents (gdb_byte *bundle, int slotnum)
461 {
462 return extract_bit_field (bundle, 5+41*slotnum, 41);
463 }
464
465 /* Store an instruction in an instruction bundle. */
466
467 static void
468 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
469 {
470 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
471 }
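/* Editor's illustrative sketch (not part of the original file): a round trip
   of a 41-bit instruction through the two helpers above.  Because the
   helpers work at bit granularity, the template bits and the other two
   slots of the bundle are left untouched.  Illustrative only.  */
static void
slot_roundtrip_sketch (void)
{
  gdb_byte bundle[BUNDLE_LEN] = { 0 };
  long long in = 0x0123456789aLL;	   /* any value that fits in 41 bits */
  long long out;

  replace_slotN_contents (bundle, in, 1);  /* write slot 1 (bits 46..86) */
  out = slotN_contents (bundle, 1);	   /* read it back */
  gdb_assert (out == in);
}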
472
473 static const enum instruction_type template_encoding_table[32][3] =
474 {
475 { M, I, I }, /* 00 */
476 { M, I, I }, /* 01 */
477 { M, I, I }, /* 02 */
478 { M, I, I }, /* 03 */
479 { M, L, X }, /* 04 */
480 { M, L, X }, /* 05 */
481 { undefined, undefined, undefined }, /* 06 */
482 { undefined, undefined, undefined }, /* 07 */
483 { M, M, I }, /* 08 */
484 { M, M, I }, /* 09 */
485 { M, M, I }, /* 0A */
486 { M, M, I }, /* 0B */
487 { M, F, I }, /* 0C */
488 { M, F, I }, /* 0D */
489 { M, M, F }, /* 0E */
490 { M, M, F }, /* 0F */
491 { M, I, B }, /* 10 */
492 { M, I, B }, /* 11 */
493 { M, B, B }, /* 12 */
494 { M, B, B }, /* 13 */
495 { undefined, undefined, undefined }, /* 14 */
496 { undefined, undefined, undefined }, /* 15 */
497 { B, B, B }, /* 16 */
498 { B, B, B }, /* 17 */
499 { M, M, B }, /* 18 */
500 { M, M, B }, /* 19 */
501 { undefined, undefined, undefined }, /* 1A */
502 { undefined, undefined, undefined }, /* 1B */
503 { M, F, B }, /* 1C */
504 { M, F, B }, /* 1D */
505 { undefined, undefined, undefined }, /* 1E */
506 { undefined, undefined, undefined }, /* 1F */
507 };
508
509 /* Fetch and (partially) decode an instruction at ADDR and return the
510 address of the next instruction to fetch. */
511
512 static CORE_ADDR
513 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
514 {
515 gdb_byte bundle[BUNDLE_LEN];
516 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
517 long long templ;
518 int val;
519
520 /* Warn about slot numbers greater than 2. We used to generate
521 an error here on the assumption that the user entered an invalid
522 address. But, sometimes GDB itself requests an invalid address.
523 This can (easily) happen when execution stops in a function for
524 which there are no symbols. The prologue scanner will attempt to
525 find the beginning of the function - if the nearest symbol
526 happens to not be aligned on a bundle boundary (16 bytes), the
527 resulting starting address will cause GDB to think that the slot
528 number is too large.
529
530 So we warn about it and set the slot number to zero. It is
531 not necessarily a fatal condition, particularly if debugging
532 at the assembly language level. */
533 if (slotnum > 2)
534 {
535 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
536 "Using slot 0 instead"));
537 slotnum = 0;
538 }
539
540 addr &= ~0x0f;
541
542 val = target_read_memory (addr, bundle, BUNDLE_LEN);
543
544 if (val != 0)
545 return 0;
546
547 *instr = slotN_contents (bundle, slotnum);
548 templ = extract_bit_field (bundle, 0, 5);
549 *it = template_encoding_table[(int)templ][slotnum];
550
551 if (slotnum == 2 || (slotnum == 1 && *it == L))
552 addr += 16;
553 else
554 addr += (slotnum + 1) * SLOT_MULTIPLIER;
555
556 return addr;
557 }
558
559 /* There are 5 different break instructions (break.i, break.b,
560 break.m, break.f, and break.x), but they all have the same
561 encoding. (The five bit template in the low five bits of the
562 instruction bundle distinguishes one from another.)
563
564 The runtime architecture manual specifies that break instructions
565 used for debugging purposes must have the upper two bits of the 21
566 bit immediate set to a 0 and a 1 respectively. A breakpoint
567 instruction encodes the most significant bit of its 21 bit
568 immediate at bit 36 of the 41 bit instruction. The penultimate msb
569 is at bit 25 which leads to the pattern below.
570
571 Originally, I had this set up to do, e.g., a "break.i 0x80000". But
572 it turns out that 0x80000 was used as the syscall break in the early
573 simulators. So I changed the pattern slightly to do "break.i 0x080001"
574 instead. But that didn't work either (I later found out that this
575 pattern was used by the simulator that I was using.) So I ended up
576 using the pattern seen below.
577
578 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
579 while we need bit-based addressing, as the instruction length is 41 bits and
580 we must not modify/corrupt the adjacent slots in the same bundle.
581 Fortunately we may store a larger memory area, including the adjacent bits,
582 with their original content (not the breakpoints possibly already stored there).
583 We need to be careful in ia64_memory_remove_breakpoint to always restore
584 only the specific bits of this instruction ignoring any adjacent stored
585 bits.
586
587 We use the original addressing with the low nibble in the range <0..2> which
588 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
589 as the direct byte offset of SHADOW_CONTENTS. We store the whole BUNDLE_LEN
590 bytes minus the (up to two) possibly skipped bytes so that we do not run
591 into the next bundle.
592
593 If we wanted to store the whole bundle to SHADOW_CONTENTS we would have
594 to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
595 In that case there would be no other place to store
596 SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
597 SLOTNUM in ia64_memory_remove_breakpoint.
598
599 There is one special case where we need to be extra careful:
600 L-X instructions, which are instructions that occupy 2 slots
601 (The L part is always in slot 1, and the X part is always in
602 slot 2). We must refuse to insert breakpoints for an address
603 that points at slot 2 of a bundle where an L-X instruction is
604 present, since there is logically no instruction at that address.
605 However, to make things more interesting, the opcode of L-X
606 instructions is located in slot 2. This means that, to insert
607 a breakpoint at an address that points to slot 1, we actually
608 need to write the breakpoint in slot 2! Slot 1 is actually
609 the extended operand, so writing the breakpoint there would not
610 have the desired effect. Another side-effect of this issue
611 is that we need to make sure that the shadow contents buffer
612 does save byte 15 of our instruction bundle (this is the tail
613 end of slot 2, which wouldn't be saved if we were to insert
614 the breakpoint in slot 1).
615
616 ia64 16-byte bundle layout:
617 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
618
619 The current addressing used by the code below:
620 original PC placed_address placed_size required covered
621 == bp_tgt->shadow_len reqd \subset covered
622 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
623 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
624 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
625
626 L-X instructions are treated a little specially, as explained above:
627 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
628
629 `objdump -d' and some other tools show offsets that differ from this addressing:
630 original PC byte where starts the instruction objdump offset
631 0xABCDE0 0xABCDE0 0xABCDE0
632 0xABCDE1 0xABCDE5 0xABCDE6
633 0xABCDE2 0xABCDEA 0xABCDEC
634 */
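/* Editor's illustrative sketch (not part of the original file): the shadow
   addressing described in the table above.  The placed address keeps the
   slot nibble of the requested PC, and the shadow covers the remainder of
   the 16-byte bundle, so e.g. a request for 0xABCDE2 gives slot 2,
   placed address 0xABCDE2 and shadow length 0xE (bytes <0x2..0xF>).  */
static void
shadow_layout_sketch (CORE_ADDR requested_pc, CORE_ADDR *placed_address,
		      int *shadow_len)
{
  int slotnum = (int) (requested_pc & 0x0f) / SLOT_MULTIPLIER;

  *placed_address = requested_pc;	  /* Slot nibble is kept as-is.  */
  *shadow_len = BUNDLE_LEN - slotnum;	  /* Rest of the bundle is saved.  */
}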
635
636 #define IA64_BREAKPOINT 0x00003333300LL
637
638 static int
639 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
640 struct bp_target_info *bp_tgt)
641 {
642 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
643 gdb_byte bundle[BUNDLE_LEN];
644 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
645 long long instr_breakpoint;
646 int val;
647 int templ;
648
649 if (slotnum > 2)
650 error (_("Can't insert breakpoint for slot numbers greater than 2."));
651
652 addr &= ~0x0f;
653
654 /* Enable the automatic memory restoration from breakpoints while
655 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
656 Otherwise, we could possibly store into the shadow parts of the adjacent
657 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
658 breakpoint instruction bits region. */
659 scoped_restore restore_memory_0
660 = make_scoped_restore_show_memory_breakpoints (0);
661 val = target_read_memory (addr, bundle, BUNDLE_LEN);
662 if (val != 0)
663 return val;
664
665 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
666 for addressing the SHADOW_CONTENTS placement. */
667 shadow_slotnum = slotnum;
668
669 /* Always cover the last byte of the bundle in case we are inserting
670 a breakpoint on an L-X instruction. */
671 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
672
673 templ = extract_bit_field (bundle, 0, 5);
674 if (template_encoding_table[templ][slotnum] == X)
675 {
676 /* X unit types can only be used in slot 2, and are actually
677 part of a 2-slot L-X instruction. We cannot break at this
678 address, as this is the second half of an instruction that
679 lives in slot 1 of that bundle. */
680 gdb_assert (slotnum == 2);
681 error (_("Can't insert breakpoint for non-existing slot X"));
682 }
683 if (template_encoding_table[templ][slotnum] == L)
684 {
685 /* L unit types can only be used in slot 1. But the associated
686 opcode for that instruction is in slot 2, so bump the slot number
687 accordingly. */
688 gdb_assert (slotnum == 1);
689 slotnum = 2;
690 }
691
692 /* Store the whole bundle, except for the initial bytes skipped according to
693 the slot number, which is interpreted as a byte offset in PLACED_ADDRESS. */
694 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
695 bp_tgt->shadow_len);
696
697 /* Re-read the same bundle as above except that, this time, read it in order
698 to compute the new bundle inside which we will be inserting the
699 breakpoint. Therefore, disable the automatic memory restoration from
700 breakpoints while we read our instruction bundle. Otherwise, the general
701 restoration mechanism kicks in and we would possibly remove parts of the
702 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
703 the real breakpoint instruction bits region. */
704 scoped_restore restore_memory_1
705 = make_scoped_restore_show_memory_breakpoints (1);
706 val = target_read_memory (addr, bundle, BUNDLE_LEN);
707 if (val != 0)
708 return val;
709
710 /* Breakpoints already present in the code will get detected and not get
711 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
712 location cannot induce the internal error as they are optimized into
713 a single instance by update_global_location_list. */
714 instr_breakpoint = slotN_contents (bundle, slotnum);
715 if (instr_breakpoint == IA64_BREAKPOINT)
716 internal_error (__FILE__, __LINE__,
717 _("Address %s already contains a breakpoint."),
718 paddress (gdbarch, bp_tgt->placed_address));
719 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
720
721 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
722 bp_tgt->shadow_len);
723
724 return val;
725 }
726
727 static int
728 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
729 struct bp_target_info *bp_tgt)
730 {
731 CORE_ADDR addr = bp_tgt->placed_address;
732 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
733 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
734 long long instr_breakpoint, instr_saved;
735 int val;
736 int templ;
737
738 addr &= ~0x0f;
739
740 /* Disable the automatic memory restoration from breakpoints while
741 we read our instruction bundle. Otherwise, the general restoration
742 mechanism kicks in and we would possibly remove parts of the adjacent
743 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
744 breakpoint instruction bits region. */
745 scoped_restore restore_memory_1
746 = make_scoped_restore_show_memory_breakpoints (1);
747 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
748 if (val != 0)
749 return val;
750
751 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
752 for addressing the SHADOW_CONTENTS placement. */
753 shadow_slotnum = slotnum;
754
755 templ = extract_bit_field (bundle_mem, 0, 5);
756 if (template_encoding_table[templ][slotnum] == X)
757 {
758 /* X unit types can only be used in slot 2, and are actually
759 part of a 2-slot L-X instruction. We refuse to insert
760 breakpoints at this address, so there should be no reason
761 for us attempting to remove one there, except if the program's
762 code somehow got modified in memory. */
763 gdb_assert (slotnum == 2);
764 warning (_("Cannot remove breakpoint at address %s from non-existing "
765 "X-type slot, memory has changed underneath"),
766 paddress (gdbarch, bp_tgt->placed_address));
767 return -1;
768 }
769 if (template_encoding_table[templ][slotnum] == L)
770 {
771 /* L unit types can only be used in slot 1. But the breakpoint
772 was actually saved using slot 2, so update the slot number
773 accordingly. */
774 gdb_assert (slotnum == 1);
775 slotnum = 2;
776 }
777
778 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
779
780 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
781 if (instr_breakpoint != IA64_BREAKPOINT)
782 {
783 warning (_("Cannot remove breakpoint at address %s, "
784 "no break instruction at such address."),
785 paddress (gdbarch, bp_tgt->placed_address));
786 return -1;
787 }
788
789 /* Extract the original saved instruction from SLOTNUM normalizing its
790 bit-shift for INSTR_SAVED. */
791 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
792 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
793 bp_tgt->shadow_len);
794 instr_saved = slotN_contents (bundle_saved, slotnum);
795
796 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
797 and not any of the other ones that are stored in SHADOW_CONTENTS. */
798 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
799 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
800
801 return val;
802 }
803
804 /* Implement the breakpoint_kind_from_pc gdbarch method. */
805
806 static int
807 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
808 {
809 /* A place holder of gdbarch method breakpoint_kind_from_pc. */
810 return 0;
811 }
812
813 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
814 instruction slot ranges are bit-granular (41 bits), we have to provide an
815 extended range as described for ia64_memory_insert_breakpoint. We also take
816 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
817 make a match for permanent breakpoints. */
818
819 static const gdb_byte *
820 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
821 CORE_ADDR *pcptr, int *lenptr)
822 {
823 CORE_ADDR addr = *pcptr;
824 static gdb_byte bundle[BUNDLE_LEN];
825 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
826 long long instr_fetched;
827 int val;
828 int templ;
829
830 if (slotnum > 2)
831 error (_("Can't insert breakpoint for slot numbers greater than 2."));
832
833 addr &= ~0x0f;
834
835 /* Enable the automatic memory restoration from breakpoints while
836 we read our instruction bundle to match bp_loc_is_permanent. */
837 {
838 scoped_restore restore_memory_0
839 = make_scoped_restore_show_memory_breakpoints (0);
840 val = target_read_memory (addr, bundle, BUNDLE_LEN);
841 }
842
843 /* The memory might be unreachable. This can happen, for instance,
844 when the user inserts a breakpoint at an invalid address. */
845 if (val != 0)
846 return NULL;
847
848 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
849 for addressing the SHADOW_CONTENTS placement. */
850 shadow_slotnum = slotnum;
851
852 /* Always cover the last byte of the bundle for the L-X slot case. */
853 *lenptr = BUNDLE_LEN - shadow_slotnum;
854
855 /* Check for an L-type instruction in slot 1; if present, bump the slot
856 number up to slot 2. */
857 templ = extract_bit_field (bundle, 0, 5);
858 if (template_encoding_table[templ][slotnum] == X)
859 {
860 gdb_assert (slotnum == 2);
861 error (_("Can't insert breakpoint for non-existing slot X"));
862 }
863 if (template_encoding_table[templ][slotnum] == L)
864 {
865 gdb_assert (slotnum == 1);
866 slotnum = 2;
867 }
868
869 /* A break instruction has all its opcode bits cleared except for
870 the parameter value. For an L+X slot pair we are at the X slot (slot 2) so
871 we should not touch the L slot - the upper 41 bits of the parameter. */
872 instr_fetched = slotN_contents (bundle, slotnum);
873 instr_fetched &= 0x1003ffffc0LL;
874 replace_slotN_contents (bundle, instr_fetched, slotnum);
875
876 return bundle + shadow_slotnum;
877 }
878
879 static CORE_ADDR
880 ia64_read_pc (readable_regcache *regcache)
881 {
882 ULONGEST psr_value, pc_value;
883 int slot_num;
884
885 regcache->cooked_read (IA64_PSR_REGNUM, &psr_value);
886 regcache->cooked_read (IA64_IP_REGNUM, &pc_value);
887 slot_num = (psr_value >> 41) & 3;
888
889 return pc_value | (slot_num * SLOT_MULTIPLIER);
890 }
891
892 void
893 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
894 {
895 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
896 ULONGEST psr_value;
897
898 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
899 psr_value &= ~(3LL << 41);
900 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
901
902 new_pc &= ~0xfLL;
903
904 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
905 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
906 }
907
908 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
909
910 /* Returns the address of the slot that's NSLOTS slots away from
911 the address ADDR. NSLOTS may be positive or negative. */
912 static CORE_ADDR
913 rse_address_add(CORE_ADDR addr, int nslots)
914 {
915 CORE_ADDR new_addr;
916 int mandatory_nat_slots = nslots / 63;
917 int direction = nslots < 0 ? -1 : 1;
918
919 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
920
921 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
922 new_addr += 8 * direction;
923
924 if (IS_NaT_COLLECTION_ADDR(new_addr))
925 new_addr += 8 * direction;
926
927 return new_addr;
928 }
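/* Editor's illustrative sketch (not part of the original file): stepping a
   single slot forward across a NaT collection with the helper above.  If
   ADDR ends in 0x1f0, the next 8-byte slot would end in 0x1f8, which is a
   NaT collection address, so the result ends in 0x200 instead.  */
static void
rse_skip_nat_sketch (void)
{
  CORE_ADDR addr = 0x600000000001f0ULL;	       /* slot just below a collection */
  CORE_ADDR next = rse_address_add (addr, 1);  /* skips the 0x...1f8 NaT slot */

  gdb_assert (IS_NaT_COLLECTION_ADDR (addr + 8));
  gdb_assert (next == addr + 16);
}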
929
930 static enum register_status
931 ia64_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
932 int regnum, gdb_byte *buf)
933 {
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
935 enum register_status status;
936
937 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
938 {
939 #ifdef HAVE_LIBUNWIND_IA64_H
940 /* First try to use the libunwind special reg accessor;
941 otherwise fall back to the standard logic. */
942 if (!libunwind_is_initialized ()
943 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
944 #endif
945 {
946 /* The fallback position is to assume that r32-r127 are
947 found sequentially in memory starting at $bof. This
948 isn't always true, but without libunwind, this is the
949 best we can do. */
950 ULONGEST cfm;
951 ULONGEST bsp;
952 CORE_ADDR reg;
953
954 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
955 if (status != REG_VALID)
956 return status;
957
958 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
959 if (status != REG_VALID)
960 return status;
961
962 /* The bsp points at the end of the register frame so we
963 subtract the size of frame from it to get start of
964 register frame. */
965 bsp = rse_address_add (bsp, -(cfm & 0x7f));
966
967 if ((cfm & 0x7f) > regnum - V32_REGNUM)
968 {
969 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
970 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
971 store_unsigned_integer (buf, register_size (gdbarch, regnum),
972 byte_order, reg);
973 }
974 else
975 store_unsigned_integer (buf, register_size (gdbarch, regnum),
976 byte_order, 0);
977 }
978 }
979 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
980 {
981 ULONGEST unatN_val;
982 ULONGEST unat;
983
984 status = regcache->cooked_read (IA64_UNAT_REGNUM, &unat);
985 if (status != REG_VALID)
986 return status;
987 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
988 store_unsigned_integer (buf, register_size (gdbarch, regnum),
989 byte_order, unatN_val);
990 }
991 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
992 {
993 ULONGEST natN_val = 0;
994 ULONGEST bsp;
995 ULONGEST cfm;
996 CORE_ADDR gr_addr = 0;
997
998 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
999 if (status != REG_VALID)
1000 return status;
1001
1002 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1003 if (status != REG_VALID)
1004 return status;
1005
1006 /* The bsp points at the end of the register frame so we
1007 subtract the size of frame from it to get start of register frame. */
1008 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1009
1010 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1011 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1012
1013 if (gr_addr != 0)
1014 {
1015 /* Compute address of nat collection bits. */
1016 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1017 ULONGEST nat_collection;
1018 int nat_bit;
1019 /* If our nat collection address is bigger than bsp, we have to get
1020 the nat collection from rnat. Otherwise, we fetch the nat
1021 collection from the computed address. */
1022 if (nat_addr >= bsp)
1023 regcache->cooked_read (IA64_RNAT_REGNUM, &nat_collection);
1024 else
1025 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1026 nat_bit = (gr_addr >> 3) & 0x3f;
1027 natN_val = (nat_collection >> nat_bit) & 1;
1028 }
1029
1030 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1031 byte_order, natN_val);
1032 }
1033 else if (regnum == VBOF_REGNUM)
1034 {
1035 /* A virtual register frame start is provided for user convenience.
1036 It can be calculated as bsp - sof (the size of the frame). */
1037 ULONGEST bsp, vbsp;
1038 ULONGEST cfm;
1039
1040 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1041 if (status != REG_VALID)
1042 return status;
1043 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1044 if (status != REG_VALID)
1045 return status;
1046
1047 /* The bsp points at the end of the register frame so we
1048 subtract the size of frame from it to get beginning of frame. */
1049 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1050 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1051 byte_order, vbsp);
1052 }
1053 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1054 {
1055 ULONGEST pr;
1056 ULONGEST cfm;
1057 ULONGEST prN_val;
1058
1059 status = regcache->cooked_read (IA64_PR_REGNUM, &pr);
1060 if (status != REG_VALID)
1061 return status;
1062 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1063 if (status != REG_VALID)
1064 return status;
1065
1066 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1067 {
1068 /* Fetch predicate register rename base from current frame
1069 marker for this frame. */
1070 int rrb_pr = (cfm >> 32) & 0x3f;
1071
1072 /* Adjust the register number to account for register rotation. */
1073 regnum = VP16_REGNUM
1074 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1075 }
1076 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1077 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1078 byte_order, prN_val);
1079 }
1080 else
1081 memset (buf, 0, register_size (gdbarch, regnum));
1082
1083 return REG_VALID;
1084 }
1085
1086 static void
1087 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1088 int regnum, const gdb_byte *buf)
1089 {
1090 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1091
1092 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1093 {
1094 ULONGEST bsp;
1095 ULONGEST cfm;
1096 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1097 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1098
1099 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1100
1101 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1102 {
1103 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1104 write_memory (reg_addr, buf, 8);
1105 }
1106 }
1107 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1108 {
1109 ULONGEST unatN_val, unat, unatN_mask;
1110 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1111 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1112 regnum),
1113 byte_order);
1114 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1115 if (unatN_val == 0)
1116 unat &= ~unatN_mask;
1117 else if (unatN_val == 1)
1118 unat |= unatN_mask;
1119 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1120 }
1121 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1122 {
1123 ULONGEST natN_val;
1124 ULONGEST bsp;
1125 ULONGEST cfm;
1126 CORE_ADDR gr_addr = 0;
1127 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1128 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1129
1130 /* The bsp points at the end of the register frame so we
1131 subtract the size of frame from it to get start of register frame. */
1132 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1133
1134 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1135 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1136
1137 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1138 regnum),
1139 byte_order);
1140
1141 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1142 {
1143 /* Compute address of nat collection bits. */
1144 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1145 CORE_ADDR nat_collection;
1146 int natN_bit = (gr_addr >> 3) & 0x3f;
1147 ULONGEST natN_mask = (1LL << natN_bit);
1148 /* If our nat collection address is bigger than bsp, we have to get
1149 the nat collection from rnat. Otherwise, we fetch the nat
1150 collection from the computed address. */
1151 if (nat_addr >= bsp)
1152 {
1153 regcache_cooked_read_unsigned (regcache,
1154 IA64_RNAT_REGNUM,
1155 &nat_collection);
1156 if (natN_val)
1157 nat_collection |= natN_mask;
1158 else
1159 nat_collection &= ~natN_mask;
1160 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1161 nat_collection);
1162 }
1163 else
1164 {
1165 gdb_byte nat_buf[8];
1166 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1167 if (natN_val)
1168 nat_collection |= natN_mask;
1169 else
1170 nat_collection &= ~natN_mask;
1171 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1172 byte_order, nat_collection);
1173 write_memory (nat_addr, nat_buf, 8);
1174 }
1175 }
1176 }
1177 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1178 {
1179 ULONGEST pr;
1180 ULONGEST cfm;
1181 ULONGEST prN_val;
1182 ULONGEST prN_mask;
1183
1184 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1185 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1186
1187 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1188 {
1189 /* Fetch predicate register rename base from current frame
1190 marker for this frame. */
1191 int rrb_pr = (cfm >> 32) & 0x3f;
1192
1193 /* Adjust the register number to account for register rotation. */
1194 regnum = VP16_REGNUM
1195 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1196 }
1197 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1198 byte_order);
1199 prN_mask = (1LL << (regnum - VP0_REGNUM));
1200 if (prN_val == 0)
1201 pr &= ~prN_mask;
1202 else if (prN_val == 1)
1203 pr |= prN_mask;
1204 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1205 }
1206 }
1207
1208 /* The ia64 needs to convert between various IEEE floating-point formats
1209 and the special ia64 floating point register format. */
1210
1211 static int
1212 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1213 {
1214 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1215 && type->code () == TYPE_CODE_FLT
1216 && type != ia64_ext_type (gdbarch));
1217 }
1218
1219 static int
1220 ia64_register_to_value (struct frame_info *frame, int regnum,
1221 struct type *valtype, gdb_byte *out,
1222 int *optimizedp, int *unavailablep)
1223 {
1224 struct gdbarch *gdbarch = get_frame_arch (frame);
1225 gdb_byte in[IA64_FP_REGISTER_SIZE];
1226
1227 /* Convert to TYPE. */
1228 if (!get_frame_register_bytes (frame, regnum, 0,
1229 gdb::make_array_view (in,
1230 register_size (gdbarch,
1231 regnum)),
1232 optimizedp, unavailablep))
1233 return 0;
1234
1235 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1236 *optimizedp = *unavailablep = 0;
1237 return 1;
1238 }
1239
1240 static void
1241 ia64_value_to_register (struct frame_info *frame, int regnum,
1242 struct type *valtype, const gdb_byte *in)
1243 {
1244 struct gdbarch *gdbarch = get_frame_arch (frame);
1245 gdb_byte out[IA64_FP_REGISTER_SIZE];
1246 target_float_convert (in, valtype, out, ia64_ext_type (gdbarch));
1247 put_frame_register (frame, regnum, out);
1248 }
1249
1250
1251 /* Limit the number of skipped non-prologue instructions, since examining
1252 the prologue is expensive. */
1253 static int max_skip_non_prologue_insns = 40;
1254
1255 /* Given PC representing the starting address of a function, and
1256 LIM_PC which is the (sloppy) limit to which to scan when looking
1257 for a prologue, attempt to further refine this limit by using
1258 the line data in the symbol table. If successful, a better guess
1259 on where the prologue ends is returned, otherwise the previous
1260 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1261 which will be set to indicate whether the returned limit may be
1262 used with no further scanning in the event that the function is
1263 frameless. */
1264
1265 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1266 superseded by skip_prologue_using_sal. */
1267
1268 static CORE_ADDR
1269 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1270 {
1271 struct symtab_and_line prologue_sal;
1272 CORE_ADDR start_pc = pc;
1273 CORE_ADDR end_pc;
1274
1275 /* The prologue cannot possibly go past the function end itself,
1276 so we can already adjust LIM_PC accordingly. */
1277 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1278 lim_pc = end_pc;
1279
1280 /* Start off not trusting the limit. */
1281 *trust_limit = 0;
1282
1283 prologue_sal = find_pc_line (pc, 0);
1284 if (prologue_sal.line != 0)
1285 {
1286 int i;
1287 CORE_ADDR addr = prologue_sal.end;
1288
1289 /* Handle the case in which the compiler's optimizer/scheduler
1290 has moved instructions into the prologue. We scan ahead
1291 in the function looking for address ranges whose corresponding
1292 line number is less than or equal to the first one that we
1293 found for the function. (It can be less than when the
1294 scheduler puts a body instruction before the first prologue
1295 instruction.) */
1296 for (i = 2 * max_skip_non_prologue_insns;
1297 i > 0 && (lim_pc == 0 || addr < lim_pc);
1298 i--)
1299 {
1300 struct symtab_and_line sal;
1301
1302 sal = find_pc_line (addr, 0);
1303 if (sal.line == 0)
1304 break;
1305 if (sal.line <= prologue_sal.line
1306 && sal.symtab == prologue_sal.symtab)
1307 {
1308 prologue_sal = sal;
1309 }
1310 addr = sal.end;
1311 }
1312
1313 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1314 {
1315 lim_pc = prologue_sal.end;
1316 if (start_pc == get_pc_function_start (lim_pc))
1317 *trust_limit = 1;
1318 }
1319 }
1320 return lim_pc;
1321 }
1322
1323 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1324 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1325 || (14 <= (_regnum_) && (_regnum_) <= 31))
1326 #define imm9(_instr_) \
1327 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1328 | (((_instr_) & 0x00008000000LL) >> 20) \
1329 | (((_instr_) & 0x00000001fc0LL) >> 6))
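/* Editor's illustrative sketch (not part of the original file): the sign
   extension performed by the imm9 macro above.  With only the sign bit
   (instruction bit 36) set, the immediate decodes to -256; with instruction
   bit 27 (imm bit 7) and bits 6..12 (imm bits 0..6) set, it decodes to +255.
   Illustrative only.  */
static void
imm9_sketch (void)
{
  long long neg = 0x01000000000LL;	       /* sign bit only */
  long long pos = 0x00008000000LL | 0x1fc0LL;  /* imm bit 7 | imm bits 0..6 */

  gdb_assert (imm9 (neg) == -256);
  gdb_assert (imm9 (pos) == 255);
}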
1330
1331 /* Allocate and initialize a frame cache. */
1332
1333 static struct ia64_frame_cache *
1334 ia64_alloc_frame_cache (void)
1335 {
1336 struct ia64_frame_cache *cache;
1337 int i;
1338
1339 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1340
1341 /* Base address. */
1342 cache->base = 0;
1343 cache->pc = 0;
1344 cache->cfm = 0;
1345 cache->prev_cfm = 0;
1346 cache->sof = 0;
1347 cache->sol = 0;
1348 cache->sor = 0;
1349 cache->bsp = 0;
1350 cache->fp_reg = 0;
1351 cache->frameless = 1;
1352
1353 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1354 cache->saved_regs[i] = 0;
1355
1356 return cache;
1357 }
1358
1359 static CORE_ADDR
1360 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1361 struct frame_info *this_frame,
1362 struct ia64_frame_cache *cache)
1363 {
1364 CORE_ADDR next_pc;
1365 CORE_ADDR last_prologue_pc = pc;
1366 instruction_type it;
1367 long long instr;
1368 int cfm_reg = 0;
1369 int ret_reg = 0;
1370 int fp_reg = 0;
1371 int unat_save_reg = 0;
1372 int pr_save_reg = 0;
1373 int mem_stack_frame_size = 0;
1374 int spill_reg = 0;
1375 CORE_ADDR spill_addr = 0;
1376 char instores[8];
1377 char infpstores[8];
1378 char reg_contents[256];
1379 int trust_limit;
1380 int frameless = 1;
1381 int i;
1382 CORE_ADDR addr;
1383 gdb_byte buf[8];
1384 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1385
1386 memset (instores, 0, sizeof instores);
1387 memset (infpstores, 0, sizeof infpstores);
1388 memset (reg_contents, 0, sizeof reg_contents);
1389
1390 if (cache->after_prologue != 0
1391 && cache->after_prologue <= lim_pc)
1392 return cache->after_prologue;
1393
1394 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1395 next_pc = fetch_instruction (pc, &it, &instr);
1396
1397 /* We want to check if we have a recognizable function start before we
1398 look ahead for a prologue. */
1399 if (pc < lim_pc && next_pc
1400 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1401 {
1402 /* alloc - start of a regular function. */
1403 int sol_bits = (int) ((instr & 0x00007f00000LL) >> 20);
1404 int sof_bits = (int) ((instr & 0x000000fe000LL) >> 13);
1405 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1406
1407 /* Verify that the current cfm matches what we think is the
1408 function start. If we have somehow jumped within a function,
1409 we do not want to interpret the prologue and calculate the
1410 addresses of various registers such as the return address.
1411 We will instead treat the frame as frameless. */
1412 if (!this_frame ||
1413 (sof_bits == (cache->cfm & 0x7f) &&
1414 sol_bits == ((cache->cfm >> 7) & 0x7f)))
1415 frameless = 0;
1416
1417 cfm_reg = rN;
1418 last_prologue_pc = next_pc;
1419 pc = next_pc;
1420 }
1421 else
1422 {
1423 /* Look for a leaf routine. */
1424 if (pc < lim_pc && next_pc
1425 && (it == I || it == M)
1426 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1427 {
1428 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1429 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1430 | ((instr & 0x001f8000000LL) >> 20)
1431 | ((instr & 0x000000fe000LL) >> 13));
1432 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1433 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1434 int qp = (int) (instr & 0x0000000003fLL);
1435 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1436 {
1437 /* mov r2, r12 - beginning of leaf routine. */
1438 fp_reg = rN;
1439 last_prologue_pc = next_pc;
1440 }
1441 }
1442
1443 /* If we don't recognize a regular function or leaf routine, we are
1444 done. */
1445 if (!fp_reg)
1446 {
1447 pc = lim_pc;
1448 if (trust_limit)
1449 last_prologue_pc = lim_pc;
1450 }
1451 }
1452
1453 /* Loop, looking for prologue instructions, keeping track of
1454 where preserved registers were spilled. */
1455 while (pc < lim_pc)
1456 {
1457 next_pc = fetch_instruction (pc, &it, &instr);
1458 if (next_pc == 0)
1459 break;
1460
1461 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1462 {
1463 /* Exit loop upon hitting a non-nop branch instruction. */
1464 if (trust_limit)
1465 lim_pc = pc;
1466 break;
1467 }
1468 else if (((instr & 0x3fLL) != 0LL) &&
1469 (frameless || ret_reg != 0))
1470 {
1471 /* Exit loop upon hitting a predicated instruction if
1472 we already have the return register or if we are frameless. */
1473 if (trust_limit)
1474 lim_pc = pc;
1475 break;
1476 }
1477 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1478 {
1479 /* Move from BR */
1480 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1481 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1482 int qp = (int) (instr & 0x0000000003f);
1483
1484 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1485 {
1486 ret_reg = rN;
1487 last_prologue_pc = next_pc;
1488 }
1489 }
1490 else if ((it == I || it == M)
1491 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1492 {
1493 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1494 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1495 | ((instr & 0x001f8000000LL) >> 20)
1496 | ((instr & 0x000000fe000LL) >> 13));
1497 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1498 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1499 int qp = (int) (instr & 0x0000000003fLL);
1500
1501 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1502 {
1503 /* mov rN, r12 */
1504 fp_reg = rN;
1505 last_prologue_pc = next_pc;
1506 }
1507 else if (qp == 0 && rN == 12 && rM == 12)
1508 {
1509 /* adds r12, -mem_stack_frame_size, r12 */
1510 mem_stack_frame_size -= imm;
1511 last_prologue_pc = next_pc;
1512 }
1513 else if (qp == 0 && rN == 2
1514 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1515 {
1516 CORE_ADDR saved_sp = 0;
1517 /* adds r2, spilloffset, rFramePointer
1518 or
1519 adds r2, spilloffset, r12
1520
1521 Get ready for stf.spill or st8.spill instructions.
1522 The address to start spilling at is loaded into r2.
1523 FIXME: Why r2? That's what gcc currently uses; it
1524 could well be different for other compilers. */
1525
1526 /* Hmm... whether or not this will work will depend on
1527 where the pc is. If it's still early in the prologue
1528 this'll be wrong. FIXME */
1529 if (this_frame)
1530 saved_sp = get_frame_register_unsigned (this_frame,
1531 sp_regnum);
1532 spill_addr = saved_sp
1533 + (rM == 12 ? 0 : mem_stack_frame_size)
1534 + imm;
1535 spill_reg = rN;
1536 last_prologue_pc = next_pc;
1537 }
1538 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1539 rN < 256 && imm == 0)
1540 {
1541 /* mov rN, rM where rM is an input register. */
1542 reg_contents[rN] = rM;
1543 last_prologue_pc = next_pc;
1544 }
1545 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1546 rM == 2)
1547 {
1548 /* mov r12, r2 */
1549 last_prologue_pc = next_pc;
1550 break;
1551 }
1552 }
1553 else if (it == M
1554 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1555 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1556 {
1557 /* stf.spill [rN] = fM, imm9
1558 or
1559 stf.spill [rN] = fM */
1560
1561 int imm = imm9(instr);
1562 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1563 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1564 int qp = (int) (instr & 0x0000000003fLL);
1565 if (qp == 0 && rN == spill_reg && spill_addr != 0
1566 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1567 {
1568 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1569
1570 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1571 spill_addr += imm;
1572 else
1573 spill_addr = 0; /* last one; must be done. */
1574 last_prologue_pc = next_pc;
1575 }
1576 }
1577 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1578 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1579 {
1580 /* mov.m rN = arM
1581 or
1582 mov.i rN = arM */
1583
1584 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1585 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1586 int qp = (int) (instr & 0x0000000003fLL);
1587 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1588 {
1589 /* We have something like "mov.m r3 = ar.unat". Remember the
1590 r3 (or whatever) and watch for a store of this register... */
1591 unat_save_reg = rN;
1592 last_prologue_pc = next_pc;
1593 }
1594 }
1595 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1596 {
1597 /* mov rN = pr */
1598 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1599 int qp = (int) (instr & 0x0000000003fLL);
1600 if (qp == 0 && isScratch (rN))
1601 {
1602 pr_save_reg = rN;
1603 last_prologue_pc = next_pc;
1604 }
1605 }
1606 else if (it == M
1607 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1608 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1609 {
1610 /* st8 [rN] = rM
1611 or
1612 st8 [rN] = rM, imm9 */
1613 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1614 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1615 int qp = (int) (instr & 0x0000000003fLL);
1616 int indirect = rM < 256 ? reg_contents[rM] : 0;
1617 if (qp == 0 && rN == spill_reg && spill_addr != 0
1618 && (rM == unat_save_reg || rM == pr_save_reg))
1619 {
1620 /* We've found a spill of either the UNAT register or the PR
1621 register. (Well, not exactly; what we've actually found is
1622 a spill of the register that UNAT or PR was moved to).
1623 Record that fact and move on... */
1624 if (rM == unat_save_reg)
1625 {
1626 /* Track UNAT register. */
1627 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1628 unat_save_reg = 0;
1629 }
1630 else
1631 {
1632 /* Track PR register. */
1633 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1634 pr_save_reg = 0;
1635 }
1636 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1637 /* st8 [rN] = rM, imm9 */
1638 spill_addr += imm9(instr);
1639 else
1640 spill_addr = 0; /* Must be done spilling. */
1641 last_prologue_pc = next_pc;
1642 }
1643 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1644 {
1645 /* Allow up to one store of each input register. */
1646 instores[rM-32] = 1;
1647 last_prologue_pc = next_pc;
1648 }
1649 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1650 !instores[indirect-32])
1651 {
1652 /* Allow an indirect store of an input register. */
1653 instores[indirect-32] = 1;
1654 last_prologue_pc = next_pc;
1655 }
1656 }
1657 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1658 {
1659 /* One of
1660 st1 [rN] = rM
1661 st2 [rN] = rM
1662 st4 [rN] = rM
1663 st8 [rN] = rM
1664 Note that the st8 case is handled in the clause above.
1665
1666 Advance over stores of input registers. One store per input
1667 register is permitted. */
1668 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1669 int qp = (int) (instr & 0x0000000003fLL);
1670 int indirect = rM < 256 ? reg_contents[rM] : 0;
1671 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1672 {
1673 instores[rM-32] = 1;
1674 last_prologue_pc = next_pc;
1675 }
1676 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1677 !instores[indirect-32])
1678 {
1679 /* Allow an indirect store of an input register. */
1680 instores[indirect-32] = 1;
1681 last_prologue_pc = next_pc;
1682 }
1683 }
1684 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1685 {
1686 /* Either
1687 stfs [rN] = fM
1688 or
1689 stfd [rN] = fM
1690
1691 Advance over stores of floating point input registers. Again
1692 one store per register is permitted. */
1693 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1694 int qp = (int) (instr & 0x0000000003fLL);
1695 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1696 {
1697 infpstores[fM-8] = 1;
1698 last_prologue_pc = next_pc;
1699 }
1700 }
1701 else if (it == M
1702 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1703 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1704 {
1705 /* st8.spill [rN] = rM
1706 or
1707 st8.spill [rN] = rM, imm9 */
1708 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1709 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1710 int qp = (int) (instr & 0x0000000003fLL);
1711 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1712 {
1713 /* We've found a spill of one of the preserved general purpose
1714 regs. Record the spill address and advance the spill
1715 register if appropriate. */
1716 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1717 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1718 /* st8.spill [rN] = rM, imm9 */
1719 spill_addr += imm9(instr);
1720 else
1721 spill_addr = 0; /* Done spilling. */
1722 last_prologue_pc = next_pc;
1723 }
1724 }
1725
1726 pc = next_pc;
1727 }
1728
1729 /* If not frameless and we aren't called by skip_prologue, then we need
1730 to calculate registers for the previous frame which will be needed
1731 later. */
1732
1733 if (!frameless && this_frame)
1734 {
1735 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1736 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1737
1738 /* Extract the size of the rotating portion of the stack
1739 frame and the register rename base from the current
1740 frame marker. */
1741 cfm = cache->cfm;
1742 sor = cache->sor;
1743 sof = cache->sof;
1744 sol = cache->sol;
1745 rrb_gr = (cfm >> 18) & 0x7f;
1746
1747 /* Find the bof (beginning of frame). */
1748 bof = rse_address_add (cache->bsp, -sof);
1749
1750 for (i = 0, addr = bof;
1751 i < sof;
1752 i++, addr += 8)
1753 {
1754 if (IS_NaT_COLLECTION_ADDR (addr))
1755 {
1756 addr += 8;
1757 }
1758 if (i+32 == cfm_reg)
1759 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1760 if (i+32 == ret_reg)
1761 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1762 if (i+32 == fp_reg)
1763 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1764 }
1765
1766 /* For the previous argument registers we require the previous bof.
1767 If we can't find the previous cfm, then we can do nothing. */
1768 cfm = 0;
1769 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1770 {
1771 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1772 8, byte_order);
1773 }
1774 else if (cfm_reg != 0)
1775 {
1776 get_frame_register (this_frame, cfm_reg, buf);
1777 cfm = extract_unsigned_integer (buf, 8, byte_order);
1778 }
1779 cache->prev_cfm = cfm;
1780
1781 if (cfm != 0)
1782 {
1783 sor = ((cfm >> 14) & 0xf) * 8;
1784 sof = (cfm & 0x7f);
1785 sol = (cfm >> 7) & 0x7f;
1786 rrb_gr = (cfm >> 18) & 0x7f;
1787
1788 /* The previous bof only requires subtraction of the sol (size of
1789 locals) due to the overlap between output and input of
1790 subsequent frames. */
1791 bof = rse_address_add (bof, -sol);
1792
1793 for (i = 0, addr = bof;
1794 i < sof;
1795 i++, addr += 8)
1796 {
1797 if (IS_NaT_COLLECTION_ADDR (addr))
1798 {
1799 addr += 8;
1800 }
1801 if (i < sor)
1802 cache->saved_regs[IA64_GR32_REGNUM
1803 + ((i + (sor - rrb_gr)) % sor)]
1804 = addr;
1805 else
1806 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1807 }
1808
1809 }
1810 }
1811
1812 /* Try to trust the lim_pc value whenever possible. */
1813 if (trust_limit && lim_pc >= last_prologue_pc)
1814 last_prologue_pc = lim_pc;
1815
1816 cache->frameless = frameless;
1817 cache->after_prologue = last_prologue_pc;
1818 cache->mem_stack_frame_size = mem_stack_frame_size;
1819 cache->fp_reg = fp_reg;
1820
1821 return last_prologue_pc;
1822 }
1823
1824 CORE_ADDR
1825 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1826 {
1827 struct ia64_frame_cache cache;
1828 cache.base = 0;
1829 cache.after_prologue = 0;
1830 cache.cfm = 0;
1831 cache.bsp = 0;
1832
1833 /* Call examine_prologue with 0 as the third argument, since we
1834 don't have a next frame pointer to send. */
1835 return examine_prologue (pc, pc+1024, 0, &cache);
1836 }
1837
1838
1839 /* Normal frames. */
1840
1841 static struct ia64_frame_cache *
1842 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1843 {
1844 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1845 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1846 struct ia64_frame_cache *cache;
1847 gdb_byte buf[8];
1848 CORE_ADDR cfm;
1849
1850 if (*this_cache)
1851 return (struct ia64_frame_cache *) *this_cache;
1852
1853 cache = ia64_alloc_frame_cache ();
1854 *this_cache = cache;
1855
1856 get_frame_register (this_frame, sp_regnum, buf);
1857 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1858
1859 /* We always want the bsp to point to the end of frame.
1860 This way, we can always get the beginning of frame (bof)
1861 by subtracting frame size. */
1862 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1863 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1864
1865 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1866
1867 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1868 cfm = extract_unsigned_integer (buf, 8, byte_order);
1869
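  /* The CFM packs the frame layout as bit-fields: sof (size of frame)
     in bits 0..6, sol (size of locals) in bits 7..13, sor (size of
     rotating, in units of 8 registers) in bits 14..17, and the rename
     bases rrb.gr, rrb.fr and rrb.pr in bits 18..24, 25..31 and 32..37.
     The shifts and masks below (and elsewhere in this file) decode
     exactly these fields.  */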
1870 cache->sof = (cfm & 0x7f);
1871 cache->sol = (cfm >> 7) & 0x7f;
1872 cache->sor = ((cfm >> 14) & 0xf) * 8;
1873
1874 cache->cfm = cfm;
1875
1876 cache->pc = get_frame_func (this_frame);
1877
1878 if (cache->pc != 0)
1879 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1880
1881 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1882
1883 return cache;
1884 }
1885
1886 static void
1887 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1888 struct frame_id *this_id)
1889 {
1890 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1891 struct ia64_frame_cache *cache =
1892 ia64_frame_cache (this_frame, this_cache);
1893
1894 /* Unless this is the outermost frame (base == 0), build a frame id; otherwise leave the default null frame id. */
1895 if (cache->base != 0)
1896 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1897 if (gdbarch_debug >= 1)
1898 fprintf_unfiltered (gdb_stdlog,
1899 "regular frame id: code %s, stack %s, "
1900 "special %s, this_frame %s\n",
1901 paddress (gdbarch, this_id->code_addr),
1902 paddress (gdbarch, this_id->stack_addr),
1903 paddress (gdbarch, cache->bsp),
1904 host_address_to_string (this_frame));
1905 }
1906
1907 static struct value *
1908 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1909 int regnum)
1910 {
1911 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1912 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1913 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1914 gdb_byte buf[8];
1915
1916 gdb_assert (regnum >= 0);
1917
1918 if (!target_has_registers ())
1919 error (_("No registers."));
1920
1921 if (regnum == gdbarch_sp_regnum (gdbarch))
1922 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1923
1924 else if (regnum == IA64_BSP_REGNUM)
1925 {
1926 struct value *val;
1927 CORE_ADDR prev_cfm, bsp, prev_bsp;
1928
1929 /* We want to calculate the previous bsp as the end of the previous
1930 register stack frame. This corresponds to what the hardware bsp
1931 register will be if we pop the frame back, which is why we might
1932 have been called. We know the beginning of the current frame is
1933 cache->bsp - cache->sof. This value in the previous frame points
1934 to the start of the output registers. We can calculate the end of
1935 that frame by adding the size of output:
1936 (sof (size of frame) - sol (size of locals)). */
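      /* Illustrative example (numbers made up): if the previous CFM has
	 sof = 96 and sol = 90, the previous frame's output area holds
	 96 - 90 = 6 registers, so prev_bsp lands 6 slots above the
	 current frame's bof, with rse_address_add accounting for any
	 intervening NAT collection slots.  */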
1937 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1938 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1939 8, byte_order);
1940 bsp = rse_address_add (cache->bsp, -(cache->sof));
1941 prev_bsp =
1942 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1943
1944 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1945 }
1946
1947 else if (regnum == IA64_CFM_REGNUM)
1948 {
1949 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1950
1951 if (addr != 0)
1952 return frame_unwind_got_memory (this_frame, regnum, addr);
1953
1954 if (cache->prev_cfm)
1955 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1956
1957 if (cache->frameless)
1958 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1959 IA64_PFS_REGNUM);
1960 return frame_unwind_got_register (this_frame, regnum, 0);
1961 }
1962
1963 else if (regnum == IA64_VFP_REGNUM)
1964 {
1965 /* If the function in question uses an automatic register (r32-r127)
1966 for the frame pointer, it'll be found by ia64_find_saved_register()
1967 above. If the function lacks one of these frame pointers, we can
1968 still provide a value since we know the size of the frame. */
1969 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1970 }
1971
1972 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1973 {
1974 struct value *pr_val;
1975 ULONGEST prN;
1976
1977 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1978 IA64_PR_REGNUM);
1979 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1980 {
1981 /* Fetch predicate register rename base from current frame
1982 marker for this frame. */
1983 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1984
1985 /* Adjust the register number to account for register rotation. */
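	  /* p16-p63 are the 48 rotating predicate registers; rrb.pr is
	     the rotation base, hence the modulo 48 below.  */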
1986 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1987 }
1988 prN = extract_bit_field (value_contents_all (pr_val),
1989 regnum - VP0_REGNUM, 1);
1990 return frame_unwind_got_constant (this_frame, regnum, prN);
1991 }
1992
1993 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1994 {
1995 struct value *unat_val;
1996 ULONGEST unatN;
1997 unat_val = ia64_frame_prev_register (this_frame, this_cache,
1998 IA64_UNAT_REGNUM);
1999 unatN = extract_bit_field (value_contents_all (unat_val),
2000 regnum - IA64_NAT0_REGNUM, 1);
2001 return frame_unwind_got_constant (this_frame, regnum, unatN);
2002 }
2003
2004 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2005 {
2006 int natval = 0;
2007 /* Find address of general register corresponding to nat bit we're
2008 interested in. */
2009 CORE_ADDR gr_addr;
2010
2011 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2012
2013 if (gr_addr != 0)
2014 {
2015 /* Compute address of nat collection bits. */
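	  /* Every 64th slot of the RSE backing store (the slot whose
	     address has bits 3..8 all set, hence the "| 0x1f8") holds
	     the NAT collection bits for the 63 slots below it; the
	     register's slot number within that group selects the bit
	     (the "& 0x3f" further down).  */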
2016 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2017 CORE_ADDR bsp;
2018 CORE_ADDR nat_collection;
2019 int nat_bit;
2020
2021 /* If our nat collection address is bigger than bsp, we have to get
2022 the nat collection from rnat. Otherwise, we fetch the nat
2023 collection from the computed address. */
2024 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2025 bsp = extract_unsigned_integer (buf, 8, byte_order);
2026 if (nat_addr >= bsp)
2027 {
2028 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2029 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2030 }
2031 else
2032 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2033 nat_bit = (gr_addr >> 3) & 0x3f;
2034 natval = (nat_collection >> nat_bit) & 1;
2035 }
2036
2037 return frame_unwind_got_constant (this_frame, regnum, natval);
2038 }
2039
2040 else if (regnum == IA64_IP_REGNUM)
2041 {
2042 CORE_ADDR pc = 0;
2043 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2044
2045 if (addr != 0)
2046 {
2047 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2048 pc = extract_unsigned_integer (buf, 8, byte_order);
2049 }
2050 else if (cache->frameless)
2051 {
2052 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2053 pc = extract_unsigned_integer (buf, 8, byte_order);
2054 }
2055 pc &= ~0xf;
2056 return frame_unwind_got_constant (this_frame, regnum, pc);
2057 }
2058
2059 else if (regnum == IA64_PSR_REGNUM)
2060 {
2061 /* We don't know how to get the complete previous PSR, but we need it
2062 for the slot information when we unwind the pc (the pc is the IP
2063 register plus the slot number held in PSR bits 41:42). To get the
2064 previous slot number, we extract it from the return address. */
2065 ULONGEST slot_num = 0;
2066 CORE_ADDR pc = 0;
2067 CORE_ADDR psr = 0;
2068 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2069
2070 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2071 psr = extract_unsigned_integer (buf, 8, byte_order);
2072
2073 if (addr != 0)
2074 {
2075 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2076 pc = extract_unsigned_integer (buf, 8, byte_order);
2077 }
2078 else if (cache->frameless)
2079 {
2080 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2081 pc = extract_unsigned_integer (buf, 8, byte_order);
2082 }
2083 psr &= ~(3LL << 41);
2084 slot_num = pc & 0x3LL;
2085 psr |= (CORE_ADDR)slot_num << 41;
2086 return frame_unwind_got_constant (this_frame, regnum, psr);
2087 }
2088
2089 else if (regnum == IA64_BR0_REGNUM)
2090 {
2091 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2092
2093 if (addr != 0)
2094 return frame_unwind_got_memory (this_frame, regnum, addr);
2095
2096 return frame_unwind_got_constant (this_frame, regnum, 0);
2097 }
2098
2099 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2100 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2101 {
2102 CORE_ADDR addr = 0;
2103
2104 if (regnum >= V32_REGNUM)
2105 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2106 addr = cache->saved_regs[regnum];
2107 if (addr != 0)
2108 return frame_unwind_got_memory (this_frame, regnum, addr);
2109
2110 if (cache->frameless)
2111 {
2112 struct value *reg_val;
2113 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2114
2115 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2116 with the same code above? */
2117 if (regnum >= V32_REGNUM)
2118 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2119 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2120 IA64_CFM_REGNUM);
2121 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2122 8, byte_order);
2123 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2124 IA64_BSP_REGNUM);
2125 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2126 8, byte_order);
2127 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2128
2129 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2130 return frame_unwind_got_memory (this_frame, regnum, addr);
2131 }
2132
2133 return frame_unwind_got_constant (this_frame, regnum, 0);
2134 }
2135
2136 else /* All other registers. */
2137 {
2138 CORE_ADDR addr = 0;
2139
2140 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2141 {
2142 /* Fetch floating point register rename base from current
2143 frame marker for this frame. */
2144 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2145
2146 /* Adjust the floating point register number to account for
2147 register rotation. */
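	  /* fr32-fr127 form the 96-entry rotating portion of the
	     floating-point register file; rrb.fr is the rotation base,
	     hence the modulo 96 below.  */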
2148 regnum = IA64_FR32_REGNUM
2149 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2150 }
2151
2152 /* If we have stored a memory address, access the register. */
2153 addr = cache->saved_regs[regnum];
2154 if (addr != 0)
2155 return frame_unwind_got_memory (this_frame, regnum, addr);
2156 /* Otherwise, punt and get the current value of the register. */
2157 else
2158 return frame_unwind_got_register (this_frame, regnum, regnum);
2159 }
2160 }
2161
2162 static const struct frame_unwind ia64_frame_unwind =
2163 {
2164 "ia64 prologue",
2165 NORMAL_FRAME,
2166 default_frame_unwind_stop_reason,
2167 &ia64_frame_this_id,
2168 &ia64_frame_prev_register,
2169 NULL,
2170 default_frame_sniffer
2171 };
2172
2173 /* Signal trampolines. */
2174
2175 static void
2176 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2177 struct ia64_frame_cache *cache)
2178 {
2179 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2180 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2181
2182 if (tdep->sigcontext_register_address)
2183 {
2184 int regno;
2185
2186 cache->saved_regs[IA64_VRAP_REGNUM]
2187 = tdep->sigcontext_register_address (gdbarch, cache->base,
2188 IA64_IP_REGNUM);
2189 cache->saved_regs[IA64_CFM_REGNUM]
2190 = tdep->sigcontext_register_address (gdbarch, cache->base,
2191 IA64_CFM_REGNUM);
2192 cache->saved_regs[IA64_PSR_REGNUM]
2193 = tdep->sigcontext_register_address (gdbarch, cache->base,
2194 IA64_PSR_REGNUM);
2195 cache->saved_regs[IA64_BSP_REGNUM]
2196 = tdep->sigcontext_register_address (gdbarch, cache->base,
2197 IA64_BSP_REGNUM);
2198 cache->saved_regs[IA64_RNAT_REGNUM]
2199 = tdep->sigcontext_register_address (gdbarch, cache->base,
2200 IA64_RNAT_REGNUM);
2201 cache->saved_regs[IA64_CCV_REGNUM]
2202 = tdep->sigcontext_register_address (gdbarch, cache->base,
2203 IA64_CCV_REGNUM);
2204 cache->saved_regs[IA64_UNAT_REGNUM]
2205 = tdep->sigcontext_register_address (gdbarch, cache->base,
2206 IA64_UNAT_REGNUM);
2207 cache->saved_regs[IA64_FPSR_REGNUM]
2208 = tdep->sigcontext_register_address (gdbarch, cache->base,
2209 IA64_FPSR_REGNUM);
2210 cache->saved_regs[IA64_PFS_REGNUM]
2211 = tdep->sigcontext_register_address (gdbarch, cache->base,
2212 IA64_PFS_REGNUM);
2213 cache->saved_regs[IA64_LC_REGNUM]
2214 = tdep->sigcontext_register_address (gdbarch, cache->base,
2215 IA64_LC_REGNUM);
2216
2217 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2218 cache->saved_regs[regno] =
2219 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2220 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2221 cache->saved_regs[regno] =
2222 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2223 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2224 cache->saved_regs[regno] =
2225 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2226 }
2227 }
2228
2229 static struct ia64_frame_cache *
2230 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2231 {
2232 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2233 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2234 struct ia64_frame_cache *cache;
2235 gdb_byte buf[8];
2236
2237 if (*this_cache)
2238 return (struct ia64_frame_cache *) *this_cache;
2239
2240 cache = ia64_alloc_frame_cache ();
2241
2242 get_frame_register (this_frame, sp_regnum, buf);
2243 /* Note that frame size is hard-coded below. We cannot calculate it
2244 via prologue examination. */
2245 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2246
2247 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2248 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2249
2250 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2251 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2252 cache->sof = cache->cfm & 0x7f;
2253
2254 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2255
2256 *this_cache = cache;
2257 return cache;
2258 }
2259
2260 static void
2261 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2262 void **this_cache, struct frame_id *this_id)
2263 {
2264 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2265 struct ia64_frame_cache *cache =
2266 ia64_sigtramp_frame_cache (this_frame, this_cache);
2267
2268 (*this_id) = frame_id_build_special (cache->base,
2269 get_frame_pc (this_frame),
2270 cache->bsp);
2271 if (gdbarch_debug >= 1)
2272 fprintf_unfiltered (gdb_stdlog,
2273 "sigtramp frame id: code %s, stack %s, "
2274 "special %s, this_frame %s\n",
2275 paddress (gdbarch, this_id->code_addr),
2276 paddress (gdbarch, this_id->stack_addr),
2277 paddress (gdbarch, cache->bsp),
2278 host_address_to_string (this_frame));
2279 }
2280
2281 static struct value *
2282 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2283 void **this_cache, int regnum)
2284 {
2285 struct ia64_frame_cache *cache =
2286 ia64_sigtramp_frame_cache (this_frame, this_cache);
2287
2288 gdb_assert (regnum >= 0);
2289
2290 if (!target_has_registers ())
2291 error (_("No registers."));
2292
2293 if (regnum == IA64_IP_REGNUM)
2294 {
2295 CORE_ADDR pc = 0;
2296 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2297
2298 if (addr != 0)
2299 {
2300 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2301 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2302 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2303 }
2304 pc &= ~0xf;
2305 return frame_unwind_got_constant (this_frame, regnum, pc);
2306 }
2307
2308 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2309 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2310 {
2311 CORE_ADDR addr = 0;
2312
2313 if (regnum >= V32_REGNUM)
2314 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2315 addr = cache->saved_regs[regnum];
2316 if (addr != 0)
2317 return frame_unwind_got_memory (this_frame, regnum, addr);
2318
2319 return frame_unwind_got_constant (this_frame, regnum, 0);
2320 }
2321
2322 else /* All other registers not listed above. */
2323 {
2324 CORE_ADDR addr = cache->saved_regs[regnum];
2325
2326 if (addr != 0)
2327 return frame_unwind_got_memory (this_frame, regnum, addr);
2328
2329 return frame_unwind_got_constant (this_frame, regnum, 0);
2330 }
2331 }
2332
2333 static int
2334 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2335 struct frame_info *this_frame,
2336 void **this_cache)
2337 {
2338 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2339 if (tdep->pc_in_sigtramp)
2340 {
2341 CORE_ADDR pc = get_frame_pc (this_frame);
2342
2343 if (tdep->pc_in_sigtramp (pc))
2344 return 1;
2345 }
2346
2347 return 0;
2348 }
2349
2350 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2351 {
2352 "ia64 sigtramp",
2353 SIGTRAMP_FRAME,
2354 default_frame_unwind_stop_reason,
2355 ia64_sigtramp_frame_this_id,
2356 ia64_sigtramp_frame_prev_register,
2357 NULL,
2358 ia64_sigtramp_frame_sniffer
2359 };
2360
2361 \f
2362
2363 static CORE_ADDR
2364 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2365 {
2366 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2367
2368 return cache->base;
2369 }
2370
2371 static const struct frame_base ia64_frame_base =
2372 {
2373 &ia64_frame_unwind,
2374 ia64_frame_base_address,
2375 ia64_frame_base_address,
2376 ia64_frame_base_address
2377 };
2378
2379 #ifdef HAVE_LIBUNWIND_IA64_H
2380
2381 struct ia64_unwind_table_entry
2382 {
2383 unw_word_t start_offset;
2384 unw_word_t end_offset;
2385 unw_word_t info_offset;
2386 };
2387
2388 static __inline__ uint64_t
2389 ia64_rse_slot_num (uint64_t addr)
2390 {
2391 return (addr >> 3) & 0x3f;
2392 }
2393
2394 /* Skip over a designated number of registers in the backing
2395 store, remembering every 64th position is for NAT. */
2396 static __inline__ uint64_t
2397 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2398 {
2399 long delta = ia64_rse_slot_num(addr) + num_regs;
2400
2401 if (num_regs < 0)
2402 delta -= 0x3e;
2403 return addr + ((num_regs + delta/0x3f) << 3);
2404 }
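/* Illustrative example (not from the original sources): if ADDR sits in
   slot 62 of its 64-slot group and NUM_REGS is 2, then delta = 64 and
   delta / 0x3f = 1, so the result advances by (2 + 1) * 8 bytes; the
   extra slot skipped is the NAT collection word occupying slot 63 of
   every group.  */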
2405
2406 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2407 register number to a libunwind register number. */
2408 static int
2409 ia64_gdb2uw_regnum (int regnum)
2410 {
2411 if (regnum == sp_regnum)
2412 return UNW_IA64_SP;
2413 else if (regnum == IA64_BSP_REGNUM)
2414 return UNW_IA64_BSP;
2415 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2416 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2417 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2418 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2419 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2420 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2421 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2422 return -1;
2423 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2424 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2425 else if (regnum == IA64_PR_REGNUM)
2426 return UNW_IA64_PR;
2427 else if (regnum == IA64_IP_REGNUM)
2428 return UNW_REG_IP;
2429 else if (regnum == IA64_CFM_REGNUM)
2430 return UNW_IA64_CFM;
2431 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2432 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2433 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2434 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2435 else
2436 return -1;
2437 }
2438
2439 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2440 register number to an ia64 gdb register number. */
2441 static int
2442 ia64_uw2gdb_regnum (int uw_regnum)
2443 {
2444 if (uw_regnum == UNW_IA64_SP)
2445 return sp_regnum;
2446 else if (uw_regnum == UNW_IA64_BSP)
2447 return IA64_BSP_REGNUM;
2448 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2449 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2450 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2451 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2452 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2453 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2454 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2455 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2456 else if (uw_regnum == UNW_IA64_PR)
2457 return IA64_PR_REGNUM;
2458 else if (uw_regnum == UNW_REG_IP)
2459 return IA64_IP_REGNUM;
2460 else if (uw_regnum == UNW_IA64_CFM)
2461 return IA64_CFM_REGNUM;
2462 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2463 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2464 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2465 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2466 else
2467 return -1;
2468 }
2469
2470 /* Gdb ia64-libunwind-tdep callback function to determine whether a
2471 register is a floating-point register. */
2472 static int
2473 ia64_is_fpreg (int uw_regnum)
2474 {
2475 return unw_is_fpreg (uw_regnum);
2476 }
2477
2478 /* Libunwind callback accessor function for general registers. */
2479 static int
2480 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2481 int write, void *arg)
2482 {
2483 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2484 unw_word_t bsp, sof, cfm, psr, ip;
2485 struct frame_info *this_frame = (struct frame_info *) arg;
2486 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2487
2488 /* We never call any libunwind routines that need to write registers. */
2489 gdb_assert (!write);
2490
2491 switch (uw_regnum)
2492 {
2493 case UNW_REG_IP:
2494 /* Libunwind expects to see the pc value which means the slot number
2495 from the psr must be merged with the ip word address. */
2496 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2497 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2498 *val = ip | ((psr >> 41) & 0x3);
2499 break;
2500
2501 case UNW_IA64_AR_BSP:
2502 /* Libunwind expects to see the beginning of the current
2503 register frame so we must account for the fact that
2504 ptrace() will return a value for bsp that points *after*
2505 the current register frame. */
2506 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2507 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2508 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2509 *val = ia64_rse_skip_regs (bsp, -sof);
2510 break;
2511
2512 case UNW_IA64_AR_BSPSTORE:
2513 /* Libunwind wants bspstore to be after the current register frame.
2514 This is what ptrace() and gdb treat as the regular bsp value. */
2515 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2516 break;
2517
2518 default:
2519 /* For all other registers, just unwind the value directly. */
2520 *val = get_frame_register_unsigned (this_frame, regnum);
2521 break;
2522 }
2523
2524 if (gdbarch_debug >= 1)
2525 fprintf_unfiltered (gdb_stdlog,
2526 " access_reg: from cache: %4s=%s\n",
2527 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2528 ? ia64_register_names[regnum] : "r??"),
2529 paddress (gdbarch, *val));
2530 return 0;
2531 }
2532
2533 /* Libunwind callback accessor function for floating-point registers. */
2534 static int
2535 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2536 unw_fpreg_t *val, int write, void *arg)
2537 {
2538 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2539 struct frame_info *this_frame = (struct frame_info *) arg;
2540
2541 /* We never call any libunwind routines that need to write registers. */
2542 gdb_assert (!write);
2543
2544 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2545
2546 return 0;
2547 }
2548
2549 /* Libunwind callback accessor function for top-level rse registers. */
2550 static int
2551 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2552 unw_word_t *val, int write, void *arg)
2553 {
2554 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2555 unw_word_t bsp, sof, cfm, psr, ip;
2556 struct regcache *regcache = (struct regcache *) arg;
2557 struct gdbarch *gdbarch = regcache->arch ();
2558
2559 /* We never call any libunwind routines that need to write registers. */
2560 gdb_assert (!write);
2561
2562 switch (uw_regnum)
2563 {
2564 case UNW_REG_IP:
2565 /* Libunwind expects to see the pc value which means the slot number
2566 from the psr must be merged with the ip word address. */
2567 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2568 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2569 *val = ip | ((psr >> 41) & 0x3);
2570 break;
2571
2572 case UNW_IA64_AR_BSP:
2573 /* Libunwind expects to see the beginning of the current
2574 register frame so we must account for the fact that
2575 ptrace() will return a value for bsp that points *after*
2576 the current register frame. */
2577 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2578 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2579 sof = (cfm & 0x7f);
2580 *val = ia64_rse_skip_regs (bsp, -sof);
2581 break;
2582
2583 case UNW_IA64_AR_BSPSTORE:
2584 /* Libunwind wants bspstore to be after the current register frame.
2585 This is what ptrace() and gdb treat as the regular bsp value. */
2586 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2587 break;
2588
2589 default:
2590 /* For all other registers, just unwind the value directly. */
2591 regcache_cooked_read_unsigned (regcache, regnum, val);
2592 break;
2593 }
2594
2595 if (gdbarch_debug >= 1)
2596 fprintf_unfiltered (gdb_stdlog,
2597 " access_rse_reg: from cache: %4s=%s\n",
2598 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2599 ? ia64_register_names[regnum] : "r??"),
2600 paddress (gdbarch, *val));
2601
2602 return 0;
2603 }
2604
2605 /* Libunwind callback accessor function for top-level fp registers. */
2606 static int
2607 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2608 unw_fpreg_t *val, int write, void *arg)
2609 {
2610 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2611 struct regcache *regcache = (struct regcache *) arg;
2612
2613 /* We never call any libunwind routines that need to write registers. */
2614 gdb_assert (!write);
2615
2616 regcache->cooked_read (regnum, (gdb_byte *) val);
2617
2618 return 0;
2619 }
2620
2621 /* Libunwind callback accessor function for accessing memory. */
2622 static int
2623 ia64_access_mem (unw_addr_space_t as,
2624 unw_word_t addr, unw_word_t *val,
2625 int write, void *arg)
2626 {
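  /* The subtraction below is unsigned, so this single comparison checks
     that ADDR lies within [KERNEL_START, KERNEL_START + ktab_size).  */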
2627 if (addr - KERNEL_START < ktab_size)
2628 {
2629 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2630 + (addr - KERNEL_START));
2631
2632 if (write)
2633 *laddr = *val;
2634 else
2635 *val = *laddr;
2636 return 0;
2637 }
2638
2639 /* XXX do we need to normalize byte-order here? */
2640 if (write)
2641 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2642 else
2643 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2644 }
2645
2646 /* Call low-level function to access the kernel unwind table. */
2647 static gdb::optional<gdb::byte_vector>
2648 getunwind_table ()
2649 {
2650 /* FIXME drow/2005-09-10: This code used to call
2651 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2652 for the currently running ia64-linux kernel. That data should
2653 come from the core file and be accessed via the auxv vector; if
2654 we want to preserve the fallback to the running kernel's table, then
2655 we should find a way to override the corefile layer's
2656 xfer_partial method. */
2657
2658 return target_read_alloc (current_inferior ()->top_target (),
2659 TARGET_OBJECT_UNWIND_TABLE, NULL);
2660 }
2661
2662 /* Get the kernel unwind table. */
2663 static int
2664 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2665 {
2666 static struct ia64_table_entry *etab;
2667
2668 if (!ktab)
2669 {
2670 ktab_buf = getunwind_table ();
2671 if (!ktab_buf)
2672 return -UNW_ENOINFO;
2673
2674 ktab = (struct ia64_table_entry *) ktab_buf->data ();
2675 ktab_size = ktab_buf->size ();
2676
2677 for (etab = ktab; etab->start_offset; ++etab)
2678 etab->info_offset += KERNEL_START;
2679 }
2680
2681 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2682 return -UNW_ENOINFO;
2683
2684 di->format = UNW_INFO_FORMAT_TABLE;
2685 di->gp = 0;
2686 di->start_ip = ktab[0].start_offset;
2687 di->end_ip = etab[-1].end_offset;
2688 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2689 di->u.ti.segbase = 0;
2690 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2691 di->u.ti.table_data = (unw_word_t *) ktab;
2692
2693 if (gdbarch_debug >= 1)
2694 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2695 "segbase=%s, length=%s, gp=%s\n",
2696 (char *) di->u.ti.name_ptr,
2697 hex_string (di->u.ti.segbase),
2698 pulongest (di->u.ti.table_len),
2699 hex_string (di->gp));
2700 return 0;
2701 }
2702
2703 /* Find the unwind table entry for a specified address. */
2704 static int
2705 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2706 unw_dyn_info_t *dip, void **buf)
2707 {
2708 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2709 Elf_Internal_Ehdr *ehdr;
2710 unw_word_t segbase = 0;
2711 CORE_ADDR load_base;
2712 bfd *bfd;
2713 int i;
2714
2715 bfd = objfile->obfd;
2716
2717 ehdr = elf_tdata (bfd)->elf_header;
2718 phdr = elf_tdata (bfd)->phdr;
2719
2720 load_base = objfile->text_section_offset ();
2721
2722 for (i = 0; i < ehdr->e_phnum; ++i)
2723 {
2724 switch (phdr[i].p_type)
2725 {
2726 case PT_LOAD:
2727 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2728 < phdr[i].p_memsz)
2729 p_text = phdr + i;
2730 break;
2731
2732 case PT_IA_64_UNWIND:
2733 p_unwind = phdr + i;
2734 break;
2735
2736 default:
2737 break;
2738 }
2739 }
2740
2741 if (!p_text || !p_unwind)
2742 return -UNW_ENOINFO;
2743
2744 /* Verify that the segment that contains the IP also contains
2745 the static unwind table. If not, we may be in the Linux kernel's
2746 DSO gate page, in which case the unwind table is in another segment.
2747 Otherwise, we are dealing with runtime-generated code, for which we
2748 have no info here. */
2749 segbase = p_text->p_vaddr + load_base;
2750
2751 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2752 {
2753 int ok = 0;
2754 for (i = 0; i < ehdr->e_phnum; ++i)
2755 {
2756 if (phdr[i].p_type == PT_LOAD
2757 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2758 {
2759 ok = 1;
2760 /* Get the segbase from the section containing the
2761 libunwind table. */
2762 segbase = phdr[i].p_vaddr + load_base;
2763 }
2764 }
2765 if (!ok)
2766 return -UNW_ENOINFO;
2767 }
2768
2769 dip->start_ip = p_text->p_vaddr + load_base;
2770 dip->end_ip = dip->start_ip + p_text->p_memsz;
2771 dip->gp = ia64_find_global_pointer (objfile->arch (), ip);
2772 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2773 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2774 dip->u.rti.segbase = segbase;
2775 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2776 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2777
2778 return 0;
2779 }
2780
2781 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2782 static int
2783 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2784 int need_unwind_info, void *arg)
2785 {
2786 struct obj_section *sec = find_pc_section (ip);
2787 unw_dyn_info_t di;
2788 int ret;
2789 void *buf = NULL;
2790
2791 if (!sec)
2792 {
2793 /* XXX This only works if the host and the target architecture are
2794 both ia64 and if they have (more or less) the same kernel
2795 version. */
2796 if (get_kernel_table (ip, &di) < 0)
2797 return -UNW_ENOINFO;
2798
2799 if (gdbarch_debug >= 1)
2800 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2801 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2802 "length=%s,data=%s)\n",
2803 hex_string (ip), (char *)di.u.ti.name_ptr,
2804 hex_string (di.u.ti.segbase),
2805 hex_string (di.start_ip), hex_string (di.end_ip),
2806 hex_string (di.gp),
2807 pulongest (di.u.ti.table_len),
2808 hex_string ((CORE_ADDR)di.u.ti.table_data));
2809 }
2810 else
2811 {
2812 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2813 if (ret < 0)
2814 return ret;
2815
2816 if (gdbarch_debug >= 1)
2817 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2818 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2819 "length=%s,data=%s)\n",
2820 hex_string (ip), (char *)di.u.rti.name_ptr,
2821 hex_string (di.u.rti.segbase),
2822 hex_string (di.start_ip), hex_string (di.end_ip),
2823 hex_string (di.gp),
2824 pulongest (di.u.rti.table_len),
2825 hex_string (di.u.rti.table_data));
2826 }
2827
2828 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2829 arg);
2830
2831 /* We no longer need the dyn info storage so free it. */
2832 xfree (buf);
2833
2834 return ret;
2835 }
2836
2837 /* Libunwind callback accessor function for cleanup. */
2838 static void
2839 ia64_put_unwind_info (unw_addr_space_t as,
2840 unw_proc_info_t *pip, void *arg)
2841 {
2842 /* Nothing required for now. */
2843 }
2844
2845 /* Libunwind callback accessor function to get head of the dynamic
2846 unwind-info registration list. */
2847 static int
2848 ia64_get_dyn_info_list (unw_addr_space_t as,
2849 unw_word_t *dilap, void *arg)
2850 {
2851 struct obj_section *text_sec;
2852 unw_word_t ip, addr;
2853 unw_dyn_info_t di;
2854 int ret;
2855
2856 if (!libunwind_is_initialized ())
2857 return -UNW_ENOINFO;
2858
2859 for (objfile *objfile : current_program_space->objfiles ())
2860 {
2861 void *buf = NULL;
2862
2863 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2864 ip = text_sec->addr ();
2865 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2866 if (ret >= 0)
2867 {
2868 addr = libunwind_find_dyn_list (as, &di, arg);
2869 /* We no longer need the dyn info storage so free it. */
2870 xfree (buf);
2871
2872 if (addr)
2873 {
2874 if (gdbarch_debug >= 1)
2875 fprintf_unfiltered (gdb_stdlog,
2876 "dynamic unwind table in objfile %s "
2877 "at %s (gp=%s)\n",
2878 bfd_get_filename (objfile->obfd),
2879 hex_string (addr), hex_string (di.gp));
2880 *dilap = addr;
2881 return 0;
2882 }
2883 }
2884 }
2885 return -UNW_ENOINFO;
2886 }
2887
2888
2889 /* Frame interface functions for libunwind. */
2890
2891 static void
2892 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2893 struct frame_id *this_id)
2894 {
2895 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2896 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2897 struct frame_id id = outer_frame_id;
2898 gdb_byte buf[8];
2899 CORE_ADDR bsp;
2900
2901 libunwind_frame_this_id (this_frame, this_cache, &id);
2902 if (frame_id_eq (id, outer_frame_id))
2903 {
2904 (*this_id) = outer_frame_id;
2905 return;
2906 }
2907
2908 /* We must add the bsp as the special address for frame comparison
2909 purposes. */
2910 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2911 bsp = extract_unsigned_integer (buf, 8, byte_order);
2912
2913 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2914
2915 if (gdbarch_debug >= 1)
2916 fprintf_unfiltered (gdb_stdlog,
2917 "libunwind frame id: code %s, stack %s, "
2918 "special %s, this_frame %s\n",
2919 paddress (gdbarch, id.code_addr),
2920 paddress (gdbarch, id.stack_addr),
2921 paddress (gdbarch, bsp),
2922 host_address_to_string (this_frame));
2923 }
2924
2925 static struct value *
2926 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2927 void **this_cache, int regnum)
2928 {
2929 int reg = regnum;
2930 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2931 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2932 struct value *val;
2933
2934 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2935 reg = IA64_PR_REGNUM;
2936 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2937 reg = IA64_UNAT_REGNUM;
2938
2939 /* Let libunwind do most of the work. */
2940 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2941
2942 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2943 {
2944 ULONGEST prN_val;
2945
2946 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2947 {
2948 int rrb_pr = 0;
2949 ULONGEST cfm;
2950
2951 /* Fetch predicate register rename base from current frame
2952 marker for this frame. */
2953 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2954 rrb_pr = (cfm >> 32) & 0x3f;
2955
2956 /* Adjust the register number to account for register rotation. */
2957 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2958 }
2959 prN_val = extract_bit_field (value_contents_all (val),
2960 regnum - VP0_REGNUM, 1);
2961 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2962 }
2963
2964 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2965 {
2966 ULONGEST unatN_val;
2967
2968 unatN_val = extract_bit_field (value_contents_all (val),
2969 regnum - IA64_NAT0_REGNUM, 1);
2970 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2971 }
2972
2973 else if (regnum == IA64_BSP_REGNUM)
2974 {
2975 struct value *cfm_val;
2976 CORE_ADDR prev_bsp, prev_cfm;
2977
2978 /* We want to calculate the previous bsp as the end of the previous
2979 register stack frame. This corresponds to what the hardware bsp
2980 register will be if we pop the frame back, which is why we might
2981 have been called. We know that libunwind will pass us back the
2982 beginning of the current frame so we should just add sof to it. */
2983 prev_bsp = extract_unsigned_integer (value_contents_all (val),
2984 8, byte_order);
2985 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2986 IA64_CFM_REGNUM);
2987 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
2988 8, byte_order);
2989 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2990
2991 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
2992 }
2993 else
2994 return val;
2995 }
2996
2997 static int
2998 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
2999 struct frame_info *this_frame,
3000 void **this_cache)
3001 {
3002 if (libunwind_is_initialized ()
3003 && libunwind_frame_sniffer (self, this_frame, this_cache))
3004 return 1;
3005
3006 return 0;
3007 }
3008
3009 static const struct frame_unwind ia64_libunwind_frame_unwind =
3010 {
3011 "ia64 libunwind",
3012 NORMAL_FRAME,
3013 default_frame_unwind_stop_reason,
3014 ia64_libunwind_frame_this_id,
3015 ia64_libunwind_frame_prev_register,
3016 NULL,
3017 ia64_libunwind_frame_sniffer,
3018 libunwind_frame_dealloc_cache
3019 };
3020
3021 static void
3022 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3023 void **this_cache,
3024 struct frame_id *this_id)
3025 {
3026 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3027 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3028 gdb_byte buf[8];
3029 CORE_ADDR bsp;
3030 struct frame_id id = outer_frame_id;
3031
3032 libunwind_frame_this_id (this_frame, this_cache, &id);
3033 if (frame_id_eq (id, outer_frame_id))
3034 {
3035 (*this_id) = outer_frame_id;
3036 return;
3037 }
3038
3039 /* We must add the bsp as the special address for frame comparison
3040 purposes. */
3041 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3042 bsp = extract_unsigned_integer (buf, 8, byte_order);
3043
3044 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3045 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3046
3047 if (gdbarch_debug >= 1)
3048 fprintf_unfiltered (gdb_stdlog,
3049 "libunwind sigtramp frame id: code %s, "
3050 "stack %s, special %s, this_frame %s\n",
3051 paddress (gdbarch, id.code_addr),
3052 paddress (gdbarch, id.stack_addr),
3053 paddress (gdbarch, bsp),
3054 host_address_to_string (this_frame));
3055 }
3056
3057 static struct value *
3058 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3059 void **this_cache, int regnum)
3060 {
3061 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3062 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3063 struct value *prev_ip_val;
3064 CORE_ADDR prev_ip;
3065
3066 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3067 method of getting previous registers. */
3068 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3069 IA64_IP_REGNUM);
3070 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3071 8, byte_order);
3072
3073 if (prev_ip == 0)
3074 {
3075 void *tmp_cache = NULL;
3076 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3077 regnum);
3078 }
3079 else
3080 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3081 }
3082
3083 static int
3084 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3085 struct frame_info *this_frame,
3086 void **this_cache)
3087 {
3088 if (libunwind_is_initialized ())
3089 {
3090 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3091 return 1;
3092 return 0;
3093 }
3094 else
3095 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3096 }
3097
3098 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3099 {
3100 "ia64 libunwind sigtramp",
3101 SIGTRAMP_FRAME,
3102 default_frame_unwind_stop_reason,
3103 ia64_libunwind_sigtramp_frame_this_id,
3104 ia64_libunwind_sigtramp_frame_prev_register,
3105 NULL,
3106 ia64_libunwind_sigtramp_frame_sniffer
3107 };
3108
3109 /* Set of libunwind callback accessor functions. */
3110 unw_accessors_t ia64_unw_accessors =
3111 {
3112 ia64_find_proc_info_x,
3113 ia64_put_unwind_info,
3114 ia64_get_dyn_info_list,
3115 ia64_access_mem,
3116 ia64_access_reg,
3117 ia64_access_fpreg,
3118 /* resume */
3119 /* get_proc_name */
3120 };
3121
3122 /* Set of special libunwind callback accessor functions for accessing
3123 the rse registers. At the top of the stack, we want libunwind to figure out
3124 how to read r32 - r127. Though usually they are found sequentially in
3125 memory starting from $bof, this is not always true. */
3126 unw_accessors_t ia64_unw_rse_accessors =
3127 {
3128 ia64_find_proc_info_x,
3129 ia64_put_unwind_info,
3130 ia64_get_dyn_info_list,
3131 ia64_access_mem,
3132 ia64_access_rse_reg,
3133 ia64_access_rse_fpreg,
3134 /* resume */
3135 /* get_proc_name */
3136 };
3137
3138 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3139 ia64-libunwind-tdep code to use. */
3140 struct libunwind_descr ia64_libunwind_descr =
3141 {
3142 ia64_gdb2uw_regnum,
3143 ia64_uw2gdb_regnum,
3144 ia64_is_fpreg,
3145 &ia64_unw_accessors,
3146 &ia64_unw_rse_accessors,
3147 };
3148
3149 #endif /* HAVE_LIBUNWIND_IA64_H */
3150
3151 static int
3152 ia64_use_struct_convention (struct type *type)
3153 {
3154 struct type *float_elt_type;
3155
3156 /* Don't use the struct convention for anything but structure,
3157 union, or array types. */
3158 if (!(type->code () == TYPE_CODE_STRUCT
3159 || type->code () == TYPE_CODE_UNION
3160 || type->code () == TYPE_CODE_ARRAY))
3161 return 0;
3162
3163 /* HFAs are structures (or arrays) consisting entirely of floating
3164 point values of the same length. Up to 8 of these are returned
3165 in registers. Don't use the struct convention when this is the
3166 case. */
3167 float_elt_type = is_float_or_hfa_type (type);
3168 if (float_elt_type != NULL
3169 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3170 return 0;
3171
3172 /* Other structs of length 32 or less are returned in r8-r11.
3173 Don't use the struct convention for those either. */
3174 return TYPE_LENGTH (type) > 32;
3175 }
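/* Illustrative examples (not from the original sources): a
   "struct { double x, y, z; }" is an HFA of three doubles and is
   returned in f8-f10; a "struct { long a, b; }" is 16 bytes and is
   returned in r8-r9; a 40-byte struct exceeds the 32-byte limit and
   falls back to the struct-return convention.  */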
3176
3177 /* Return non-zero if TYPE is a structure or union type. */
3178
3179 static int
3180 ia64_struct_type_p (const struct type *type)
3181 {
3182 return (type->code () == TYPE_CODE_STRUCT
3183 || type->code () == TYPE_CODE_UNION);
3184 }
3185
3186 static void
3187 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3188 gdb_byte *valbuf)
3189 {
3190 struct gdbarch *gdbarch = regcache->arch ();
3191 struct type *float_elt_type;
3192
3193 float_elt_type = is_float_or_hfa_type (type);
3194 if (float_elt_type != NULL)
3195 {
3196 gdb_byte from[IA64_FP_REGISTER_SIZE];
3197 int offset = 0;
3198 int regnum = IA64_FR8_REGNUM;
3199 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3200
3201 while (n-- > 0)
3202 {
3203 regcache->cooked_read (regnum, from);
3204 target_float_convert (from, ia64_ext_type (gdbarch),
3205 valbuf + offset, float_elt_type);
3206 offset += TYPE_LENGTH (float_elt_type);
3207 regnum++;
3208 }
3209 }
3210 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3211 {
3212 /* This is an integral value, and its size is less than 8 bytes.
3213 These values are LSB-aligned, so extract the relevant bytes,
3214 and copy them into VALBUF. */
3215 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3216 so I suppose we should also add handling here for integral values
3217 whose size is greater than 8. But I wasn't able to create such
3218 a type, either in C or in Ada, so I'm not worrying about these yet. */
3219 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3220 ULONGEST val;
3221
3222 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3223 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3224 }
3225 else
3226 {
3227 ULONGEST val;
3228 int offset = 0;
3229 int regnum = IA64_GR8_REGNUM;
3230 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3231 int n = TYPE_LENGTH (type) / reglen;
3232 int m = TYPE_LENGTH (type) % reglen;
3233
3234 while (n-- > 0)
3235 {
3236 ULONGEST regval;
3237 regcache_cooked_read_unsigned (regcache, regnum, &regval);
3238 memcpy ((char *)valbuf + offset, &regval, reglen);
3239 offset += reglen;
3240 regnum++;
3241 }
3242
3243 if (m)
3244 {
3245 regcache_cooked_read_unsigned (regcache, regnum, &val);
3246 memcpy ((char *)valbuf + offset, &val, m);
3247 }
3248 }
3249 }
3250
3251 static void
3252 ia64_store_return_value (struct type *type, struct regcache *regcache,
3253 const gdb_byte *valbuf)
3254 {
3255 struct gdbarch *gdbarch = regcache->arch ();
3256 struct type *float_elt_type;
3257
3258 float_elt_type = is_float_or_hfa_type (type);
3259 if (float_elt_type != NULL)
3260 {
3261 gdb_byte to[IA64_FP_REGISTER_SIZE];
3262 int offset = 0;
3263 int regnum = IA64_FR8_REGNUM;
3264 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3265
3266 while (n-- > 0)
3267 {
3268 target_float_convert (valbuf + offset, float_elt_type,
3269 to, ia64_ext_type (gdbarch));
3270 regcache->cooked_write (regnum, to);
3271 offset += TYPE_LENGTH (float_elt_type);
3272 regnum++;
3273 }
3274 }
3275 else
3276 {
3277 int offset = 0;
3278 int regnum = IA64_GR8_REGNUM;
3279 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3280 int n = TYPE_LENGTH (type) / reglen;
3281 int m = TYPE_LENGTH (type) % reglen;
3282
3283 while (n-- > 0)
3284 {
3285 ULONGEST val;
3286 memcpy (&val, (char *)valbuf + offset, reglen);
3287 regcache_cooked_write_unsigned (regcache, regnum, val);
3288 offset += reglen;
3289 regnum++;
3290 }
3291
3292 if (m)
3293 {
3294 ULONGEST val;
3295 memcpy (&val, (char *)valbuf + offset, m);
3296 regcache_cooked_write_unsigned (regcache, regnum, val);
3297 }
3298 }
3299 }
3300
3301 static enum return_value_convention
3302 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3303 struct type *valtype, struct regcache *regcache,
3304 gdb_byte *readbuf, const gdb_byte *writebuf)
3305 {
3306 int struct_return = ia64_use_struct_convention (valtype);
3307
3308 if (writebuf != NULL)
3309 {
3310 gdb_assert (!struct_return);
3311 ia64_store_return_value (valtype, regcache, writebuf);
3312 }
3313
3314 if (readbuf != NULL)
3315 {
3316 gdb_assert (!struct_return);
3317 ia64_extract_return_value (valtype, regcache, readbuf);
3318 }
3319
3320 if (struct_return)
3321 return RETURN_VALUE_STRUCT_CONVENTION;
3322 else
3323 return RETURN_VALUE_REGISTER_CONVENTION;
3324 }
3325
3326 static int
3327 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3328 {
3329 switch (t->code ())
3330 {
3331 case TYPE_CODE_FLT:
3332 if (*etp)
3333 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3334 else
3335 {
3336 *etp = t;
3337 return 1;
3338 }
3339 break;
3340 case TYPE_CODE_ARRAY:
3341 return
3342 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3343 etp);
3344 break;
3345 case TYPE_CODE_STRUCT:
3346 {
3347 int i;
3348
3349 for (i = 0; i < t->num_fields (); i++)
3350 if (!is_float_or_hfa_type_recurse
3351 (check_typedef (t->field (i).type ()), etp))
3352 return 0;
3353 return 1;
3354 }
3355 break;
3356 default:
3357 return 0;
3358 break;
3359 }
3360 }
3361
3362 /* Determine if the given type is one of the floating point types or
3363 an HFA (which is a struct, array, or combination thereof whose
3364 bottom-most elements are all of the same floating point type). */
3365
3366 static struct type *
3367 is_float_or_hfa_type (struct type *t)
3368 {
3369 struct type *et = 0;
3370
3371 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3372 }
3373
3374
3375 /* Return 1 if the alignment of T is such that the next even slot
3376 should be used. Return 0 if the next available slot should
3377 be used. (See section 8.5.1 of the IA-64 Software Conventions
3378 and Runtime manual). */
3379
3380 static int
3381 slot_alignment_is_next_even (struct type *t)
3382 {
3383 switch (t->code ())
3384 {
3385 case TYPE_CODE_INT:
3386 case TYPE_CODE_FLT:
3387 if (TYPE_LENGTH (t) > 8)
3388 return 1;
3389 else
3390 return 0;
3391 case TYPE_CODE_ARRAY:
3392 return
3393 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3394 case TYPE_CODE_STRUCT:
3395 {
3396 int i;
3397
3398 for (i = 0; i < t->num_fields (); i++)
3399 if (slot_alignment_is_next_even
3400 (check_typedef (t->field (i).type ())))
3401 return 1;
3402 return 0;
3403 }
3404 default:
3405 return 0;
3406 }
3407 }
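
/* A small worked example of the rule above (illustration only): for a
   call such as

       void f (int i, long double x);

   argument "i" occupies slot 0.  A "long double" is 16 bytes with the
   long_double_bit setting used below, so slot_alignment_is_next_even ()
   returns 1 for it; since slot 1 is odd, ia64_push_dummy_call () skips
   slot 1 and places "x" in slots 2 and 3.  */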
3408
3409 /* Attempt to find (and return) the global pointer for the given
3410 function.
3411
3412 This is a rather nasty bit of code that searches for the .dynamic section
3413 in the objfile corresponding to the pc of the function we're trying
3414 to call. Once it finds the addresses at which the .dynamic section
3415 lives in the child process, it scans the Elf64_Dyn entries for a
3416 DT_PLTGOT tag. If it finds one of these, the corresponding
3417 d_un.d_ptr value is the global pointer. */
3418
3419 static CORE_ADDR
3420 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3421 CORE_ADDR faddr)
3422 {
3423 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3424 struct obj_section *faddr_sect;
3425
3426 faddr_sect = find_pc_section (faddr);
3427 if (faddr_sect != NULL)
3428 {
3429 struct obj_section *osect;
3430
3431 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3432 {
3433 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3434 break;
3435 }
3436
3437 if (osect < faddr_sect->objfile->sections_end)
3438 {
3439 CORE_ADDR addr = osect->addr ();
3440 CORE_ADDR endaddr = osect->endaddr ();
3441
3442 while (addr < endaddr)
3443 {
3444 int status;
3445 LONGEST tag;
3446 gdb_byte buf[8];
3447
3448 status = target_read_memory (addr, buf, sizeof (buf));
3449 if (status != 0)
3450 break;
3451 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3452
3453 if (tag == DT_PLTGOT)
3454 {
3455 CORE_ADDR global_pointer;
3456
3457 status = target_read_memory (addr + 8, buf, sizeof (buf));
3458 if (status != 0)
3459 break;
3460 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3461 byte_order);
3462
3463 /* The payoff... */
3464 return global_pointer;
3465 }
3466
3467 if (tag == DT_NULL)
3468 break;
3469
3470 addr += 16;
3471 }
3472 }
3473 }
3474 return 0;
3475 }
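
/* For reference (illustrative declaration, not used by this file): the
   scan above relies on the fixed 16-byte layout of an Elf64_Dyn entry,

       typedef struct
       {
         int64_t d_tag;                                    (offset 0, 8 bytes)
         union { uint64_t d_val; uint64_t d_ptr; } d_un;   (offset 8, 8 bytes)
       } Elf64_Dyn;

   which is why the loop reads the tag at ADDR, reads the value at
   ADDR + 8 when the tag is DT_PLTGOT, and advances ADDR in steps
   of 16.  */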
3476
3477 /* Attempt to find (and return) the global pointer for the given
3478 function. We first try the find_global_pointer_from_solib routine
3479 from the gdbarch tdep vector, if provided. If that does not
3480 work, we fall back to ia64_find_global_pointer_from_dynamic_section. */
3481
3482 static CORE_ADDR
3483 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3484 {
3485 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3486 CORE_ADDR addr = 0;
3487
3488 if (tdep->find_global_pointer_from_solib)
3489 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3490 if (addr == 0)
3491 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3492 return addr;
3493 }
3494
3495 /* Given a function's address, attempt to find (and return) the
3496 corresponding (canonical) function descriptor. Return 0 if
3497 not found. */
3498 static CORE_ADDR
3499 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3500 {
3501 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3502 struct obj_section *faddr_sect;
3503
3504 /* Return early if faddr is already a function descriptor. */
3505 faddr_sect = find_pc_section (faddr);
3506 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3507 return faddr;
3508
3509 if (faddr_sect != NULL)
3510 {
3511 struct obj_section *osect;
3512 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3513 {
3514 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3515 break;
3516 }
3517
3518 if (osect < faddr_sect->objfile->sections_end)
3519 {
3520 CORE_ADDR addr = osect->addr ();
3521 CORE_ADDR endaddr = osect->endaddr ();
3522
3523 while (addr < endaddr)
3524 {
3525 int status;
3526 LONGEST faddr2;
3527 gdb_byte buf[8];
3528
3529 status = target_read_memory (addr, buf, sizeof (buf));
3530 if (status != 0)
3531 break;
3532 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3533
3534 if (faddr == faddr2)
3535 return addr;
3536
3537 addr += 16;
3538 }
3539 }
3540 }
3541 return 0;
3542 }
3543
3544 /* Attempt to find a function descriptor corresponding to the
3545 given address. If none is found, construct one on the
3546 stack using the address at fdaptr. */
3547
3548 static CORE_ADDR
3549 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3550 {
3551 struct gdbarch *gdbarch = regcache->arch ();
3552 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3553 CORE_ADDR fdesc;
3554
3555 fdesc = find_extant_func_descr (gdbarch, faddr);
3556
3557 if (fdesc == 0)
3558 {
3559 ULONGEST global_pointer;
3560 gdb_byte buf[16];
3561
3562 fdesc = *fdaptr;
3563 *fdaptr += 16;
3564
3565 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3566
3567 if (global_pointer == 0)
3568 regcache_cooked_read_unsigned (regcache,
3569 IA64_GR1_REGNUM, &global_pointer);
3570
3571 store_unsigned_integer (buf, 8, byte_order, faddr);
3572 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3573
3574 write_memory (fdesc, buf, 16);
3575 }
3576
3577 return fdesc;
3578 }
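
/* For illustration: the descriptor constructed above uses the standard
   ia64 function descriptor layout,

       offset 0:  8-byte function entry point (FADDR)
       offset 8:  8-byte global pointer (gp) for that function

   which is also the layout of the .opd entries that
   find_extant_func_descr () searches.  */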
3579
3580 /* Use the following routine when printing out function pointers
3581 so the user can see the function address rather than just the
3582 function descriptor. */
3583 static CORE_ADDR
3584 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3585 struct target_ops *targ)
3586 {
3587 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3588 struct obj_section *s;
3589 gdb_byte buf[8];
3590
3591 s = find_pc_section (addr);
3592
3593 /* Check if ADDR points to a function descriptor. */
3594 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3595 return read_memory_unsigned_integer (addr, 8, byte_order);
3596
3597 /* Normally, functions live inside a section that is executable.
3598 So, if ADDR points to a non-executable section, then treat it
3599 as a function descriptor and return the target address iff
3600 the target address itself points to a section that is executable.
3601 First check that the whole 8 bytes of memory are readable. */
3602 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3603 && target_read_memory (addr, buf, 8) == 0)
3604 {
3605 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3606 struct obj_section *pc_section = find_pc_section (pc);
3607
3608 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3609 return pc;
3610 }
3611
3612 /* There are also descriptors embedded in vtables. */
3613 if (s)
3614 {
3615 struct bound_minimal_symbol minsym;
3616
3617 minsym = lookup_minimal_symbol_by_pc (addr);
3618
3619 if (minsym.minsym
3620 && is_vtable_name (minsym.minsym->linkage_name ()))
3621 return read_memory_unsigned_integer (addr, 8, byte_order);
3622 }
3623
3624 return addr;
3625 }
3626
3627 static CORE_ADDR
3628 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3629 {
3630 return sp & ~0xfLL;
3631 }
3632
3633 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3634
3635 static void
3636 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3637 {
3638 ULONGEST cfm, pfs, new_bsp;
3639
3640 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3641
3642 new_bsp = rse_address_add (bsp, sof);
3643 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3644
3645 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3646 pfs &= 0xc000000000000000LL;
3647 pfs |= (cfm & 0xffffffffffffLL);
3648 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3649
3650 cfm &= 0xc000000000000000LL;
3651 cfm |= sof;
3652 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3653 }
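
/* Rough sketch of what the masks above accomplish (an interpretation;
   see the Itanium architecture manuals for the authoritative CFM/PFS
   field layout): the caller's frame marker (the low bits of CFM) is
   saved into the pfm portion of AR.PFS while the preserved top bits of
   PFS are kept, and the new CFM describes a frame of SOF registers with
   no locals and no rotation, so the dummy call sees a clean register
   frame while AR.PFS still records how to restore the caller's frame
   on return.  */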
3654
3655 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3656 ia64. */
3657
3658 static void
3659 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3660 int slotnum, gdb_byte *buf)
3661 {
3662 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3663 }
3664
3665 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3666
3667 static void
3668 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3669 {
3670 /* Nothing needed. */
3671 }
3672
3673 static CORE_ADDR
3674 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3675 struct regcache *regcache, CORE_ADDR bp_addr,
3676 int nargs, struct value **args, CORE_ADDR sp,
3677 function_call_return_method return_method,
3678 CORE_ADDR struct_addr)
3679 {
3680 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3681 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3682 int argno;
3683 struct value *arg;
3684 struct type *type;
3685 int len, argoffset;
3686 int nslots, rseslots, memslots, slotnum, nfuncargs;
3687 int floatreg;
3688 ULONGEST bsp;
3689 CORE_ADDR funcdescaddr, global_pointer;
3690 CORE_ADDR func_addr = find_function_addr (function, NULL);
3691
3692 nslots = 0;
3693 nfuncargs = 0;
3694 /* Count the number of slots needed for the arguments. */
3695 for (argno = 0; argno < nargs; argno++)
3696 {
3697 arg = args[argno];
3698 type = check_typedef (value_type (arg));
3699 len = TYPE_LENGTH (type);
3700
3701 if ((nslots & 1) && slot_alignment_is_next_even (type))
3702 nslots++;
3703
3704 if (type->code () == TYPE_CODE_FUNC)
3705 nfuncargs++;
3706
3707 nslots += (len + 7) / 8;
3708 }
3709
3710 /* Divvy up the slots between the RSE and the memory stack. */
3711 rseslots = (nslots > 8) ? 8 : nslots;
3712 memslots = nslots - rseslots;
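
/* Illustration: a call needing 10 argument slots would pass the first
   8 in the RSE backing store and the remaining 2 on the memory stack. */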
3713
3714 /* Allocate a new RSE frame. */
3715 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3716 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3717
3718 /* We will attempt to find function descriptors in the .opd segment,
3719 but if we can't we'll construct them ourselves. That being the
3720 case, we'll need to reserve space on the stack for them. */
3721 funcdescaddr = sp - nfuncargs * 16;
3722 funcdescaddr &= ~0xfLL;
3723
3724 /* Adjust the stack pointer to its new value. The calling conventions
3725 require us to have 16 bytes of scratch, plus whatever space is
3726 necessary for the memory slots and our function descriptors. */
3727 sp = sp - 16 - (memslots + nfuncargs) * 8;
3728 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3729
3730 /* Place the arguments where they belong. The arguments will be
3731 either placed in the RSE backing store or on the memory stack.
3732 In addition, floating point arguments or HFAs are placed in
3733 floating point registers. */
3734 slotnum = 0;
3735 floatreg = IA64_FR8_REGNUM;
3736 for (argno = 0; argno < nargs; argno++)
3737 {
3738 struct type *float_elt_type;
3739
3740 arg = args[argno];
3741 type = check_typedef (value_type (arg));
3742 len = TYPE_LENGTH (type);
3743
3744 /* Special handling for function parameters. */
3745 if (len == 8
3746 && type->code () == TYPE_CODE_PTR
3747 && TYPE_TARGET_TYPE (type)->code () == TYPE_CODE_FUNC)
3748 {
3749 gdb_byte val_buf[8];
3750 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3751 8, byte_order);
3752 store_unsigned_integer (val_buf, 8, byte_order,
3753 find_func_descr (regcache, faddr,
3754 &funcdescaddr));
3755 if (slotnum < rseslots)
3756 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3757 slotnum, val_buf);
3758 else
3759 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3760 slotnum++;
3761 continue;
3762 }
3763
3764 /* Normal slots. */
3765
3766 /* Skip odd slot if necessary... */
3767 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3768 slotnum++;
3769
3770 argoffset = 0;
3771 while (len > 0)
3772 {
3773 gdb_byte val_buf[8];
3774
3775 memset (val_buf, 0, 8);
3776 if (!ia64_struct_type_p (type) && len < 8)
3777 {
3778 /* Integral types are LSB-aligned, so we have to be careful
3779 to insert the argument on the correct side of the buffer.
3780 This is why we use store_unsigned_integer. */
3781 store_unsigned_integer
3782 (val_buf, 8, byte_order,
3783 extract_unsigned_integer (value_contents (arg), len,
3784 byte_order));
3785 }
3786 else
3787 {
3788 /* This is either a scalar type of 8 bytes or more, or an
3789 aggregate. For the scalar case there is no problem, we just
3790 copy the value over.
3791
3792 For aggregates, the only potentially tricky portion
3793 is the last block if it is less than 8 bytes long.
3794 In this case, the data is byte0-aligned. Happily,
3795 this means that we don't need to differentiate the
3796 handling of 8-byte blocks and less-than-8-byte blocks. */
3797 memcpy (val_buf, value_contents (arg) + argoffset,
3798 (len > 8) ? 8 : len);
3799 }
3800
3801 if (slotnum < rseslots)
3802 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3803 slotnum, val_buf);
3804 else
3805 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3806
3807 argoffset += 8;
3808 len -= 8;
3809 slotnum++;
3810 }
3811
3812 /* Handle floating point types (including HFAs). */
3813 float_elt_type = is_float_or_hfa_type (type);
3814 if (float_elt_type != NULL)
3815 {
3816 argoffset = 0;
3817 len = TYPE_LENGTH (type);
3818 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3819 {
3820 gdb_byte to[IA64_FP_REGISTER_SIZE];
3821 target_float_convert (value_contents (arg) + argoffset,
3822 float_elt_type, to,
3823 ia64_ext_type (gdbarch));
3824 regcache->cooked_write (floatreg, to);
3825 floatreg++;
3826 argoffset += TYPE_LENGTH (float_elt_type);
3827 len -= TYPE_LENGTH (float_elt_type);
3828 }
3829 }
3830 }
3831
3832 /* Store the struct return value in r8 if necessary. */
3833 if (return_method == return_method_struct)
3834 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3835 (ULONGEST) struct_addr);
3836
3837 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3838
3839 if (global_pointer != 0)
3840 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3841
3842 /* The following is not necessary on HP-UX, because we're using
3843 a dummy code sequence pushed on the stack to make the call, and
3844 this sequence doesn't need b0 to be set in order for our dummy
3845 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3846 it's needed for other OSes, so we do this unconditionally. */
3847 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3848
3849 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3850
3851 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3852
3853 return sp;
3854 }
3855
3856 static const struct ia64_infcall_ops ia64_infcall_ops =
3857 {
3858 ia64_allocate_new_rse_frame,
3859 ia64_store_argument_in_slot,
3860 ia64_set_function_addr
3861 };
3862
3863 static struct frame_id
3864 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3865 {
3866 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3867 gdb_byte buf[8];
3868 CORE_ADDR sp, bsp;
3869
3870 get_frame_register (this_frame, sp_regnum, buf);
3871 sp = extract_unsigned_integer (buf, 8, byte_order);
3872
3873 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3874 bsp = extract_unsigned_integer (buf, 8, byte_order);
3875
3876 if (gdbarch_debug >= 1)
3877 fprintf_unfiltered (gdb_stdlog,
3878 "dummy frame id: code %s, stack %s, special %s\n",
3879 paddress (gdbarch, get_frame_pc (this_frame)),
3880 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3881
3882 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3883 }
3884
3885 static CORE_ADDR
3886 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3887 {
3888 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3889 gdb_byte buf[8];
3890 CORE_ADDR ip, psr, pc;
3891
3892 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3893 ip = extract_unsigned_integer (buf, 8, byte_order);
3894 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3895 psr = extract_unsigned_integer (buf, 8, byte_order);
3896
3897 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3898 return pc;
3899 }
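
/* Worked example of the slot recovery above (illustration only): with
   an unwound IP of 0x4000000000001230 and PSR.ri = 2 (bits 41:42 of
   the PSR), the computed "pc" is 0x4000000000001232 -- the bundle
   address with the slot number encoded in the low nibble.  */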
3900
3901 static int
3902 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3903 {
3904 info->bytes_per_line = SLOT_MULTIPLIER;
3905 return default_print_insn (memaddr, info);
3906 }
3907
3908 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3909
3910 static int
3911 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3912 {
3913 return (cfm & 0x7f);
3914 }
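
/* Note (interpretation): CFM.sof lives in the low 7 bits of the frame
   marker, so this yields the total number of stacked registers
   (starting at r32) in the current frame.  */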
3915
3916 static struct gdbarch *
3917 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3918 {
3919 struct gdbarch *gdbarch;
3920 struct gdbarch_tdep *tdep;
3921
3922 /* If there is already a candidate, use it. */
3923 arches = gdbarch_list_lookup_by_info (arches, &info);
3924 if (arches != NULL)
3925 return arches->gdbarch;
3926
3927 tdep = XCNEW (struct gdbarch_tdep);
3928 gdbarch = gdbarch_alloc (&info, tdep);
3929
3930 tdep->size_of_register_frame = ia64_size_of_register_frame;
3931
3932 /* According to the ia64 specs, instructions that store long double
3933 floats in memory use a long-double format different from that
3934 used in the floating registers. The memory format matches the
3935 x86 extended float format which is 80 bits. An OS may choose to
3936 use this format (e.g. GNU/Linux) or choose to use a different
3937 format for storing long doubles (e.g. HP-UX). In the latter case,
3938 the setting of the format may be moved/overridden in an
3939 OS-specific tdep file. */
3940 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3941
3942 set_gdbarch_short_bit (gdbarch, 16);
3943 set_gdbarch_int_bit (gdbarch, 32);
3944 set_gdbarch_long_bit (gdbarch, 64);
3945 set_gdbarch_long_long_bit (gdbarch, 64);
3946 set_gdbarch_float_bit (gdbarch, 32);
3947 set_gdbarch_double_bit (gdbarch, 64);
3948 set_gdbarch_long_double_bit (gdbarch, 128);
3949 set_gdbarch_ptr_bit (gdbarch, 64);
3950
3951 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3952 set_gdbarch_num_pseudo_regs (gdbarch,
3953 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3954 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3955 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3956
3957 set_gdbarch_register_name (gdbarch, ia64_register_name);
3958 set_gdbarch_register_type (gdbarch, ia64_register_type);
3959
3960 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3961 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3962 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3963 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3964 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3965 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3966 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3967
3968 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3969
3970 set_gdbarch_return_value (gdbarch, ia64_return_value);
3971
3972 set_gdbarch_memory_insert_breakpoint (gdbarch,
3973 ia64_memory_insert_breakpoint);
3974 set_gdbarch_memory_remove_breakpoint (gdbarch,
3975 ia64_memory_remove_breakpoint);
3976 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3977 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3978 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3979 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3980
3981 /* Settings for calling functions in the inferior. */
3982 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3983 tdep->infcall_ops = ia64_infcall_ops;
3984 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3985 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3986
3987 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
3988 #ifdef HAVE_LIBUNWIND_IA64_H
3989 frame_unwind_append_unwinder (gdbarch,
3990 &ia64_libunwind_sigtramp_frame_unwind);
3991 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
3992 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3993 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
3994 #else
3995 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3996 #endif
3997 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
3998 frame_base_set_default (gdbarch, &ia64_frame_base);
3999
4000 /* Settings that should be unnecessary. */
4001 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4002
4003 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4004 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4005 ia64_convert_from_func_ptr_addr);
4006
4007 /* The virtual table contains 16-byte descriptors, not pointers to
4008 descriptors. */
4009 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4010
4011 /* Hook in ABI-specific overrides, if they have been registered. */
4012 gdbarch_init_osabi (info, gdbarch);
4013
4014 return gdbarch;
4015 }
4016
4017 void _initialize_ia64_tdep ();
4018 void
4019 _initialize_ia64_tdep ()
4020 {
4021 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4022 }