1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libc
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72
73 #endif
74
75 /* An enumeration of the different IA-64 instruction types. */
76
77 typedef enum instruction_type
78 {
79 A, /* Integer ALU ; I-unit or M-unit */
80 I, /* Non-ALU integer; I-unit */
81 M, /* Memory ; M-unit */
82 F, /* Floating-point ; F-unit */
83 B, /* Branch ; B-unit */
84 L, /* Extended (L+X) ; I-unit */
85 X, /* Extended (L+X) ; I-unit */
86 undefined /* undefined or reserved */
87 } instruction_type;
88
89 /* We represent IA-64 PC addresses as the value of the instruction
90 pointer or'd with some bit combination in the low nibble which
91 represents the slot number in the bundle addressed by the
92 instruction pointer. The problem is that the Linux kernel
93 multiplies its slot numbers (for exceptions) by one while the
94 disassembler multiplies its slot numbers by 6. In addition, I've
95 heard it said that the simulator uses 1 as the multiplier.
96
97 I've fixed the disassembler so that the bytes_per_line field will
98 be the slot multiplier. If bytes_per_line comes in as zero, it
99 is set to six (which is how it was set up initially). -- objdump
100 displays pretty disassembly dumps with this value. For our purposes,
101 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
102 never want to also display the raw bytes the way objdump does. */
103
104 #define SLOT_MULTIPLIER 1
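/* Illustrative example (not part of the original code): with
   SLOT_MULTIPLIER set to 1, the PC value 0x4000000000000012 names slot 2
   of the bundle starting at 0x4000000000000010; with the disassembler's
   multiplier of 6, the same slot would be written as 0x400000000000001c.  */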
105
106 /* Length in bytes of an instruction bundle. */
107
108 #define BUNDLE_LEN 16
109
110 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
111
112 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
113 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
114 #endif
115
116 static gdbarch_init_ftype ia64_gdbarch_init;
117
118 static gdbarch_register_name_ftype ia64_register_name;
119 static gdbarch_register_type_ftype ia64_register_type;
120 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
121 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
122 static struct type *is_float_or_hfa_type (struct type *t);
123 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
124 CORE_ADDR faddr);
125
126 #define NUM_IA64_RAW_REGS 462
127
128 /* Big enough to hold a FP register in bytes. */
129 #define IA64_FP_REGISTER_SIZE 16
130
131 static int sp_regnum = IA64_GR12_REGNUM;
132
133 /* NOTE: we treat the register stack registers r32-r127 as
134 pseudo-registers because they may not be accessible via the ptrace
135 register get/set interfaces. */
136
137 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
138 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
139 V127_REGNUM = V32_REGNUM + 95,
140 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
141 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
142
143 /* Array of register names; There should be ia64_num_regs strings in
144 the initializer. */
145
146 static const char *ia64_register_names[] =
147 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
148 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
149 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
150 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
151 "", "", "", "", "", "", "", "",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163
164 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
165 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
166 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
167 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
168 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
169 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
170 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
171 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
172 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
173 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
174 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
175 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
176 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
177 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
178 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
179 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
180
181 "", "", "", "", "", "", "", "",
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189
190 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
191
192 "vfp", "vrap",
193
194 "pr", "ip", "psr", "cfm",
195
196 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
197 "", "", "", "", "", "", "", "",
198 "rsc", "bsp", "bspstore", "rnat",
199 "", "fcr", "", "",
200 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
201 "ccv", "", "", "", "unat", "", "", "",
202 "fpsr", "", "", "", "itc",
203 "", "", "", "", "", "", "", "", "", "",
204 "", "", "", "", "", "", "", "", "",
205 "pfs", "lc", "ec",
206 "", "", "", "", "", "", "", "", "", "",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "",
213 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
214 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
215 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
216 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
217 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
218 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
219 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
220 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
221 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
222 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
223 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
224 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
225 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
226 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
227 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
228 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
229
230 "bof",
231
232 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
233 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
234 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
235 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
236 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
237 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
238 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
239 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
240 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
241 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
242 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
243 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
244
245 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
246 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
247 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
248 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
249 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
250 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
251 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
252 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
253 };
254
255 struct ia64_frame_cache
256 {
257 CORE_ADDR base; /* frame pointer base for frame */
258 CORE_ADDR pc; /* function start pc for frame */
259 CORE_ADDR saved_sp; /* stack pointer for frame */
260 CORE_ADDR bsp; /* points at r32 for the current frame */
261 CORE_ADDR cfm; /* cfm value for current frame */
262 CORE_ADDR prev_cfm; /* cfm value for previous frame */
263 int frameless;
264 int sof; /* Size of frame (decoded from cfm value). */
265 int sol; /* Size of locals (decoded from cfm value). */
266 int sor; /* Number of rotating registers (decoded from
267 cfm value). */
268 CORE_ADDR after_prologue;
269 /* Address of first instruction after the last
270 prologue instruction; Note that there may
271 be instructions from the function's body
272 intermingled with the prologue. */
273 int mem_stack_frame_size;
274 /* Size of the memory stack frame (may be zero),
275 or -1 if it has not been determined yet. */
276 int fp_reg; /* Register number (if any) used as a frame pointer
277 for this frame. 0 if no register is being used
278 as the frame pointer. */
279
280 /* Saved registers. */
281 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
282
283 };
284
285 static int
286 floatformat_valid (const struct floatformat *fmt, const void *from)
287 {
288 return 1;
289 }
290
291 static const struct floatformat floatformat_ia64_ext_little =
292 {
293 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
294 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
295 };
296
297 static const struct floatformat floatformat_ia64_ext_big =
298 {
299 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
300 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
301 };
302
303 static const struct floatformat *floatformats_ia64_ext[2] =
304 {
305 &floatformat_ia64_ext_big,
306 &floatformat_ia64_ext_little
307 };
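/* For reference, the initializers above describe the 82-bit register
   format: a sign bit, a 17-bit exponent with bias 65535, and a 64-bit
   significand with an explicit integer bit; the big- and little-endian
   variants differ only in where those fields sit within the value.  */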
308
309 static struct type *
310 ia64_ext_type (struct gdbarch *gdbarch)
311 {
312 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
313
314 if (!tdep->ia64_ext_type)
315 tdep->ia64_ext_type
316 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
317 floatformats_ia64_ext);
318
319 return tdep->ia64_ext_type;
320 }
321
322 static int
323 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
324 struct reggroup *group)
325 {
326 int vector_p;
327 int float_p;
328 int raw_p;
329 if (group == all_reggroup)
330 return 1;
331 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
332 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
333 raw_p = regnum < NUM_IA64_RAW_REGS;
334 if (group == float_reggroup)
335 return float_p;
336 if (group == vector_reggroup)
337 return vector_p;
338 if (group == general_reggroup)
339 return (!vector_p && !float_p);
340 if (group == save_reggroup || group == restore_reggroup)
341 return raw_p;
342 return 0;
343 }
344
345 static const char *
346 ia64_register_name (struct gdbarch *gdbarch, int reg)
347 {
348 return ia64_register_names[reg];
349 }
350
351 struct type *
352 ia64_register_type (struct gdbarch *arch, int reg)
353 {
354 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
355 return ia64_ext_type (arch);
356 else
357 return builtin_type (arch)->builtin_long;
358 }
359
360 static int
361 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
362 {
363 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
364 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
365 return reg;
366 }
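/* In other words, DWARF numbers for the stacked registers r32-r127 are
   remapped onto the V32_REGNUM-based pseudo registers (see the
   pseudo-register note earlier in this file); all other DWARF numbers
   map straight through.  */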
367
368
369 /* Extract ``len'' bits from an instruction bundle starting at
370 bit ``from''. */
371
372 static long long
373 extract_bit_field (const gdb_byte *bundle, int from, int len)
374 {
375 long long result = 0LL;
376 int to = from + len;
377 int from_byte = from / 8;
378 int to_byte = to / 8;
379 unsigned char *b = (unsigned char *) bundle;
380 unsigned char c;
381 int lshift;
382 int i;
383
384 c = b[from_byte];
385 if (from_byte == to_byte)
386 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
387 result = c >> (from % 8);
388 lshift = 8 - (from % 8);
389
390 for (i = from_byte+1; i < to_byte; i++)
391 {
392 result |= ((long long) b[i]) << lshift;
393 lshift += 8;
394 }
395
396 if (from_byte < to_byte && (to % 8 != 0))
397 {
398 c = b[to_byte];
399 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
400 result |= ((long long) c) << lshift;
401 }
402
403 return result;
404 }
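/* For example (illustrative only): extract_bit_field (bundle, 0, 5)
   yields the bundle's 5-bit template field, while
   extract_bit_field (bundle, 5 + 41 * N, 41) yields the contents of
   slot N, as done by slotN_contents below.  */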
405
406 /* Replace the specified bits in an instruction bundle. */
407
408 static void
409 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
410 {
411 int to = from + len;
412 int from_byte = from / 8;
413 int to_byte = to / 8;
414 unsigned char *b = (unsigned char *) bundle;
415 unsigned char c;
416
417 if (from_byte == to_byte)
418 {
419 unsigned char left, right;
420 c = b[from_byte];
421 left = (c >> (to % 8)) << (to % 8);
422 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
423 c = (unsigned char) (val & 0xff);
424 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
425 c |= right | left;
426 b[from_byte] = c;
427 }
428 else
429 {
430 int i;
431 c = b[from_byte];
432 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
433 c = c | (val << (from % 8));
434 b[from_byte] = c;
435 val >>= 8 - from % 8;
436
437 for (i = from_byte+1; i < to_byte; i++)
438 {
439 c = val & 0xff;
440 val >>= 8;
441 b[i] = c;
442 }
443
444 if (to % 8 != 0)
445 {
446 unsigned char cv = (unsigned char) val;
447 c = b[to_byte];
448 c = c >> (to % 8) << (to % 8);
449 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
450 b[to_byte] = c;
451 }
452 }
453 }
454
455 /* Return the contents of slot N (for N = 0, 1, or 2) in
456 an instruction bundle. */
457
458 static long long
459 slotN_contents (gdb_byte *bundle, int slotnum)
460 {
461 return extract_bit_field (bundle, 5+41*slotnum, 41);
462 }
463
464 /* Store an instruction in an instruction bundle. */
465
466 static void
467 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
468 {
469 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
470 }
471
472 static const enum instruction_type template_encoding_table[32][3] =
473 {
474 { M, I, I }, /* 00 */
475 { M, I, I }, /* 01 */
476 { M, I, I }, /* 02 */
477 { M, I, I }, /* 03 */
478 { M, L, X }, /* 04 */
479 { M, L, X }, /* 05 */
480 { undefined, undefined, undefined }, /* 06 */
481 { undefined, undefined, undefined }, /* 07 */
482 { M, M, I }, /* 08 */
483 { M, M, I }, /* 09 */
484 { M, M, I }, /* 0A */
485 { M, M, I }, /* 0B */
486 { M, F, I }, /* 0C */
487 { M, F, I }, /* 0D */
488 { M, M, F }, /* 0E */
489 { M, M, F }, /* 0F */
490 { M, I, B }, /* 10 */
491 { M, I, B }, /* 11 */
492 { M, B, B }, /* 12 */
493 { M, B, B }, /* 13 */
494 { undefined, undefined, undefined }, /* 14 */
495 { undefined, undefined, undefined }, /* 15 */
496 { B, B, B }, /* 16 */
497 { B, B, B }, /* 17 */
498 { M, M, B }, /* 18 */
499 { M, M, B }, /* 19 */
500 { undefined, undefined, undefined }, /* 1A */
501 { undefined, undefined, undefined }, /* 1B */
502 { M, F, B }, /* 1C */
503 { M, F, B }, /* 1D */
504 { undefined, undefined, undefined }, /* 1E */
505 { undefined, undefined, undefined }, /* 1F */
506 };
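/* Note, for example, that templates 04 and 05 above are the MLX bundles:
   the L part occupies slot 1 and the X part slot 2, which is why the
   breakpoint code below has to special-case L and X slots.  */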
507
508 /* Fetch and (partially) decode an instruction at ADDR and return the
509 address of the next instruction to fetch. */
510
511 static CORE_ADDR
512 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
513 {
514 gdb_byte bundle[BUNDLE_LEN];
515 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
516 long long templ;
517 int val;
518
519 /* Warn about slot numbers greater than 2. We used to generate
520 an error here on the assumption that the user entered an invalid
521 address. But, sometimes GDB itself requests an invalid address.
522 This can (easily) happen when execution stops in a function for
523 which there are no symbols. The prologue scanner will attempt to
524 find the beginning of the function - if the nearest symbol
525 happens to not be aligned on a bundle boundary (16 bytes), the
526 resulting starting address will cause GDB to think that the slot
527 number is too large.
528
529 So we warn about it and set the slot number to zero. It is
530 not necessarily a fatal condition, particularly if debugging
531 at the assembly language level. */
532 if (slotnum > 2)
533 {
534 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
535 "Using slot 0 instead"));
536 slotnum = 0;
537 }
538
539 addr &= ~0x0f;
540
541 val = target_read_memory (addr, bundle, BUNDLE_LEN);
542
543 if (val != 0)
544 return 0;
545
546 *instr = slotN_contents (bundle, slotnum);
547 templ = extract_bit_field (bundle, 0, 5);
548 *it = template_encoding_table[(int)templ][slotnum];
549
550 if (slotnum == 2 || (slotnum == 1 && *it == L))
551 addr += 16;
552 else
553 addr += (slotnum + 1) * SLOT_MULTIPLIER;
554
555 return addr;
556 }
557
558 /* There are 5 different break instructions (break.i, break.b,
559 break.m, break.f, and break.x), but they all have the same
560 encoding. (The five bit template in the low five bits of the
561 instruction bundle distinguishes one from another.)
562
563 The runtime architecture manual specifies that break instructions
564 used for debugging purposes must have the upper two bits of the 21
565 bit immediate set to a 0 and a 1 respectively. A breakpoint
566 instruction encodes the most significant bit of its 21 bit
567 immediate at bit 36 of the 41 bit instruction. The penultimate msb
568 is at bit 25 which leads to the pattern below.
569
570 Originally, I had this set up to do, e.g., a "break.i 0x80000". But
571 it turns out that 0x80000 was used as the syscall break in the early
572 simulators. So I changed the pattern slightly to do "break.i 0x080001"
573 instead. But that didn't work either (I later found out that this
574 pattern was used by the simulator that I was using.) So I ended up
575 using the pattern seen below.
576
577 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
578 while we need bit-based addressing as the instruction length is 41 bits and
579 we must not modify/corrupt the adjacent slots in the same bundle.
580 Fortunately we may store larger memory incl. the adjacent bits with the
581 original memory content (not the possibly already stored breakpoints there).
582 We need to be careful in ia64_memory_remove_breakpoint to always restore
583 only the specific bits of this instruction ignoring any adjacent stored
584 bits.
585
586 We use the original addressing with the low nibble in the range <0..2> which
587 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
588 as the direct byte offset of SHADOW_CONTENTS. We store the whole BUNDLE_LEN
589 bytes minus these (at most two) skipped bytes, so that we do not run into the
590 next bundle.
591
592 If we wanted to store the whole bundle to SHADOW_CONTENTS, we would have
593 to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
594 In that case there would be no other place to store
595 SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
596 SLOTNUM in ia64_memory_remove_breakpoint.
597
598 There is one special case where we need to be extra careful:
599 L-X instructions, which are instructions that occupy 2 slots
600 (The L part is always in slot 1, and the X part is always in
601 slot 2). We must refuse to insert breakpoints for an address
602 that points at slot 2 of a bundle where an L-X instruction is
603 present, since there is logically no instruction at that address.
604 However, to make things more interesting, the opcode of L-X
605 instructions is located in slot 2. This means that, to insert
606 a breakpoint at an address that points to slot 1, we actually
607 need to write the breakpoint in slot 2! Slot 1 is actually
608 the extended operand, so writing the breakpoint there would not
609 have the desired effect. Another side-effect of this issue
610 is that we need to make sure that the shadow contents buffer
611 does save byte 15 of our instruction bundle (this is the tail
612 end of slot 2, which wouldn't be saved if we were to insert
613 the breakpoint in slot 1).
614
615 ia64 16-byte bundle layout:
616 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
617
618 The current addressing used by the code below:
619 original PC placed_address placed_size required covered
620 == bp_tgt->shadow_len reqd \subset covered
621 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
622 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
623 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
624
625 L-X instructions are treated a little specially, as explained above:
626 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
627
628 `objdump -d' and some other tools show somewhat misleading offsets:
629 original PC byte where the instruction starts objdump offset
630 0xABCDE0 0xABCDE0 0xABCDE0
631 0xABCDE1 0xABCDE5 0xABCDE6
632 0xABCDE2 0xABCDEA 0xABCDEC
633 */
634
635 #define IA64_BREAKPOINT 0x00003333300LL
636
637 static int
638 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
639 struct bp_target_info *bp_tgt)
640 {
641 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
642 gdb_byte bundle[BUNDLE_LEN];
643 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
644 long long instr_breakpoint;
645 int val;
646 int templ;
647
648 if (slotnum > 2)
649 error (_("Can't insert breakpoint for slot numbers greater than 2."));
650
651 addr &= ~0x0f;
652
653 /* Enable the automatic memory restoration from breakpoints while
654 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
655 Otherwise, we could possibly store into the shadow parts of the adjacent
656 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
657 breakpoint instruction bits region. */
658 scoped_restore restore_memory_0
659 = make_scoped_restore_show_memory_breakpoints (0);
660 val = target_read_memory (addr, bundle, BUNDLE_LEN);
661 if (val != 0)
662 return val;
663
664 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
665 for addressing the SHADOW_CONTENTS placement. */
666 shadow_slotnum = slotnum;
667
668 /* Always cover the last byte of the bundle in case we are inserting
669 a breakpoint on an L-X instruction. */
670 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
671
672 templ = extract_bit_field (bundle, 0, 5);
673 if (template_encoding_table[templ][slotnum] == X)
674 {
675 /* X unit types can only be used in slot 2, and are actually
676 part of a 2-slot L-X instruction. We cannot break at this
677 address, as this is the second half of an instruction that
678 lives in slot 1 of that bundle. */
679 gdb_assert (slotnum == 2);
680 error (_("Can't insert breakpoint for non-existing slot X"));
681 }
682 if (template_encoding_table[templ][slotnum] == L)
683 {
684 /* L unit types can only be used in slot 1. But the associated
685 opcode for that instruction is in slot 2, so bump the slot number
686 accordingly. */
687 gdb_assert (slotnum == 1);
688 slotnum = 2;
689 }
690
691 /* Store the whole bundle, except for the initial bytes skipped according to
692 the slot number, which is interpreted as a byte offset in PLACED_ADDRESS. */
693 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
694 bp_tgt->shadow_len);
695
696 /* Re-read the same bundle as above except that, this time, read it in order
697 to compute the new bundle inside which we will be inserting the
698 breakpoint. Therefore, disable the automatic memory restoration from
699 breakpoints while we read our instruction bundle. Otherwise, the general
700 restoration mechanism kicks in and we would possibly remove parts of the
701 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
702 the real breakpoint instruction bits region. */
703 scoped_restore restore_memory_1
704 = make_scoped_restore_show_memory_breakpoints (1);
705 val = target_read_memory (addr, bundle, BUNDLE_LEN);
706 if (val != 0)
707 return val;
708
709 /* Breakpoints already present in the code will get detected and not get
710 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
711 location cannot induce the internal error as they are optimized into
712 a single instance by update_global_location_list. */
713 instr_breakpoint = slotN_contents (bundle, slotnum);
714 if (instr_breakpoint == IA64_BREAKPOINT)
715 internal_error (__FILE__, __LINE__,
716 _("Address %s already contains a breakpoint."),
717 paddress (gdbarch, bp_tgt->placed_address));
718 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
719
720 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
721 bp_tgt->shadow_len);
722
723 return val;
724 }
725
726 static int
727 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
728 struct bp_target_info *bp_tgt)
729 {
730 CORE_ADDR addr = bp_tgt->placed_address;
731 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
732 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
733 long long instr_breakpoint, instr_saved;
734 int val;
735 int templ;
736
737 addr &= ~0x0f;
738
739 /* Disable the automatic memory restoration from breakpoints while
740 we read our instruction bundle. Otherwise, the general restoration
741 mechanism kicks in and we would possibly remove parts of the adjacent
742 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
743 breakpoint instruction bits region. */
744 scoped_restore restore_memory_1
745 = make_scoped_restore_show_memory_breakpoints (1);
746 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
747 if (val != 0)
748 return val;
749
750 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
751 for addressing the SHADOW_CONTENTS placement. */
752 shadow_slotnum = slotnum;
753
754 templ = extract_bit_field (bundle_mem, 0, 5);
755 if (template_encoding_table[templ][slotnum] == X)
756 {
757 /* X unit types can only be used in slot 2, and are actually
758 part of a 2-slot L-X instruction. We refuse to insert
759 breakpoints at this address, so there should be no reason
760 for us attempting to remove one there, except if the program's
761 code somehow got modified in memory. */
762 gdb_assert (slotnum == 2);
763 warning (_("Cannot remove breakpoint at address %s from non-existing "
764 "X-type slot, memory has changed underneath"),
765 paddress (gdbarch, bp_tgt->placed_address));
766 return -1;
767 }
768 if (template_encoding_table[templ][slotnum] == L)
769 {
770 /* L unit types can only be used in slot 1. But the breakpoint
771 was actually saved using slot 2, so update the slot number
772 accordingly. */
773 gdb_assert (slotnum == 1);
774 slotnum = 2;
775 }
776
777 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
778
779 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
780 if (instr_breakpoint != IA64_BREAKPOINT)
781 {
782 warning (_("Cannot remove breakpoint at address %s, "
783 "no break instruction at such address."),
784 paddress (gdbarch, bp_tgt->placed_address));
785 return -1;
786 }
787
788 /* Extract the original saved instruction from SLOTNUM normalizing its
789 bit-shift for INSTR_SAVED. */
790 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
791 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
792 bp_tgt->shadow_len);
793 instr_saved = slotN_contents (bundle_saved, slotnum);
794
795 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
796 and not any of the other ones that are stored in SHADOW_CONTENTS. */
797 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
798 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
799
800 return val;
801 }
802
803 /* Implement the breakpoint_kind_from_pc gdbarch method. */
804
805 static int
806 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
807 {
808 /* A placeholder for the gdbarch method breakpoint_kind_from_pc. */
809 return 0;
810 }
811
812 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
813 instruction slot ranges are bit-granular (41 bits), we have to provide an
814 extended range as described for ia64_memory_insert_breakpoint. We also take
815 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
816 make a match for permanent breakpoints. */
817
818 static const gdb_byte *
819 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
820 CORE_ADDR *pcptr, int *lenptr)
821 {
822 CORE_ADDR addr = *pcptr;
823 static gdb_byte bundle[BUNDLE_LEN];
824 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
825 long long instr_fetched;
826 int val;
827 int templ;
828
829 if (slotnum > 2)
830 error (_("Can't insert breakpoint for slot numbers greater than 2."));
831
832 addr &= ~0x0f;
833
834 /* Enable the automatic memory restoration from breakpoints while
835 we read our instruction bundle to match bp_loc_is_permanent. */
836 {
837 scoped_restore restore_memory_0
838 = make_scoped_restore_show_memory_breakpoints (0);
839 val = target_read_memory (addr, bundle, BUNDLE_LEN);
840 }
841
842 /* The memory might be unreachable. This can happen, for instance,
843 when the user inserts a breakpoint at an invalid address. */
844 if (val != 0)
845 return NULL;
846
847 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
848 for addressing the SHADOW_CONTENTS placement. */
849 shadow_slotnum = slotnum;
850
851 /* Always cover the last byte of the bundle for the L-X slot case. */
852 *lenptr = BUNDLE_LEN - shadow_slotnum;
853
854 /* Check for an L-type instruction in slot 1; if present, bump the slot
855 number up to slot 2. */
856 templ = extract_bit_field (bundle, 0, 5);
857 if (template_encoding_table[templ][slotnum] == X)
858 {
859 gdb_assert (slotnum == 2);
860 error (_("Can't insert breakpoint for non-existing slot X"));
861 }
862 if (template_encoding_table[templ][slotnum] == L)
863 {
864 gdb_assert (slotnum == 1);
865 slotnum = 2;
866 }
867
868 /* A break instruction has all its opcode bits cleared except for
869 the parameter value. For an L+X slot pair we are at the X slot (slot 2) so
870 we should not touch the L slot - the upper 41 bits of the parameter. */
871 instr_fetched = slotN_contents (bundle, slotnum);
872 instr_fetched &= 0x1003ffffc0LL;
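/* The mask above keeps only the 21-bit `break' parameter - bits 6..25
   plus its most significant bit at bit 36 - and clears the remaining
   bits of the slot, so that permanent breakpoints match regardless of
   the parameter value they were planted with.  */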
873 replace_slotN_contents (bundle, instr_fetched, slotnum);
874
875 return bundle + shadow_slotnum;
876 }
877
878 static CORE_ADDR
879 ia64_read_pc (readable_regcache *regcache)
880 {
881 ULONGEST psr_value, pc_value;
882 int slot_num;
883
884 regcache->cooked_read (IA64_PSR_REGNUM, &psr_value);
885 regcache->cooked_read (IA64_IP_REGNUM, &pc_value);
886 slot_num = (psr_value >> 41) & 3;
887
888 return pc_value | (slot_num * SLOT_MULTIPLIER);
889 }
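/* For illustration (with SLOT_MULTIPLIER equal to 1): if the 2-bit slot
   field at bits 41..42 of the PSR is 2 and the ip register holds
   0x4000000000000010, the function above returns 0x4000000000000012,
   i.e. slot 2 of that bundle; ia64_write_pc below performs the inverse
   split.  */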
890
891 void
892 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
893 {
894 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
895 ULONGEST psr_value;
896
897 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
898 psr_value &= ~(3LL << 41);
899 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
900
901 new_pc &= ~0xfLL;
902
903 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
904 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
905 }
906
907 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
908
909 /* Returns the address of the slot that's NSLOTS slots away from
910 the address ADDR. NSLOTS may be positive or negative. */
911 static CORE_ADDR
912 rse_address_add(CORE_ADDR addr, int nslots)
913 {
914 CORE_ADDR new_addr;
915 int mandatory_nat_slots = nslots / 63;
916 int direction = nslots < 0 ? -1 : 1;
917
918 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
919
920 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
921 new_addr += 8 * direction;
922
923 if (IS_NaT_COLLECTION_ADDR(new_addr))
924 new_addr += 8 * direction;
925
926 return new_addr;
927 }
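/* Worked example (illustrative only): rse_address_add (0x600000000000f1f0, 1)
   first computes 0x600000000000f1f8, but that address has bits 3..8 all
   set - it is a NaT collection slot - so the result is bumped one more
   slot, to 0x600000000000f200.  */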
928
929 static enum register_status
930 ia64_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
931 int regnum, gdb_byte *buf)
932 {
933 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
934 enum register_status status;
935
936 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
937 {
938 #ifdef HAVE_LIBUNWIND_IA64_H
939 /* First try to use the libunwind special reg accessor,
940 otherwise fall back to standard logic. */
941 if (!libunwind_is_initialized ()
942 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
943 #endif
944 {
945 /* The fallback position is to assume that r32-r127 are
946 found sequentially in memory starting at $bof. This
947 isn't always true, but without libunwind, this is the
948 best we can do. */
949 enum register_status status;
950 ULONGEST cfm;
951 ULONGEST bsp;
952 CORE_ADDR reg;
953
954 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
955 if (status != REG_VALID)
956 return status;
957
958 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
959 if (status != REG_VALID)
960 return status;
961
962 /* The bsp points at the end of the register frame so we
963 subtract the size of frame from it to get start of
964 register frame. */
965 bsp = rse_address_add (bsp, -(cfm & 0x7f));
966
967 if ((cfm & 0x7f) > regnum - V32_REGNUM)
968 {
969 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
970 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
971 store_unsigned_integer (buf, register_size (gdbarch, regnum),
972 byte_order, reg);
973 }
974 else
975 store_unsigned_integer (buf, register_size (gdbarch, regnum),
976 byte_order, 0);
977 }
978 }
979 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
980 {
981 ULONGEST unatN_val;
982 ULONGEST unat;
983
984 status = regcache->cooked_read (IA64_UNAT_REGNUM, &unat);
985 if (status != REG_VALID)
986 return status;
987 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
988 store_unsigned_integer (buf, register_size (gdbarch, regnum),
989 byte_order, unatN_val);
990 }
991 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
992 {
993 ULONGEST natN_val = 0;
994 ULONGEST bsp;
995 ULONGEST cfm;
996 CORE_ADDR gr_addr = 0;
997
998 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
999 if (status != REG_VALID)
1000 return status;
1001
1002 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1003 if (status != REG_VALID)
1004 return status;
1005
1006 /* The bsp points at the end of the register frame so we
1007 subtract the size of frame from it to get start of register frame. */
1008 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1009
1010 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1011 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1012
1013 if (gr_addr != 0)
1014 {
1015 /* Compute address of nat collection bits. */
1016 CORE_ADDR nat_addr = gr_addr | 0x1f8;
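/* The `| 0x1f8' above sets address bits 3..8, i.e. it selects the NaT
   collection slot of the 512-byte RSE region containing GR_ADDR
   (cf. IS_NaT_COLLECTION_ADDR).  */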
1017 ULONGEST nat_collection;
1018 int nat_bit;
1019 /* If our nat collection address is bigger than bsp, we have to get
1020 the nat collection from rnat. Otherwise, we fetch the nat
1021 collection from the computed address. */
1022 if (nat_addr >= bsp)
1023 regcache->cooked_read (IA64_RNAT_REGNUM, &nat_collection);
1024 else
1025 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1026 nat_bit = (gr_addr >> 3) & 0x3f;
1027 natN_val = (nat_collection >> nat_bit) & 1;
1028 }
1029
1030 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1031 byte_order, natN_val);
1032 }
1033 else if (regnum == VBOF_REGNUM)
1034 {
1035 /* A virtual register frame start is provided for user convenience.
1036 It can be calculated as the bsp - sof (size of frame). */
1037 ULONGEST bsp, vbsp;
1038 ULONGEST cfm;
1039
1040 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1041 if (status != REG_VALID)
1042 return status;
1043 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1044 if (status != REG_VALID)
1045 return status;
1046
1047 /* The bsp points at the end of the register frame so we
1048 subtract the size of frame from it to get beginning of frame. */
1049 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1050 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1051 byte_order, vbsp);
1052 }
1053 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1054 {
1055 ULONGEST pr;
1056 ULONGEST cfm;
1057 ULONGEST prN_val;
1058
1059 status = regcache->cooked_read (IA64_PR_REGNUM, &pr);
1060 if (status != REG_VALID)
1061 return status;
1062 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1063 if (status != REG_VALID)
1064 return status;
1065
1066 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1067 {
1068 /* Fetch predicate register rename base from current frame
1069 marker for this frame. */
1070 int rrb_pr = (cfm >> 32) & 0x3f;
1071
1072 /* Adjust the register number to account for register rotation. */
1073 regnum = VP16_REGNUM
1074 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1075 }
1076 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1077 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1078 byte_order, prN_val);
1079 }
1080 else
1081 memset (buf, 0, register_size (gdbarch, regnum));
1082
1083 return REG_VALID;
1084 }
1085
1086 static void
1087 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1088 int regnum, const gdb_byte *buf)
1089 {
1090 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1091
1092 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1093 {
1094 ULONGEST bsp;
1095 ULONGEST cfm;
1096 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1097 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1098
1099 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1100
1101 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1102 {
1103 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1104 write_memory (reg_addr, buf, 8);
1105 }
1106 }
1107 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1108 {
1109 ULONGEST unatN_val, unat, unatN_mask;
1110 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1111 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1112 regnum),
1113 byte_order);
1114 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1115 if (unatN_val == 0)
1116 unat &= ~unatN_mask;
1117 else if (unatN_val == 1)
1118 unat |= unatN_mask;
1119 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1120 }
1121 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1122 {
1123 ULONGEST natN_val;
1124 ULONGEST bsp;
1125 ULONGEST cfm;
1126 CORE_ADDR gr_addr = 0;
1127 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1128 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1129
1130 /* The bsp points at the end of the register frame so we
1131 subtract the size of frame from it to get start of register frame. */
1132 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1133
1134 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1135 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1136
1137 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1138 regnum),
1139 byte_order);
1140
1141 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1142 {
1143 /* Compute address of nat collection bits. */
1144 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1145 CORE_ADDR nat_collection;
1146 int natN_bit = (gr_addr >> 3) & 0x3f;
1147 ULONGEST natN_mask = (1LL << natN_bit);
1148 /* If our nat collection address is bigger than bsp, we have to get
1149 the nat collection from rnat. Otherwise, we fetch the nat
1150 collection from the computed address. */
1151 if (nat_addr >= bsp)
1152 {
1153 regcache_cooked_read_unsigned (regcache,
1154 IA64_RNAT_REGNUM,
1155 &nat_collection);
1156 if (natN_val)
1157 nat_collection |= natN_mask;
1158 else
1159 nat_collection &= ~natN_mask;
1160 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1161 nat_collection);
1162 }
1163 else
1164 {
1165 gdb_byte nat_buf[8];
1166 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1167 if (natN_val)
1168 nat_collection |= natN_mask;
1169 else
1170 nat_collection &= ~natN_mask;
1171 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1172 byte_order, nat_collection);
1173 write_memory (nat_addr, nat_buf, 8);
1174 }
1175 }
1176 }
1177 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1178 {
1179 ULONGEST pr;
1180 ULONGEST cfm;
1181 ULONGEST prN_val;
1182 ULONGEST prN_mask;
1183
1184 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1185 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1186
1187 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1188 {
1189 /* Fetch predicate register rename base from current frame
1190 marker for this frame. */
1191 int rrb_pr = (cfm >> 32) & 0x3f;
1192
1193 /* Adjust the register number to account for register rotation. */
1194 regnum = VP16_REGNUM
1195 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1196 }
1197 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1198 byte_order);
1199 prN_mask = (1LL << (regnum - VP0_REGNUM));
1200 if (prN_val == 0)
1201 pr &= ~prN_mask;
1202 else if (prN_val == 1)
1203 pr |= prN_mask;
1204 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1205 }
1206 }
1207
1208 /* The ia64 needs to convert between various ieee floating-point formats
1209 and the special ia64 floating point register format. */
1210
1211 static int
1212 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1213 {
1214 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1215 && TYPE_CODE (type) == TYPE_CODE_FLT
1216 && type != ia64_ext_type (gdbarch));
1217 }
1218
1219 static int
1220 ia64_register_to_value (struct frame_info *frame, int regnum,
1221 struct type *valtype, gdb_byte *out,
1222 int *optimizedp, int *unavailablep)
1223 {
1224 struct gdbarch *gdbarch = get_frame_arch (frame);
1225 gdb_byte in[IA64_FP_REGISTER_SIZE];
1226
1227 /* Convert to TYPE. */
1228 if (!get_frame_register_bytes (frame, regnum, 0,
1229 register_size (gdbarch, regnum),
1230 in, optimizedp, unavailablep))
1231 return 0;
1232
1233 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1234 *optimizedp = *unavailablep = 0;
1235 return 1;
1236 }
1237
1238 static void
1239 ia64_value_to_register (struct frame_info *frame, int regnum,
1240 struct type *valtype, const gdb_byte *in)
1241 {
1242 struct gdbarch *gdbarch = get_frame_arch (frame);
1243 gdb_byte out[IA64_FP_REGISTER_SIZE];
1244 target_float_convert (in, valtype, out, ia64_ext_type (gdbarch));
1245 put_frame_register (frame, regnum, out);
1246 }
1247
1248
1249 /* Limit the number of skipped non-prologue instructions since examining
1250 the prologue is expensive. */
1251 static int max_skip_non_prologue_insns = 40;
1252
1253 /* Given PC representing the starting address of a function, and
1254 LIM_PC which is the (sloppy) limit to which to scan when looking
1255 for a prologue, attempt to further refine this limit by using
1256 the line data in the symbol table. If successful, a better guess
1257 on where the prologue ends is returned, otherwise the previous
1258 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1259 which will be set to indicate whether the returned limit may be
1260 used with no further scanning in the event that the function is
1261 frameless. */
1262
1263 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1264 superseded by skip_prologue_using_sal. */
1265
1266 static CORE_ADDR
1267 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1268 {
1269 struct symtab_and_line prologue_sal;
1270 CORE_ADDR start_pc = pc;
1271 CORE_ADDR end_pc;
1272
1273 /* The prologue can not possibly go past the function end itself,
1274 so we can already adjust LIM_PC accordingly. */
1275 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1276 lim_pc = end_pc;
1277
1278 /* Start off not trusting the limit. */
1279 *trust_limit = 0;
1280
1281 prologue_sal = find_pc_line (pc, 0);
1282 if (prologue_sal.line != 0)
1283 {
1284 int i;
1285 CORE_ADDR addr = prologue_sal.end;
1286
1287 /* Handle the case in which the compiler's optimizer/scheduler
1288 has moved instructions into the prologue. We scan ahead
1289 in the function looking for address ranges whose corresponding
1290 line number is less than or equal to the first one that we
1291 found for the function. (It can be less than when the
1292 scheduler puts a body instruction before the first prologue
1293 instruction.) */
1294 for (i = 2 * max_skip_non_prologue_insns;
1295 i > 0 && (lim_pc == 0 || addr < lim_pc);
1296 i--)
1297 {
1298 struct symtab_and_line sal;
1299
1300 sal = find_pc_line (addr, 0);
1301 if (sal.line == 0)
1302 break;
1303 if (sal.line <= prologue_sal.line
1304 && sal.symtab == prologue_sal.symtab)
1305 {
1306 prologue_sal = sal;
1307 }
1308 addr = sal.end;
1309 }
1310
1311 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1312 {
1313 lim_pc = prologue_sal.end;
1314 if (start_pc == get_pc_function_start (lim_pc))
1315 *trust_limit = 1;
1316 }
1317 }
1318 return lim_pc;
1319 }
1320
1321 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1322 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1323 || (14 <= (_regnum_) && (_regnum_) <= 31))
1324 #define imm9(_instr_) \
1325 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1326 | (((_instr_) & 0x00008000000LL) >> 20) \
1327 | (((_instr_) & 0x00000001fc0LL) >> 6))
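/* For reference: imm9 reassembles the signed 9-bit immediate of the
   M-unit store forms from its scattered fields - instruction bits 6..12
   give immediate bits 0..6, instruction bit 27 gives bit 7, and
   instruction bit 36 supplies the sign.  */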
1328
1329 /* Allocate and initialize a frame cache. */
1330
1331 static struct ia64_frame_cache *
1332 ia64_alloc_frame_cache (void)
1333 {
1334 struct ia64_frame_cache *cache;
1335 int i;
1336
1337 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1338
1339 /* Base address. */
1340 cache->base = 0;
1341 cache->pc = 0;
1342 cache->cfm = 0;
1343 cache->prev_cfm = 0;
1344 cache->sof = 0;
1345 cache->sol = 0;
1346 cache->sor = 0;
1347 cache->bsp = 0;
1348 cache->fp_reg = 0;
1349 cache->frameless = 1;
1350
1351 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1352 cache->saved_regs[i] = 0;
1353
1354 return cache;
1355 }
1356
1357 static CORE_ADDR
1358 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1359 struct frame_info *this_frame,
1360 struct ia64_frame_cache *cache)
1361 {
1362 CORE_ADDR next_pc;
1363 CORE_ADDR last_prologue_pc = pc;
1364 instruction_type it;
1365 long long instr;
1366 int cfm_reg = 0;
1367 int ret_reg = 0;
1368 int fp_reg = 0;
1369 int unat_save_reg = 0;
1370 int pr_save_reg = 0;
1371 int mem_stack_frame_size = 0;
1372 int spill_reg = 0;
1373 CORE_ADDR spill_addr = 0;
1374 char instores[8];
1375 char infpstores[8];
1376 char reg_contents[256];
1377 int trust_limit;
1378 int frameless = 1;
1379 int i;
1380 CORE_ADDR addr;
1381 gdb_byte buf[8];
1382 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1383
1384 memset (instores, 0, sizeof instores);
1385 memset (infpstores, 0, sizeof infpstores);
1386 memset (reg_contents, 0, sizeof reg_contents);
1387
1388 if (cache->after_prologue != 0
1389 && cache->after_prologue <= lim_pc)
1390 return cache->after_prologue;
1391
1392 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1393 next_pc = fetch_instruction (pc, &it, &instr);
1394
1395 /* We want to check if we have a recognizable function start before we
1396 look ahead for a prologue. */
1397 if (pc < lim_pc && next_pc
1398 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1399 {
1400 /* alloc - start of a regular function. */
1401 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1402 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1403 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1404
1405 /* Verify that the current cfm matches what we think is the
1406 function start. If we have somehow jumped within a function,
1407 we do not want to interpret the prologue and calculate the
1408 addresses of various registers such as the return address.
1409 We will instead treat the frame as frameless. */
1410 if (!this_frame ||
1411 (sof == (cache->cfm & 0x7f) &&
1412 sol == ((cache->cfm >> 7) & 0x7f)))
1413 frameless = 0;
1414
1415 cfm_reg = rN;
1416 last_prologue_pc = next_pc;
1417 pc = next_pc;
1418 }
1419 else
1420 {
1421 /* Look for a leaf routine. */
1422 if (pc < lim_pc && next_pc
1423 && (it == I || it == M)
1424 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1425 {
1426 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1427 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1428 | ((instr & 0x001f8000000LL) >> 20)
1429 | ((instr & 0x000000fe000LL) >> 13));
1430 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1431 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1432 int qp = (int) (instr & 0x0000000003fLL);
1433 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1434 {
1435 /* mov r2, r12 - beginning of leaf routine. */
1436 fp_reg = rN;
1437 last_prologue_pc = next_pc;
1438 }
1439 }
1440
1441 /* If we don't recognize a regular function or leaf routine, we are
1442 done. */
1443 if (!fp_reg)
1444 {
1445 pc = lim_pc;
1446 if (trust_limit)
1447 last_prologue_pc = lim_pc;
1448 }
1449 }
1450
1451 /* Loop, looking for prologue instructions, keeping track of
1452 where preserved registers were spilled. */
1453 while (pc < lim_pc)
1454 {
1455 next_pc = fetch_instruction (pc, &it, &instr);
1456 if (next_pc == 0)
1457 break;
1458
1459 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1460 {
1461 /* Exit loop upon hitting a non-nop branch instruction. */
1462 if (trust_limit)
1463 lim_pc = pc;
1464 break;
1465 }
1466 else if (((instr & 0x3fLL) != 0LL) &&
1467 (frameless || ret_reg != 0))
1468 {
1469 /* Exit loop upon hitting a predicated instruction if
1470 we already have the return register or if we are frameless. */
1471 if (trust_limit)
1472 lim_pc = pc;
1473 break;
1474 }
1475 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1476 {
1477 /* Move from BR */
1478 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1479 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1480 int qp = (int) (instr & 0x0000000003f);
1481
1482 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1483 {
1484 ret_reg = rN;
1485 last_prologue_pc = next_pc;
1486 }
1487 }
1488 else if ((it == I || it == M)
1489 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1490 {
1491 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1492 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1493 | ((instr & 0x001f8000000LL) >> 20)
1494 | ((instr & 0x000000fe000LL) >> 13));
1495 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1496 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1497 int qp = (int) (instr & 0x0000000003fLL);
1498
1499 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1500 {
1501 /* mov rN, r12 */
1502 fp_reg = rN;
1503 last_prologue_pc = next_pc;
1504 }
1505 else if (qp == 0 && rN == 12 && rM == 12)
1506 {
1507 /* adds r12, -mem_stack_frame_size, r12 */
1508 mem_stack_frame_size -= imm;
1509 last_prologue_pc = next_pc;
1510 }
1511 else if (qp == 0 && rN == 2
1512 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1513 {
1514 CORE_ADDR saved_sp = 0;
1515 /* adds r2, spilloffset, rFramePointer
1516 or
1517 adds r2, spilloffset, r12
1518
1519 Get ready for stf.spill or st8.spill instructions.
1520 The address to start spilling at is loaded into r2.
1521 FIXME: Why r2? That's what gcc currently uses; it
1522 could well be different for other compilers. */
1523
1524 /* Hmm... whether or not this will work will depend on
1525 where the pc is. If it's still early in the prologue
1526 this'll be wrong. FIXME */
1527 if (this_frame)
1528 {
1529 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1530 saved_sp = get_frame_register_unsigned (this_frame,
1531 sp_regnum);
1532 }
1533 spill_addr = saved_sp
1534 + (rM == 12 ? 0 : mem_stack_frame_size)
1535 + imm;
1536 spill_reg = rN;
1537 last_prologue_pc = next_pc;
1538 }
1539 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1540 rN < 256 && imm == 0)
1541 {
1542 /* mov rN, rM where rM is an input register. */
1543 reg_contents[rN] = rM;
1544 last_prologue_pc = next_pc;
1545 }
1546 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1547 rM == 2)
1548 {
1549 /* mov r12, r2 */
1550 last_prologue_pc = next_pc;
1551 break;
1552 }
1553 }
1554 else if (it == M
1555 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1556 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1557 {
1558 /* stf.spill [rN] = fM, imm9
1559 or
1560 stf.spill [rN] = fM */
1561
1562 int imm = imm9(instr);
1563 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1564 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1565 int qp = (int) (instr & 0x0000000003fLL);
1566 if (qp == 0 && rN == spill_reg && spill_addr != 0
1567 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1568 {
1569 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1570
1571 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1572 spill_addr += imm;
1573 else
1574 spill_addr = 0; /* last one; must be done. */
1575 last_prologue_pc = next_pc;
1576 }
1577 }
1578 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1579 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1580 {
1581 /* mov.m rN = arM
1582 or
1583 mov.i rN = arM */
1584
1585 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1586 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1587 int qp = (int) (instr & 0x0000000003fLL);
1588 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1589 {
1590 /* We have something like "mov.m r3 = ar.unat". Remember the
1591 r3 (or whatever) and watch for a store of this register... */
1592 unat_save_reg = rN;
1593 last_prologue_pc = next_pc;
1594 }
1595 }
1596 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1597 {
1598 /* mov rN = pr */
1599 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1600 int qp = (int) (instr & 0x0000000003fLL);
1601 if (qp == 0 && isScratch (rN))
1602 {
1603 pr_save_reg = rN;
1604 last_prologue_pc = next_pc;
1605 }
1606 }
1607 else if (it == M
1608 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1609 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1610 {
1611 /* st8 [rN] = rM
1612 or
1613 st8 [rN] = rM, imm9 */
1614 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1615 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1616 int qp = (int) (instr & 0x0000000003fLL);
1617 int indirect = rM < 256 ? reg_contents[rM] : 0;
1618 if (qp == 0 && rN == spill_reg && spill_addr != 0
1619 && (rM == unat_save_reg || rM == pr_save_reg))
1620 {
1621 /* We've found a spill of either the UNAT register or the PR
1622 register. (Well, not exactly; what we've actually found is
1623 a spill of the register that UNAT or PR was moved to).
1624 Record that fact and move on... */
1625 if (rM == unat_save_reg)
1626 {
1627 /* Track UNAT register. */
1628 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1629 unat_save_reg = 0;
1630 }
1631 else
1632 {
1633 /* Track PR register. */
1634 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1635 pr_save_reg = 0;
1636 }
1637 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1638 /* st8 [rN] = rM, imm9 */
1639 spill_addr += imm9(instr);
1640 else
1641 spill_addr = 0; /* Must be done spilling. */
1642 last_prologue_pc = next_pc;
1643 }
1644 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1645 {
1646 /* Allow up to one store of each input register. */
1647 instores[rM-32] = 1;
1648 last_prologue_pc = next_pc;
1649 }
1650 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1651 !instores[indirect-32])
1652 {
1653 /* Allow an indirect store of an input register. */
1654 instores[indirect-32] = 1;
1655 last_prologue_pc = next_pc;
1656 }
1657 }
1658 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1659 {
1660 /* One of
1661 st1 [rN] = rM
1662 st2 [rN] = rM
1663 st4 [rN] = rM
1664 st8 [rN] = rM
1665 Note that the st8 case is handled in the clause above.
1666
1667 Advance over stores of input registers. One store per input
1668 register is permitted. */
1669 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1670 int qp = (int) (instr & 0x0000000003fLL);
1671 int indirect = rM < 256 ? reg_contents[rM] : 0;
1672 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1673 {
1674 instores[rM-32] = 1;
1675 last_prologue_pc = next_pc;
1676 }
1677 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1678 !instores[indirect-32])
1679 {
1680 /* Allow an indirect store of an input register. */
1681 instores[indirect-32] = 1;
1682 last_prologue_pc = next_pc;
1683 }
1684 }
1685 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1686 {
1687 /* Either
1688 stfs [rN] = fM
1689 or
1690 stfd [rN] = fM
1691
1692 Advance over stores of floating point input registers. Again
1693 one store per register is permitted. */
1694 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1695 int qp = (int) (instr & 0x0000000003fLL);
1696 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1697 {
1698 infpstores[fM-8] = 1;
1699 last_prologue_pc = next_pc;
1700 }
1701 }
1702 else if (it == M
1703 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1704 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1705 {
1706 /* st8.spill [rN] = rM
1707 or
1708 st8.spill [rN] = rM, imm9 */
1709 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1710 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1711 int qp = (int) (instr & 0x0000000003fLL);
1712 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1713 {
1714 /* We've found a spill of one of the preserved general purpose
1715 regs. Record the spill address and advance the spill
1716 register if appropriate. */
1717 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1718 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1719 /* st8.spill [rN] = rM, imm9 */
1720 spill_addr += imm9(instr);
1721 else
1722 spill_addr = 0; /* Done spilling. */
1723 last_prologue_pc = next_pc;
1724 }
1725 }
1726
1727 pc = next_pc;
1728 }
1729
1730 /* If not frameless and we aren't called by skip_prologue, then we need
1731 to calculate registers for the previous frame which will be needed
1732 later. */
1733
1734 if (!frameless && this_frame)
1735 {
1736 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1737 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1738
1739 /* Extract the size of the rotating portion of the stack
1740 frame and the register rename base from the current
1741 frame marker. */
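/* For reference, the CFM fields used here and below are: sof in bits
   0..6, sol in bits 7..13, sor in bits 14..17 (in units of 8
   registers), rrb.gr in bits 18..24, rrb.fr in bits 25..31 and
   rrb.pr in bits 32..37.  */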
1742 cfm = cache->cfm;
1743 sor = cache->sor;
1744 sof = cache->sof;
1745 sol = cache->sol;
1746 rrb_gr = (cfm >> 18) & 0x7f;
1747
1748 /* Find the bof (beginning of frame). */
1749 bof = rse_address_add (cache->bsp, -sof);
1750
1751 for (i = 0, addr = bof;
1752 i < sof;
1753 i++, addr += 8)
1754 {
1755 if (IS_NaT_COLLECTION_ADDR (addr))
1756 {
1757 addr += 8;
1758 }
1759 if (i+32 == cfm_reg)
1760 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1761 if (i+32 == ret_reg)
1762 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1763 if (i+32 == fp_reg)
1764 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1765 }
1766
1767 /* For the previous argument registers we require the previous bof.
1768 If we can't find the previous cfm, then we can do nothing. */
1769 cfm = 0;
1770 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1771 {
1772 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1773 8, byte_order);
1774 }
1775 else if (cfm_reg != 0)
1776 {
1777 get_frame_register (this_frame, cfm_reg, buf);
1778 cfm = extract_unsigned_integer (buf, 8, byte_order);
1779 }
1780 cache->prev_cfm = cfm;
1781
1782 if (cfm != 0)
1783 {
1784 sor = ((cfm >> 14) & 0xf) * 8;
1785 sof = (cfm & 0x7f);
1786 sol = (cfm >> 7) & 0x7f;
1787 rrb_gr = (cfm >> 18) & 0x7f;
1788
1789 /* The previous bof only requires subtraction of the sol (size of
1790 locals) due to the overlap between output and input of
1791 subsequent frames. */
1792 bof = rse_address_add (bof, -sol);
1793
1794 for (i = 0, addr = bof;
1795 i < sof;
1796 i++, addr += 8)
1797 {
1798 if (IS_NaT_COLLECTION_ADDR (addr))
1799 {
1800 addr += 8;
1801 }
1802 if (i < sor)
1803 cache->saved_regs[IA64_GR32_REGNUM
1804 + ((i + (sor - rrb_gr)) % sor)]
1805 = addr;
1806 else
1807 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1808 }
1809
1810 }
1811 }
1812
1813 /* Try to trust the lim_pc value whenever possible. */
1814 if (trust_limit && lim_pc >= last_prologue_pc)
1815 last_prologue_pc = lim_pc;
1816
1817 cache->frameless = frameless;
1818 cache->after_prologue = last_prologue_pc;
1819 cache->mem_stack_frame_size = mem_stack_frame_size;
1820 cache->fp_reg = fp_reg;
1821
1822 return last_prologue_pc;
1823 }
1824
1825 CORE_ADDR
1826 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1827 {
1828 struct ia64_frame_cache cache;
1829 cache.base = 0;
1830 cache.after_prologue = 0;
1831 cache.cfm = 0;
1832 cache.bsp = 0;
1833
1834 /* Call examine_prologue with 0 as the third argument since we don't
1835 have a this_frame to pass. */
1836 return examine_prologue (pc, pc+1024, 0, &cache);
1837 }
1838
1839
1840 /* Normal frames. */
1841
1842 static struct ia64_frame_cache *
1843 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1844 {
1845 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1846 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1847 struct ia64_frame_cache *cache;
1848 gdb_byte buf[8];
1849 CORE_ADDR cfm;
1850
1851 if (*this_cache)
1852 return (struct ia64_frame_cache *) *this_cache;
1853
1854 cache = ia64_alloc_frame_cache ();
1855 *this_cache = cache;
1856
1857 get_frame_register (this_frame, sp_regnum, buf);
1858 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1859
1860 /* We always want the bsp to point to the end of frame.
1861 This way, we can always get the beginning of frame (bof)
1862 by subtracting frame size. */
1863 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1864 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1865
1866 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1867
1868 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1869 cfm = extract_unsigned_integer (buf, 8, byte_order);
1870
1871 cache->sof = (cfm & 0x7f);
1872 cache->sol = (cfm >> 7) & 0x7f;
1873 cache->sor = ((cfm >> 14) & 0xf) * 8;
1874
1875 cache->cfm = cfm;
1876
1877 cache->pc = get_frame_func (this_frame);
1878
1879 if (cache->pc != 0)
1880 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1881
1882 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1883
1884 return cache;
1885 }
1886
1887 static void
1888 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1889 struct frame_id *this_id)
1890 {
1891 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1892 struct ia64_frame_cache *cache =
1893 ia64_frame_cache (this_frame, this_cache);
1894
1895 /* If outermost frame (base == 0), leave the null frame id untouched. */
1896 if (cache->base != 0)
1897 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1898 if (gdbarch_debug >= 1)
1899 fprintf_unfiltered (gdb_stdlog,
1900 "regular frame id: code %s, stack %s, "
1901 "special %s, this_frame %s\n",
1902 paddress (gdbarch, this_id->code_addr),
1903 paddress (gdbarch, this_id->stack_addr),
1904 paddress (gdbarch, cache->bsp),
1905 host_address_to_string (this_frame));
1906 }
1907
1908 static struct value *
1909 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1910 int regnum)
1911 {
1912 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1913 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1914 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1915 gdb_byte buf[8];
1916
1917 gdb_assert (regnum >= 0);
1918
1919 if (!target_has_registers)
1920 error (_("No registers."));
1921
1922 if (regnum == gdbarch_sp_regnum (gdbarch))
1923 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1924
1925 else if (regnum == IA64_BSP_REGNUM)
1926 {
1927 struct value *val;
1928 CORE_ADDR prev_cfm, bsp, prev_bsp;
1929
1930 /* We want to calculate the previous bsp as the end of the previous
1931 register stack frame. This corresponds to what the hardware bsp
1932 register will be if we pop the frame back which is why we might
1933 have been called. We know the beginning of the current frame is
1934 cache->bsp - cache->sof. This value in the previous frame points
1935 to the start of the output registers. We can calculate the end of
1936 that frame by adding the size of output:
1937 (sof (size of frame) - sol (size of locals)). */
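/* A worked example with hypothetical values: if the previous CFM
   encodes sof = 96 and sol = 88, the previous frame had 8 output
   registers, so the previous bsp lies 8 slots beyond the current
   bof (rse_address_add also steps over any intervening NAT
   collection slot).  */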
1938 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1939 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1940 8, byte_order);
1941 bsp = rse_address_add (cache->bsp, -(cache->sof));
1942 prev_bsp =
1943 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1944
1945 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1946 }
1947
1948 else if (regnum == IA64_CFM_REGNUM)
1949 {
1950 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1951
1952 if (addr != 0)
1953 return frame_unwind_got_memory (this_frame, regnum, addr);
1954
1955 if (cache->prev_cfm)
1956 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1957
1958 if (cache->frameless)
1959 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1960 IA64_PFS_REGNUM);
1961 return frame_unwind_got_register (this_frame, regnum, 0);
1962 }
1963
1964 else if (regnum == IA64_VFP_REGNUM)
1965 {
1966 /* If the function in question uses an automatic register (r32-r127)
1967 for the frame pointer, it'll be found by ia64_find_saved_register()
1968 above. If the function lacks one of these frame pointers, we can
1969 still provide a value since we know the size of the frame. */
1970 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1971 }
1972
1973 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1974 {
1975 struct value *pr_val;
1976 ULONGEST prN;
1977
1978 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1979 IA64_PR_REGNUM);
1980 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1981 {
1982 /* Fetch predicate register rename base from current frame
1983 marker for this frame. */
1984 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1985
1986 /* Adjust the register number to account for register rotation. */
1987 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1988 }
1989 prN = extract_bit_field (value_contents_all (pr_val),
1990 regnum - VP0_REGNUM, 1);
1991 return frame_unwind_got_constant (this_frame, regnum, prN);
1992 }
1993
1994 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1995 {
1996 struct value *unat_val;
1997 ULONGEST unatN;
1998 unat_val = ia64_frame_prev_register (this_frame, this_cache,
1999 IA64_UNAT_REGNUM);
2000 unatN = extract_bit_field (value_contents_all (unat_val),
2001 regnum - IA64_NAT0_REGNUM, 1);
2002 return frame_unwind_got_constant (this_frame, regnum, unatN);
2003 }
2004
2005 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2006 {
2007 int natval = 0;
2008 /* Find address of general register corresponding to nat bit we're
2009 interested in. */
2010 CORE_ADDR gr_addr;
2011
2012 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2013
2014 if (gr_addr != 0)
2015 {
2016 /* Compute address of nat collection bits. */
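/* Every 64th doubleword in the backing store (the one whose address
   has bits 3..8 all set) is a NAT collection rather than a register,
   so OR-ing with 0x1f8 yields the collection slot for this group,
   and bits 3..8 of the register's own address select its bit within
   that collection (see nat_bit below).  */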
2017 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2018 CORE_ADDR bsp;
2019 CORE_ADDR nat_collection;
2020 int nat_bit;
2021
2022 /* If our nat collection address is bigger than bsp, we have to get
2023 the nat collection from rnat. Otherwise, we fetch the nat
2024 collection from the computed address. */
2025 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2026 bsp = extract_unsigned_integer (buf, 8, byte_order);
2027 if (nat_addr >= bsp)
2028 {
2029 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2030 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2031 }
2032 else
2033 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2034 nat_bit = (gr_addr >> 3) & 0x3f;
2035 natval = (nat_collection >> nat_bit) & 1;
2036 }
2037
2038 return frame_unwind_got_constant (this_frame, regnum, natval);
2039 }
2040
2041 else if (regnum == IA64_IP_REGNUM)
2042 {
2043 CORE_ADDR pc = 0;
2044 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2045
2046 if (addr != 0)
2047 {
2048 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2049 pc = extract_unsigned_integer (buf, 8, byte_order);
2050 }
2051 else if (cache->frameless)
2052 {
2053 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2054 pc = extract_unsigned_integer (buf, 8, byte_order);
2055 }
2056 pc &= ~0xf;
2057 return frame_unwind_got_constant (this_frame, regnum, pc);
2058 }
2059
2060 else if (regnum == IA64_PSR_REGNUM)
2061 {
2062 /* We don't know how to get the complete previous PSR, but we need it
2063 for the slot information when we unwind the pc (pc is formed of IP
2064 register plus slot information from PSR). To get the previous
2065 slot information, we mask it off the return address. */
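/* The slot number lives in psr.ri, bits 41..42 of the PSR; the low
   two bits of the saved return address encode the same slot, which
   is why the code below clears psr.ri and substitutes those bits.  */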
2066 ULONGEST slot_num = 0;
2067 CORE_ADDR pc = 0;
2068 CORE_ADDR psr = 0;
2069 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2070
2071 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2072 psr = extract_unsigned_integer (buf, 8, byte_order);
2073
2074 if (addr != 0)
2075 {
2076 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2077 pc = extract_unsigned_integer (buf, 8, byte_order);
2078 }
2079 else if (cache->frameless)
2080 {
2081 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2082 pc = extract_unsigned_integer (buf, 8, byte_order);
2083 }
2084 psr &= ~(3LL << 41);
2085 slot_num = pc & 0x3LL;
2086 psr |= (CORE_ADDR)slot_num << 41;
2087 return frame_unwind_got_constant (this_frame, regnum, psr);
2088 }
2089
2090 else if (regnum == IA64_BR0_REGNUM)
2091 {
2092 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2093
2094 if (addr != 0)
2095 return frame_unwind_got_memory (this_frame, regnum, addr);
2096
2097 return frame_unwind_got_constant (this_frame, regnum, 0);
2098 }
2099
2100 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2101 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2102 {
2103 CORE_ADDR addr = 0;
2104
2105 if (regnum >= V32_REGNUM)
2106 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2107 addr = cache->saved_regs[regnum];
2108 if (addr != 0)
2109 return frame_unwind_got_memory (this_frame, regnum, addr);
2110
2111 if (cache->frameless)
2112 {
2113 struct value *reg_val;
2114 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2115
2116 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2117 with the same code above? */
2118 if (regnum >= V32_REGNUM)
2119 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2120 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2121 IA64_CFM_REGNUM);
2122 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2123 8, byte_order);
2124 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2125 IA64_BSP_REGNUM);
2126 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2127 8, byte_order);
2128 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2129
2130 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2131 return frame_unwind_got_memory (this_frame, regnum, addr);
2132 }
2133
2134 return frame_unwind_got_constant (this_frame, regnum, 0);
2135 }
2136
2137 else /* All other registers. */
2138 {
2139 CORE_ADDR addr = 0;
2140
2141 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2142 {
2143 /* Fetch floating point register rename base from current
2144 frame marker for this frame. */
2145 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2146
2147 /* Adjust the floating point register number to account for
2148 register rotation. */
2149 regnum = IA64_FR32_REGNUM
2150 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2151 }
2152
2153 /* If we have stored a memory address, access the register. */
2154 addr = cache->saved_regs[regnum];
2155 if (addr != 0)
2156 return frame_unwind_got_memory (this_frame, regnum, addr);
2157 /* Otherwise, punt and get the current value of the register. */
2158 else
2159 return frame_unwind_got_register (this_frame, regnum, regnum);
2160 }
2161 }
2162
2163 static const struct frame_unwind ia64_frame_unwind =
2164 {
2165 NORMAL_FRAME,
2166 default_frame_unwind_stop_reason,
2167 &ia64_frame_this_id,
2168 &ia64_frame_prev_register,
2169 NULL,
2170 default_frame_sniffer
2171 };
2172
2173 /* Signal trampolines. */
2174
2175 static void
2176 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2177 struct ia64_frame_cache *cache)
2178 {
2179 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2180 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2181
2182 if (tdep->sigcontext_register_address)
2183 {
2184 int regno;
2185
2186 cache->saved_regs[IA64_VRAP_REGNUM]
2187 = tdep->sigcontext_register_address (gdbarch, cache->base,
2188 IA64_IP_REGNUM);
2189 cache->saved_regs[IA64_CFM_REGNUM]
2190 = tdep->sigcontext_register_address (gdbarch, cache->base,
2191 IA64_CFM_REGNUM);
2192 cache->saved_regs[IA64_PSR_REGNUM]
2193 = tdep->sigcontext_register_address (gdbarch, cache->base,
2194 IA64_PSR_REGNUM);
2195 cache->saved_regs[IA64_BSP_REGNUM]
2196 = tdep->sigcontext_register_address (gdbarch, cache->base,
2197 IA64_BSP_REGNUM);
2198 cache->saved_regs[IA64_RNAT_REGNUM]
2199 = tdep->sigcontext_register_address (gdbarch, cache->base,
2200 IA64_RNAT_REGNUM);
2201 cache->saved_regs[IA64_CCV_REGNUM]
2202 = tdep->sigcontext_register_address (gdbarch, cache->base,
2203 IA64_CCV_REGNUM);
2204 cache->saved_regs[IA64_UNAT_REGNUM]
2205 = tdep->sigcontext_register_address (gdbarch, cache->base,
2206 IA64_UNAT_REGNUM);
2207 cache->saved_regs[IA64_FPSR_REGNUM]
2208 = tdep->sigcontext_register_address (gdbarch, cache->base,
2209 IA64_FPSR_REGNUM);
2210 cache->saved_regs[IA64_PFS_REGNUM]
2211 = tdep->sigcontext_register_address (gdbarch, cache->base,
2212 IA64_PFS_REGNUM);
2213 cache->saved_regs[IA64_LC_REGNUM]
2214 = tdep->sigcontext_register_address (gdbarch, cache->base,
2215 IA64_LC_REGNUM);
2216
2217 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2218 cache->saved_regs[regno] =
2219 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2220 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2221 cache->saved_regs[regno] =
2222 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2223 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2224 cache->saved_regs[regno] =
2225 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2226 }
2227 }
2228
2229 static struct ia64_frame_cache *
2230 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2231 {
2232 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2233 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2234 struct ia64_frame_cache *cache;
2235 gdb_byte buf[8];
2236
2237 if (*this_cache)
2238 return (struct ia64_frame_cache *) *this_cache;
2239
2240 cache = ia64_alloc_frame_cache ();
2241
2242 get_frame_register (this_frame, sp_regnum, buf);
2243 /* Note that frame size is hard-coded below. We cannot calculate it
2244 via prologue examination. */
2245 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2246
2247 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2248 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2249
2250 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2251 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2252 cache->sof = cache->cfm & 0x7f;
2253
2254 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2255
2256 *this_cache = cache;
2257 return cache;
2258 }
2259
2260 static void
2261 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2262 void **this_cache, struct frame_id *this_id)
2263 {
2264 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2265 struct ia64_frame_cache *cache =
2266 ia64_sigtramp_frame_cache (this_frame, this_cache);
2267
2268 (*this_id) = frame_id_build_special (cache->base,
2269 get_frame_pc (this_frame),
2270 cache->bsp);
2271 if (gdbarch_debug >= 1)
2272 fprintf_unfiltered (gdb_stdlog,
2273 "sigtramp frame id: code %s, stack %s, "
2274 "special %s, this_frame %s\n",
2275 paddress (gdbarch, this_id->code_addr),
2276 paddress (gdbarch, this_id->stack_addr),
2277 paddress (gdbarch, cache->bsp),
2278 host_address_to_string (this_frame));
2279 }
2280
2281 static struct value *
2282 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2283 void **this_cache, int regnum)
2284 {
2285 struct ia64_frame_cache *cache =
2286 ia64_sigtramp_frame_cache (this_frame, this_cache);
2287
2288 gdb_assert (regnum >= 0);
2289
2290 if (!target_has_registers)
2291 error (_("No registers."));
2292
2293 if (regnum == IA64_IP_REGNUM)
2294 {
2295 CORE_ADDR pc = 0;
2296 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2297
2298 if (addr != 0)
2299 {
2300 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2301 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2302 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2303 }
2304 pc &= ~0xf;
2305 return frame_unwind_got_constant (this_frame, regnum, pc);
2306 }
2307
2308 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2309 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2310 {
2311 CORE_ADDR addr = 0;
2312
2313 if (regnum >= V32_REGNUM)
2314 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2315 addr = cache->saved_regs[regnum];
2316 if (addr != 0)
2317 return frame_unwind_got_memory (this_frame, regnum, addr);
2318
2319 return frame_unwind_got_constant (this_frame, regnum, 0);
2320 }
2321
2322 else /* All other registers not listed above. */
2323 {
2324 CORE_ADDR addr = cache->saved_regs[regnum];
2325
2326 if (addr != 0)
2327 return frame_unwind_got_memory (this_frame, regnum, addr);
2328
2329 return frame_unwind_got_constant (this_frame, regnum, 0);
2330 }
2331 }
2332
2333 static int
2334 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2335 struct frame_info *this_frame,
2336 void **this_cache)
2337 {
2338 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2339 if (tdep->pc_in_sigtramp)
2340 {
2341 CORE_ADDR pc = get_frame_pc (this_frame);
2342
2343 if (tdep->pc_in_sigtramp (pc))
2344 return 1;
2345 }
2346
2347 return 0;
2348 }
2349
2350 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2351 {
2352 SIGTRAMP_FRAME,
2353 default_frame_unwind_stop_reason,
2354 ia64_sigtramp_frame_this_id,
2355 ia64_sigtramp_frame_prev_register,
2356 NULL,
2357 ia64_sigtramp_frame_sniffer
2358 };
2359
2360 \f
2361
2362 static CORE_ADDR
2363 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2364 {
2365 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2366
2367 return cache->base;
2368 }
2369
2370 static const struct frame_base ia64_frame_base =
2371 {
2372 &ia64_frame_unwind,
2373 ia64_frame_base_address,
2374 ia64_frame_base_address,
2375 ia64_frame_base_address
2376 };
2377
2378 #ifdef HAVE_LIBUNWIND_IA64_H
2379
2380 struct ia64_unwind_table_entry
2381 {
2382 unw_word_t start_offset;
2383 unw_word_t end_offset;
2384 unw_word_t info_offset;
2385 };
2386
2387 static __inline__ uint64_t
2388 ia64_rse_slot_num (uint64_t addr)
2389 {
2390 return (addr >> 3) & 0x3f;
2391 }
2392
2393 /* Skip over a designated number of registers in the backing
2394 store, remembering every 64th position is for NAT. */
2395 static __inline__ uint64_t
2396 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2397 {
2398 long delta = ia64_rse_slot_num(addr) + num_regs;
2399
2400 if (num_regs < 0)
2401 delta -= 0x3e;
2402 return addr + ((num_regs + delta/0x3f) << 3);
2403 }
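/* Example with hypothetical values: starting at slot 0 and skipping
   70 registers advances 71 doublewords, because slot 63 in that span
   holds the NAT collection for the group rather than a register.  */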
2404
2405 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2406 register number to a libunwind register number. */
2407 static int
2408 ia64_gdb2uw_regnum (int regnum)
2409 {
2410 if (regnum == sp_regnum)
2411 return UNW_IA64_SP;
2412 else if (regnum == IA64_BSP_REGNUM)
2413 return UNW_IA64_BSP;
2414 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2415 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2416 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2417 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2418 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2419 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2420 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2421 return -1;
2422 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2423 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2424 else if (regnum == IA64_PR_REGNUM)
2425 return UNW_IA64_PR;
2426 else if (regnum == IA64_IP_REGNUM)
2427 return UNW_REG_IP;
2428 else if (regnum == IA64_CFM_REGNUM)
2429 return UNW_IA64_CFM;
2430 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2431 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2432 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2433 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2434 else
2435 return -1;
2436 }
2437
2438 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2439 register number to an ia64 gdb register number. */
2440 static int
2441 ia64_uw2gdb_regnum (int uw_regnum)
2442 {
2443 if (uw_regnum == UNW_IA64_SP)
2444 return sp_regnum;
2445 else if (uw_regnum == UNW_IA64_BSP)
2446 return IA64_BSP_REGNUM;
2447 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2448 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2449 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2450 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2451 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2452 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2453 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2454 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2455 else if (uw_regnum == UNW_IA64_PR)
2456 return IA64_PR_REGNUM;
2457 else if (uw_regnum == UNW_REG_IP)
2458 return IA64_IP_REGNUM;
2459 else if (uw_regnum == UNW_IA64_CFM)
2460 return IA64_CFM_REGNUM;
2461 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2462 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2463 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2464 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2465 else
2466 return -1;
2467 }
2468
2469 /* Gdb ia64-libunwind-tdep callback function to reveal whether a register
2470 is a floating-point register. */
2471 static int
2472 ia64_is_fpreg (int uw_regnum)
2473 {
2474 return unw_is_fpreg (uw_regnum);
2475 }
2476
2477 /* Libunwind callback accessor function for general registers. */
2478 static int
2479 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2480 int write, void *arg)
2481 {
2482 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2483 unw_word_t bsp, sof, cfm, psr, ip;
2484 struct frame_info *this_frame = (struct frame_info *) arg;
2485 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2486 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2487 long new_sof, old_sof;
2488
2489 /* We never call any libunwind routines that need to write registers. */
2490 gdb_assert (!write);
2491
2492 switch (uw_regnum)
2493 {
2494 case UNW_REG_IP:
2495 /* Libunwind expects to see the pc value which means the slot number
2496 from the psr must be merged with the ip word address. */
2497 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2498 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2499 *val = ip | ((psr >> 41) & 0x3);
2500 break;
2501
2502 case UNW_IA64_AR_BSP:
2503 /* Libunwind expects to see the beginning of the current
2504 register frame so we must account for the fact that
2505 ptrace() will return a value for bsp that points *after*
2506 the current register frame. */
2507 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2508 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2509 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2510 *val = ia64_rse_skip_regs (bsp, -sof);
2511 break;
2512
2513 case UNW_IA64_AR_BSPSTORE:
2514 /* Libunwind wants bspstore to be after the current register frame.
2515 This is what ptrace() and gdb treat as the regular bsp value. */
2516 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2517 break;
2518
2519 default:
2520 /* For all other registers, just unwind the value directly. */
2521 *val = get_frame_register_unsigned (this_frame, regnum);
2522 break;
2523 }
2524
2525 if (gdbarch_debug >= 1)
2526 fprintf_unfiltered (gdb_stdlog,
2527 " access_reg: from cache: %4s=%s\n",
2528 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2529 ? ia64_register_names[regnum] : "r??"),
2530 paddress (gdbarch, *val));
2531 return 0;
2532 }
2533
2534 /* Libunwind callback accessor function for floating-point registers. */
2535 static int
2536 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2537 unw_fpreg_t *val, int write, void *arg)
2538 {
2539 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2540 struct frame_info *this_frame = (struct frame_info *) arg;
2541
2542 /* We never call any libunwind routines that need to write registers. */
2543 gdb_assert (!write);
2544
2545 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2546
2547 return 0;
2548 }
2549
2550 /* Libunwind callback accessor function for top-level rse registers. */
2551 static int
2552 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2553 unw_word_t *val, int write, void *arg)
2554 {
2555 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2556 unw_word_t bsp, sof, cfm, psr, ip;
2557 struct regcache *regcache = (struct regcache *) arg;
2558 struct gdbarch *gdbarch = regcache->arch ();
2559 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2560 long new_sof, old_sof;
2561
2562 /* We never call any libunwind routines that need to write registers. */
2563 gdb_assert (!write);
2564
2565 switch (uw_regnum)
2566 {
2567 case UNW_REG_IP:
2568 /* Libunwind expects to see the pc value which means the slot number
2569 from the psr must be merged with the ip word address. */
2570 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2571 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2572 *val = ip | ((psr >> 41) & 0x3);
2573 break;
2574
2575 case UNW_IA64_AR_BSP:
2576 /* Libunwind expects to see the beginning of the current
2577 register frame so we must account for the fact that
2578 ptrace() will return a value for bsp that points *after*
2579 the current register frame. */
2580 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2581 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2582 sof = (cfm & 0x7f);
2583 *val = ia64_rse_skip_regs (bsp, -sof);
2584 break;
2585
2586 case UNW_IA64_AR_BSPSTORE:
2587 /* Libunwind wants bspstore to be after the current register frame.
2588 This is what ptrace() and gdb treat as the regular bsp value. */
2589 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2590 break;
2591
2592 default:
2593 /* For all other registers, just unwind the value directly. */
2594 regcache_cooked_read_unsigned (regcache, regnum, val);
2595 break;
2596 }
2597
2598 if (gdbarch_debug >= 1)
2599 fprintf_unfiltered (gdb_stdlog,
2600 " access_rse_reg: from cache: %4s=%s\n",
2601 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2602 ? ia64_register_names[regnum] : "r??"),
2603 paddress (gdbarch, *val));
2604
2605 return 0;
2606 }
2607
2608 /* Libunwind callback accessor function for top-level fp registers. */
2609 static int
2610 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2611 unw_fpreg_t *val, int write, void *arg)
2612 {
2613 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2614 struct regcache *regcache = (struct regcache *) arg;
2615
2616 /* We never call any libunwind routines that need to write registers. */
2617 gdb_assert (!write);
2618
2619 regcache_cooked_read (regcache, regnum, (gdb_byte *) val);
2620
2621 return 0;
2622 }
2623
2624 /* Libunwind callback accessor function for accessing memory. */
2625 static int
2626 ia64_access_mem (unw_addr_space_t as,
2627 unw_word_t addr, unw_word_t *val,
2628 int write, void *arg)
2629 {
2630 if (addr - KERNEL_START < ktab_size)
2631 {
2632 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2633 + (addr - KERNEL_START));
2634
2635 if (write)
2636 *laddr = *val;
2637 else
2638 *val = *laddr;
2639 return 0;
2640 }
2641
2642 /* XXX do we need to normalize byte-order here? */
2643 if (write)
2644 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2645 else
2646 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2647 }
2648
2649 /* Call low-level function to access the kernel unwind table. */
2650 static LONGEST
2651 getunwind_table (gdb_byte **buf_p)
2652 {
2653 LONGEST x;
2654
2655 /* FIXME drow/2005-09-10: This code used to call
2656 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2657 for the currently running ia64-linux kernel. That data should
2658 come from the core file and be accessed via the auxv vector; if
2659 we want to preserve the fallback to the running kernel's table, then
2660 we should find a way to override the corefile layer's
2661 xfer_partial method. */
2662
2663 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2664 NULL, buf_p);
2665
2666 return x;
2667 }
2668
2669 /* Get the kernel unwind table. */
2670 static int
2671 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2672 {
2673 static struct ia64_table_entry *etab;
2674
2675 if (!ktab)
2676 {
2677 gdb_byte *ktab_buf;
2678 LONGEST size;
2679
2680 size = getunwind_table (&ktab_buf);
2681 if (size <= 0)
2682 return -UNW_ENOINFO;
2683
2684 ktab = (struct ia64_table_entry *) ktab_buf;
2685 ktab_size = size;
2686
2687 for (etab = ktab; etab->start_offset; ++etab)
2688 etab->info_offset += KERNEL_START;
2689 }
2690
2691 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2692 return -UNW_ENOINFO;
2693
2694 di->format = UNW_INFO_FORMAT_TABLE;
2695 di->gp = 0;
2696 di->start_ip = ktab[0].start_offset;
2697 di->end_ip = etab[-1].end_offset;
2698 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2699 di->u.ti.segbase = 0;
2700 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2701 di->u.ti.table_data = (unw_word_t *) ktab;
2702
2703 if (gdbarch_debug >= 1)
2704 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2705 "segbase=%s, length=%s, gp=%s\n",
2706 (char *) di->u.ti.name_ptr,
2707 hex_string (di->u.ti.segbase),
2708 pulongest (di->u.ti.table_len),
2709 hex_string (di->gp));
2710 return 0;
2711 }
2712
2713 /* Find the unwind table entry for a specified address. */
2714 static int
2715 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2716 unw_dyn_info_t *dip, void **buf)
2717 {
2718 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2719 Elf_Internal_Ehdr *ehdr;
2720 unw_word_t segbase = 0;
2721 CORE_ADDR load_base;
2722 bfd *bfd;
2723 int i;
2724
2725 bfd = objfile->obfd;
2726
2727 ehdr = elf_tdata (bfd)->elf_header;
2728 phdr = elf_tdata (bfd)->phdr;
2729
2730 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2731
2732 for (i = 0; i < ehdr->e_phnum; ++i)
2733 {
2734 switch (phdr[i].p_type)
2735 {
2736 case PT_LOAD:
2737 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2738 < phdr[i].p_memsz)
2739 p_text = phdr + i;
2740 break;
2741
2742 case PT_IA_64_UNWIND:
2743 p_unwind = phdr + i;
2744 break;
2745
2746 default:
2747 break;
2748 }
2749 }
2750
2751 if (!p_text || !p_unwind)
2752 return -UNW_ENOINFO;
2753
2754 /* Verify that the segment that contains the IP also contains
2755 the static unwind table. If not, we may be in the Linux kernel's
2756 DSO gate page in which case the unwind table is in another segment.
2757 Otherwise, we are dealing with runtime-generated code, for which we
2758 have no info here. */
2759 segbase = p_text->p_vaddr + load_base;
2760
2761 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2762 {
2763 int ok = 0;
2764 for (i = 0; i < ehdr->e_phnum; ++i)
2765 {
2766 if (phdr[i].p_type == PT_LOAD
2767 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2768 {
2769 ok = 1;
2770 /* Get the segbase from the section containing the
2771 libunwind table. */
2772 segbase = phdr[i].p_vaddr + load_base;
2773 }
2774 }
2775 if (!ok)
2776 return -UNW_ENOINFO;
2777 }
2778
2779 dip->start_ip = p_text->p_vaddr + load_base;
2780 dip->end_ip = dip->start_ip + p_text->p_memsz;
2781 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2782 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2783 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2784 dip->u.rti.segbase = segbase;
2785 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2786 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2787
2788 return 0;
2789 }
2790
2791 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2792 static int
2793 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2794 int need_unwind_info, void *arg)
2795 {
2796 struct obj_section *sec = find_pc_section (ip);
2797 unw_dyn_info_t di;
2798 int ret;
2799 void *buf = NULL;
2800
2801 if (!sec)
2802 {
2803 /* XXX This only works if the host and the target architecture are
2804 both ia64 and if they have (more or less) the same kernel
2805 version. */
2806 if (get_kernel_table (ip, &di) < 0)
2807 return -UNW_ENOINFO;
2808
2809 if (gdbarch_debug >= 1)
2810 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2811 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2812 "length=%s,data=%s)\n",
2813 hex_string (ip), (char *)di.u.ti.name_ptr,
2814 hex_string (di.u.ti.segbase),
2815 hex_string (di.start_ip), hex_string (di.end_ip),
2816 hex_string (di.gp),
2817 pulongest (di.u.ti.table_len),
2818 hex_string ((CORE_ADDR)di.u.ti.table_data));
2819 }
2820 else
2821 {
2822 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2823 if (ret < 0)
2824 return ret;
2825
2826 if (gdbarch_debug >= 1)
2827 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2828 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2829 "length=%s,data=%s)\n",
2830 hex_string (ip), (char *)di.u.rti.name_ptr,
2831 hex_string (di.u.rti.segbase),
2832 hex_string (di.start_ip), hex_string (di.end_ip),
2833 hex_string (di.gp),
2834 pulongest (di.u.rti.table_len),
2835 hex_string (di.u.rti.table_data));
2836 }
2837
2838 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2839 arg);
2840
2841 /* We no longer need the dyn info storage so free it. */
2842 xfree (buf);
2843
2844 return ret;
2845 }
2846
2847 /* Libunwind callback accessor function for cleanup. */
2848 static void
2849 ia64_put_unwind_info (unw_addr_space_t as,
2850 unw_proc_info_t *pip, void *arg)
2851 {
2852 /* Nothing required for now. */
2853 }
2854
2855 /* Libunwind callback accessor function to get head of the dynamic
2856 unwind-info registration list. */
2857 static int
2858 ia64_get_dyn_info_list (unw_addr_space_t as,
2859 unw_word_t *dilap, void *arg)
2860 {
2861 struct obj_section *text_sec;
2862 struct objfile *objfile;
2863 unw_word_t ip, addr;
2864 unw_dyn_info_t di;
2865 int ret;
2866
2867 if (!libunwind_is_initialized ())
2868 return -UNW_ENOINFO;
2869
2870 for (objfile = object_files; objfile; objfile = objfile->next)
2871 {
2872 void *buf = NULL;
2873
2874 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2875 ip = obj_section_addr (text_sec);
2876 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2877 if (ret >= 0)
2878 {
2879 addr = libunwind_find_dyn_list (as, &di, arg);
2880 /* We no longer need the dyn info storage so free it. */
2881 xfree (buf);
2882
2883 if (addr)
2884 {
2885 if (gdbarch_debug >= 1)
2886 fprintf_unfiltered (gdb_stdlog,
2887 "dynamic unwind table in objfile %s "
2888 "at %s (gp=%s)\n",
2889 bfd_get_filename (objfile->obfd),
2890 hex_string (addr), hex_string (di.gp));
2891 *dilap = addr;
2892 return 0;
2893 }
2894 }
2895 }
2896 return -UNW_ENOINFO;
2897 }
2898
2899
2900 /* Frame interface functions for libunwind. */
2901
2902 static void
2903 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2904 struct frame_id *this_id)
2905 {
2906 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2907 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2908 struct frame_id id = outer_frame_id;
2909 gdb_byte buf[8];
2910 CORE_ADDR bsp;
2911
2912 libunwind_frame_this_id (this_frame, this_cache, &id);
2913 if (frame_id_eq (id, outer_frame_id))
2914 {
2915 (*this_id) = outer_frame_id;
2916 return;
2917 }
2918
2919 /* We must add the bsp as the special address for frame comparison
2920 purposes. */
2921 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2922 bsp = extract_unsigned_integer (buf, 8, byte_order);
2923
2924 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2925
2926 if (gdbarch_debug >= 1)
2927 fprintf_unfiltered (gdb_stdlog,
2928 "libunwind frame id: code %s, stack %s, "
2929 "special %s, this_frame %s\n",
2930 paddress (gdbarch, id.code_addr),
2931 paddress (gdbarch, id.stack_addr),
2932 paddress (gdbarch, bsp),
2933 host_address_to_string (this_frame));
2934 }
2935
2936 static struct value *
2937 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2938 void **this_cache, int regnum)
2939 {
2940 int reg = regnum;
2941 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2942 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2943 struct value *val;
2944
2945 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2946 reg = IA64_PR_REGNUM;
2947 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2948 reg = IA64_UNAT_REGNUM;
2949
2950 /* Let libunwind do most of the work. */
2951 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2952
2953 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2954 {
2955 ULONGEST prN_val;
2956
2957 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2958 {
2959 int rrb_pr = 0;
2960 ULONGEST cfm;
2961
2962 /* Fetch predicate register rename base from current frame
2963 marker for this frame. */
2964 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2965 rrb_pr = (cfm >> 32) & 0x3f;
2966
2967 /* Adjust the register number to account for register rotation. */
2968 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2969 }
2970 prN_val = extract_bit_field (value_contents_all (val),
2971 regnum - VP0_REGNUM, 1);
2972 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2973 }
2974
2975 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2976 {
2977 ULONGEST unatN_val;
2978
2979 unatN_val = extract_bit_field (value_contents_all (val),
2980 regnum - IA64_NAT0_REGNUM, 1);
2981 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2982 }
2983
2984 else if (regnum == IA64_BSP_REGNUM)
2985 {
2986 struct value *cfm_val;
2987 CORE_ADDR prev_bsp, prev_cfm;
2988
2989 /* We want to calculate the previous bsp as the end of the previous
2990 register stack frame. This corresponds to what the hardware bsp
2991 register will be if we pop the frame back which is why we might
2992 have been called. We know that libunwind will pass us back the
2993 beginning of the current frame so we should just add sof to it. */
2994 prev_bsp = extract_unsigned_integer (value_contents_all (val),
2995 8, byte_order);
2996 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2997 IA64_CFM_REGNUM);
2998 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
2999 8, byte_order);
3000 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
3001
3002 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
3003 }
3004 else
3005 return val;
3006 }
3007
3008 static int
3009 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3010 struct frame_info *this_frame,
3011 void **this_cache)
3012 {
3013 if (libunwind_is_initialized ()
3014 && libunwind_frame_sniffer (self, this_frame, this_cache))
3015 return 1;
3016
3017 return 0;
3018 }
3019
3020 static const struct frame_unwind ia64_libunwind_frame_unwind =
3021 {
3022 NORMAL_FRAME,
3023 default_frame_unwind_stop_reason,
3024 ia64_libunwind_frame_this_id,
3025 ia64_libunwind_frame_prev_register,
3026 NULL,
3027 ia64_libunwind_frame_sniffer,
3028 libunwind_frame_dealloc_cache
3029 };
3030
3031 static void
3032 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3033 void **this_cache,
3034 struct frame_id *this_id)
3035 {
3036 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3037 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3038 gdb_byte buf[8];
3039 CORE_ADDR bsp;
3040 struct frame_id id = outer_frame_id;
3041 CORE_ADDR prev_ip;
3042
3043 libunwind_frame_this_id (this_frame, this_cache, &id);
3044 if (frame_id_eq (id, outer_frame_id))
3045 {
3046 (*this_id) = outer_frame_id;
3047 return;
3048 }
3049
3050 /* We must add the bsp as the special address for frame comparison
3051 purposes. */
3052 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3053 bsp = extract_unsigned_integer (buf, 8, byte_order);
3054
3055 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3056 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3057
3058 if (gdbarch_debug >= 1)
3059 fprintf_unfiltered (gdb_stdlog,
3060 "libunwind sigtramp frame id: code %s, "
3061 "stack %s, special %s, this_frame %s\n",
3062 paddress (gdbarch, id.code_addr),
3063 paddress (gdbarch, id.stack_addr),
3064 paddress (gdbarch, bsp),
3065 host_address_to_string (this_frame));
3066 }
3067
3068 static struct value *
3069 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3070 void **this_cache, int regnum)
3071 {
3072 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3073 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3074 struct value *prev_ip_val;
3075 CORE_ADDR prev_ip;
3076
3077 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3078 method of getting previous registers. */
3079 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3080 IA64_IP_REGNUM);
3081 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3082 8, byte_order);
3083
3084 if (prev_ip == 0)
3085 {
3086 void *tmp_cache = NULL;
3087 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3088 regnum);
3089 }
3090 else
3091 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3092 }
3093
3094 static int
3095 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3096 struct frame_info *this_frame,
3097 void **this_cache)
3098 {
3099 if (libunwind_is_initialized ())
3100 {
3101 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3102 return 1;
3103 return 0;
3104 }
3105 else
3106 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3107 }
3108
3109 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3110 {
3111 SIGTRAMP_FRAME,
3112 default_frame_unwind_stop_reason,
3113 ia64_libunwind_sigtramp_frame_this_id,
3114 ia64_libunwind_sigtramp_frame_prev_register,
3115 NULL,
3116 ia64_libunwind_sigtramp_frame_sniffer
3117 };
3118
3119 /* Set of libunwind callback accessor functions. */
3120 unw_accessors_t ia64_unw_accessors =
3121 {
3122 ia64_find_proc_info_x,
3123 ia64_put_unwind_info,
3124 ia64_get_dyn_info_list,
3125 ia64_access_mem,
3126 ia64_access_reg,
3127 ia64_access_fpreg,
3128 /* resume */
3129 /* get_proc_name */
3130 };
3131
3132 /* Set of special libunwind callback accessor functions for accessing
3133 the rse registers. At the top of the stack, we want libunwind to figure out
3134 how to read r32 - r127. Though usually they are found sequentially in
3135 memory starting from $bof, this is not always true. */
3136 unw_accessors_t ia64_unw_rse_accessors =
3137 {
3138 ia64_find_proc_info_x,
3139 ia64_put_unwind_info,
3140 ia64_get_dyn_info_list,
3141 ia64_access_mem,
3142 ia64_access_rse_reg,
3143 ia64_access_rse_fpreg,
3144 /* resume */
3145 /* get_proc_name */
3146 };
3147
3148 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3149 ia64-libunwind-tdep code to use. */
3150 struct libunwind_descr ia64_libunwind_descr =
3151 {
3152 ia64_gdb2uw_regnum,
3153 ia64_uw2gdb_regnum,
3154 ia64_is_fpreg,
3155 &ia64_unw_accessors,
3156 &ia64_unw_rse_accessors,
3157 };
3158
3159 #endif /* HAVE_LIBUNWIND_IA64_H */
3160
3161 static int
3162 ia64_use_struct_convention (struct type *type)
3163 {
3164 struct type *float_elt_type;
3165
3166 /* Don't use the struct convention for anything but structure,
3167 union, or array types. */
3168 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3169 || TYPE_CODE (type) == TYPE_CODE_UNION
3170 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3171 return 0;
3172
3173 /* HFAs are structures (or arrays) consisting entirely of floating
3174 point values of the same length. Up to 8 of these are returned
3175 in registers. Don't use the struct convention when this is the
3176 case. */
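/* For instance (illustrative type only), "struct { double x, y, z; }"
   is an HFA of three doubles and is returned in f8-f10, so the
   struct convention does not apply to it.  */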
3177 float_elt_type = is_float_or_hfa_type (type);
3178 if (float_elt_type != NULL
3179 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3180 return 0;
3181
3182 /* Other structs of length 32 or less are returned in r8-r11.
3183 Don't use the struct convention for those either. */
3184 return TYPE_LENGTH (type) > 32;
3185 }
3186
3187 /* Return non-zero if TYPE is a structure or union type. */
3188
3189 static int
3190 ia64_struct_type_p (const struct type *type)
3191 {
3192 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3193 || TYPE_CODE (type) == TYPE_CODE_UNION);
3194 }
3195
3196 static void
3197 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3198 gdb_byte *valbuf)
3199 {
3200 struct gdbarch *gdbarch = regcache->arch ();
3201 struct type *float_elt_type;
3202
3203 float_elt_type = is_float_or_hfa_type (type);
3204 if (float_elt_type != NULL)
3205 {
3206 gdb_byte from[IA64_FP_REGISTER_SIZE];
3207 int offset = 0;
3208 int regnum = IA64_FR8_REGNUM;
3209 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3210
3211 while (n-- > 0)
3212 {
3213 regcache_cooked_read (regcache, regnum, from);
3214 target_float_convert (from, ia64_ext_type (gdbarch),
3215 valbuf + offset, float_elt_type);
3216 offset += TYPE_LENGTH (float_elt_type);
3217 regnum++;
3218 }
3219 }
3220 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3221 {
3222 /* This is an integral value, and its size is less than 8 bytes.
3223 These values are LSB-aligned, so extract the relevant bytes,
3224 and copy them into VALBUF. */
3225 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3226 so I suppose we should also add handling here for integral values
3227 whose size is greater than 8. But I wasn't able to create such
3228 a type, either in C or in Ada, so not worrying about these yet. */
3229 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3230 ULONGEST val;
3231
3232 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3233 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3234 }
3235 else
3236 {
3237 ULONGEST val;
3238 int offset = 0;
3239 int regnum = IA64_GR8_REGNUM;
3240 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3241 int n = TYPE_LENGTH (type) / reglen;
3242 int m = TYPE_LENGTH (type) % reglen;
3243
3244 while (n-- > 0)
3245 {
3246 ULONGEST val;
3247 regcache_cooked_read_unsigned (regcache, regnum, &val);
3248 memcpy ((char *)valbuf + offset, &val, reglen);
3249 offset += reglen;
3250 regnum++;
3251 }
3252
3253 if (m)
3254 {
3255 regcache_cooked_read_unsigned (regcache, regnum, &val);
3256 memcpy ((char *)valbuf + offset, &val, m);
3257 }
3258 }
3259 }
3260
3261 static void
3262 ia64_store_return_value (struct type *type, struct regcache *regcache,
3263 const gdb_byte *valbuf)
3264 {
3265 struct gdbarch *gdbarch = regcache->arch ();
3266 struct type *float_elt_type;
3267
3268 float_elt_type = is_float_or_hfa_type (type);
3269 if (float_elt_type != NULL)
3270 {
3271 gdb_byte to[IA64_FP_REGISTER_SIZE];
3272 int offset = 0;
3273 int regnum = IA64_FR8_REGNUM;
3274 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3275
3276 while (n-- > 0)
3277 {
3278 target_float_convert (valbuf + offset, float_elt_type,
3279 to, ia64_ext_type (gdbarch));
3280 regcache_cooked_write (regcache, regnum, to);
3281 offset += TYPE_LENGTH (float_elt_type);
3282 regnum++;
3283 }
3284 }
3285 else
3286 {
3287 ULONGEST val;
3288 int offset = 0;
3289 int regnum = IA64_GR8_REGNUM;
3290 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3291 int n = TYPE_LENGTH (type) / reglen;
3292 int m = TYPE_LENGTH (type) % reglen;
3293
3294 while (n-- > 0)
3295 {
3296 ULONGEST val;
3297 memcpy (&val, (char *)valbuf + offset, reglen);
3298 regcache_cooked_write_unsigned (regcache, regnum, val);
3299 offset += reglen;
3300 regnum++;
3301 }
3302
3303 if (m)
3304 {
3305 memcpy (&val, (char *)valbuf + offset, m);
3306 regcache_cooked_write_unsigned (regcache, regnum, val);
3307 }
3308 }
3309 }
3310
3311 static enum return_value_convention
3312 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3313 struct type *valtype, struct regcache *regcache,
3314 gdb_byte *readbuf, const gdb_byte *writebuf)
3315 {
3316 int struct_return = ia64_use_struct_convention (valtype);
3317
3318 if (writebuf != NULL)
3319 {
3320 gdb_assert (!struct_return);
3321 ia64_store_return_value (valtype, regcache, writebuf);
3322 }
3323
3324 if (readbuf != NULL)
3325 {
3326 gdb_assert (!struct_return);
3327 ia64_extract_return_value (valtype, regcache, readbuf);
3328 }
3329
3330 if (struct_return)
3331 return RETURN_VALUE_STRUCT_CONVENTION;
3332 else
3333 return RETURN_VALUE_REGISTER_CONVENTION;
3334 }
3335
3336 static int
3337 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3338 {
3339 switch (TYPE_CODE (t))
3340 {
3341 case TYPE_CODE_FLT:
3342 if (*etp)
3343 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3344 else
3345 {
3346 *etp = t;
3347 return 1;
3348 }
3349 break;
3350 case TYPE_CODE_ARRAY:
3351 return
3352 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3353 etp);
3354 break;
3355 case TYPE_CODE_STRUCT:
3356 {
3357 int i;
3358
3359 for (i = 0; i < TYPE_NFIELDS (t); i++)
3360 if (!is_float_or_hfa_type_recurse
3361 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3362 return 0;
3363 return 1;
3364 }
3365 break;
3366 default:
3367 return 0;
3368 break;
3369 }
3370 }
3371
3372 /* Determine if the given type is one of the floating point types or
3373 an HFA (which is a struct, array, or combination thereof whose
3374 bottom-most elements are all of the same floating point type). */
3375
3376 static struct type *
3377 is_float_or_hfa_type (struct type *t)
3378 {
3379 struct type *et = 0;
3380
3381 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3382 }
3383
3384
3385 /* Return 1 if the alignment of T is such that the next even slot
3386 should be used.  Return 0 if the next available slot should
3387 be used. (See section 8.5.1 of the IA-64 Software Conventions
3388 and Runtime manual). */
3389
3390 static int
3391 slot_alignment_is_next_even (struct type *t)
3392 {
3393 switch (TYPE_CODE (t))
3394 {
3395 case TYPE_CODE_INT:
3396 case TYPE_CODE_FLT:
3397 if (TYPE_LENGTH (t) > 8)
3398 return 1;
3399 else
3400 return 0;
3401 case TYPE_CODE_ARRAY:
3402 return
3403 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3404 case TYPE_CODE_STRUCT:
3405 {
3406 int i;
3407
3408 for (i = 0; i < TYPE_NFIELDS (t); i++)
3409 if (slot_alignment_is_next_even
3410 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3411 return 1;
3412 return 0;
3413 }
3414 default:
3415 return 0;
3416 }
3417 }
3418
3419 /* Attempt to find (and return) the global pointer for the given
3420 function.
3421
3422 This is a rather nasty bit of code that searches for the .dynamic section
3423 in the objfile corresponding to the pc of the function we're trying
3424 to call. Once it finds the addresses at which the .dynamic section
3425 lives in the child process, it scans the Elf64_Dyn entries for a
3426 DT_PLTGOT tag. If it finds one of these, the corresponding
3427 d_un.d_ptr value is the global pointer. */
3428
3429 static CORE_ADDR
3430 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3431 CORE_ADDR faddr)
3432 {
3433 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3434 struct obj_section *faddr_sect;
3435
3436 faddr_sect = find_pc_section (faddr);
3437 if (faddr_sect != NULL)
3438 {
3439 struct obj_section *osect;
3440
3441 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3442 {
3443 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3444 break;
3445 }
3446
3447 if (osect < faddr_sect->objfile->sections_end)
3448 {
3449 CORE_ADDR addr, endaddr;
3450
3451 addr = obj_section_addr (osect);
3452 endaddr = obj_section_endaddr (osect);
3453
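/* Scan the Elf64_Dyn entries; each is 16 bytes, an 8-byte d_tag
   followed by an 8-byte d_un value.  */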
3454 while (addr < endaddr)
3455 {
3456 int status;
3457 LONGEST tag;
3458 gdb_byte buf[8];
3459
3460 status = target_read_memory (addr, buf, sizeof (buf));
3461 if (status != 0)
3462 break;
3463 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3464
3465 if (tag == DT_PLTGOT)
3466 {
3467 CORE_ADDR global_pointer;
3468
3469 status = target_read_memory (addr + 8, buf, sizeof (buf));
3470 if (status != 0)
3471 break;
3472 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3473 byte_order);
3474
3475 /* The payoff... */
3476 return global_pointer;
3477 }
3478
3479 if (tag == DT_NULL)
3480 break;
3481
3482 addr += 16;
3483 }
3484 }
3485 }
3486 return 0;
3487 }
3488
3489 /* Attempt to find (and return) the global pointer for the given
3490 function. We first try the find_global_pointer_from_solib routine
3491 from the gdbarch tdep vector, if provided. And if that does not
3492 work, then we try ia64_find_global_pointer_from_dynamic_section. */
3493
3494 static CORE_ADDR
3495 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3496 {
3497 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3498 CORE_ADDR addr = 0;
3499
3500 if (tdep->find_global_pointer_from_solib)
3501 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3502 if (addr == 0)
3503 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3504 return addr;
3505 }
3506
3507 /* Given a function's address, attempt to find (and return) the
3508 corresponding (canonical) function descriptor. Return 0 if
3509 not found. */
3510 static CORE_ADDR
3511 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3512 {
3513 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3514 struct obj_section *faddr_sect;
3515
3516 /* Return early if faddr is already a function descriptor. */
3517 faddr_sect = find_pc_section (faddr);
3518 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3519 return faddr;
3520
3521 if (faddr_sect != NULL)
3522 {
3523 struct obj_section *osect;
3524 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3525 {
3526 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3527 break;
3528 }
3529
3530 if (osect < faddr_sect->objfile->sections_end)
3531 {
3532 CORE_ADDR addr, endaddr;
3533
3534 addr = obj_section_addr (osect);
3535 endaddr = obj_section_endaddr (osect);
3536
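/* Scan the 16-byte .opd entries; each function descriptor holds the
   function's entry point followed by its global pointer.  */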
3537 while (addr < endaddr)
3538 {
3539 int status;
3540 LONGEST faddr2;
3541 gdb_byte buf[8];
3542
3543 status = target_read_memory (addr, buf, sizeof (buf));
3544 if (status != 0)
3545 break;
3546 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3547
3548 if (faddr == faddr2)
3549 return addr;
3550
3551 addr += 16;
3552 }
3553 }
3554 }
3555 return 0;
3556 }
3557
3558 /* Attempt to find a function descriptor corresponding to the
3559 given address. If none is found, construct one on the
3560 stack using the address at fdaptr. */
3561
3562 static CORE_ADDR
3563 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3564 {
3565 struct gdbarch *gdbarch = regcache->arch ();
3566 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3567 CORE_ADDR fdesc;
3568
3569 fdesc = find_extant_func_descr (gdbarch, faddr);
3570
3571 if (fdesc == 0)
3572 {
3573 ULONGEST global_pointer;
3574 gdb_byte buf[16];
3575
3576 fdesc = *fdaptr;
3577 *fdaptr += 16;
3578
3579 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3580
3581 if (global_pointer == 0)
3582 regcache_cooked_read_unsigned (regcache,
3583 IA64_GR1_REGNUM, &global_pointer);
3584
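/* Lay out the descriptor: the function's entry point in the first
   doubleword, its global pointer in the second.  */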
3585 store_unsigned_integer (buf, 8, byte_order, faddr);
3586 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3587
3588 write_memory (fdesc, buf, 16);
3589 }
3590
3591 return fdesc;
3592 }
3593
3594 /* Use the following routine when printing out function pointers
3595 so the user can see the function address rather than just the
3596 function descriptor. */
3597 static CORE_ADDR
3598 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3599 struct target_ops *targ)
3600 {
3601 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3602 struct obj_section *s;
3603 gdb_byte buf[8];
3604
3605 s = find_pc_section (addr);
3606
3607 /* Check whether ADDR points to a function descriptor.  */
3608 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3609 return read_memory_unsigned_integer (addr, 8, byte_order);
3610
3611 /* Normally, functions live inside a section that is executable.
3612 So, if ADDR points to a non-executable section, then treat it
3613 as a function descriptor and return the target address iff
3614 the target address itself points to a section that is executable.
3615 First check that the whole 8 bytes are readable.  */
3616 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3617 && target_read_memory (addr, buf, 8) == 0)
3618 {
3619 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3620 struct obj_section *pc_section = find_pc_section (pc);
3621
3622 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3623 return pc;
3624 }
3625
3626 /* There are also descriptors embedded in vtables. */
3627 if (s)
3628 {
3629 struct bound_minimal_symbol minsym;
3630
3631 minsym = lookup_minimal_symbol_by_pc (addr);
3632
3633 if (minsym.minsym
3634 && is_vtable_name (MSYMBOL_LINKAGE_NAME (minsym.minsym)))
3635 return read_memory_unsigned_integer (addr, 8, byte_order);
3636 }
3637
3638 return addr;
3639 }
3640
3641 static CORE_ADDR
3642 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3643 {
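/* The ia64 calling convention requires a 16-byte-aligned stack pointer.  */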
3644 return sp & ~0xfLL;
3645 }
3646
3647 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3648
3649 static void
3650 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3651 {
3652 ULONGEST cfm, pfs, new_bsp;
3653
3654 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3655
3656 new_bsp = rse_address_add (bsp, sof);
3657 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3658
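/* Copy the low 48 bits of CFM (the current frame marker) into PFS,
   preserving only the topmost PFS bits, so the caller's frame can be
   restored later; then make CFM describe a new frame of SOF registers.  */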
3659 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3660 pfs &= 0xc000000000000000LL;
3661 pfs |= (cfm & 0xffffffffffffLL);
3662 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3663
3664 cfm &= 0xc000000000000000LL;
3665 cfm |= sof;
3666 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3667 }
3668
3669 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3670 ia64. */
3671
3672 static void
3673 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3674 int slotnum, gdb_byte *buf)
3675 {
3676 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3677 }
3678
3679 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3680
3681 static void
3682 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3683 {
3684 /* Nothing needed. */
3685 }
3686
3687 static CORE_ADDR
3688 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3689 struct regcache *regcache, CORE_ADDR bp_addr,
3690 int nargs, struct value **args, CORE_ADDR sp,
3691 int struct_return, CORE_ADDR struct_addr)
3692 {
3693 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3694 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3695 int argno;
3696 struct value *arg;
3697 struct type *type;
3698 int len, argoffset;
3699 int nslots, rseslots, memslots, slotnum, nfuncargs;
3700 int floatreg;
3701 ULONGEST bsp;
3702 CORE_ADDR funcdescaddr, global_pointer;
3703 CORE_ADDR func_addr = find_function_addr (function, NULL);
3704
3705 nslots = 0;
3706 nfuncargs = 0;
3707 /* Count the number of slots needed for the arguments. */
3708 for (argno = 0; argno < nargs; argno++)
3709 {
3710 arg = args[argno];
3711 type = check_typedef (value_type (arg));
3712 len = TYPE_LENGTH (type);
3713
3714 if ((nslots & 1) && slot_alignment_is_next_even (type))
3715 nslots++;
3716
3717 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3718 nfuncargs++;
3719
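/* Each argument occupies a whole number of 8-byte argument slots.  */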
3720 nslots += (len + 7) / 8;
3721 }
3722
3723 /* Divvy up the slots between the RSE and the memory stack. */
3724 rseslots = (nslots > 8) ? 8 : nslots;
3725 memslots = nslots - rseslots;
3726
3727 /* Allocate a new RSE frame. */
3728 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3729 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3730
3731 /* We will attempt to find function descriptors in the .opd segment,
3732 but if we can't we'll construct them ourselves. That being the
3733 case, we'll need to reserve space on the stack for them. */
3734 funcdescaddr = sp - nfuncargs * 16;
3735 funcdescaddr &= ~0xfLL;
3736
3737 /* Adjust the stack pointer to its new value.  The calling conventions
3738 require us to have 16 bytes of scratch, plus whatever space is
3739 necessary for the memory slots and our function descriptors. */
3740 sp = sp - 16 - (memslots + nfuncargs) * 8;
3741 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3742
3743 /* Place the arguments where they belong. The arguments will be
3744 either placed in the RSE backing store or on the memory stack.
3745 In addition, floating point arguments or HFAs are placed in
3746 floating point registers. */
3747 slotnum = 0;
3748 floatreg = IA64_FR8_REGNUM;
3749 for (argno = 0; argno < nargs; argno++)
3750 {
3751 struct type *float_elt_type;
3752
3753 arg = args[argno];
3754 type = check_typedef (value_type (arg));
3755 len = TYPE_LENGTH (type);
3756
3757 /* Special handling for function parameters. */
3758 if (len == 8
3759 && TYPE_CODE (type) == TYPE_CODE_PTR
3760 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3761 {
3762 gdb_byte val_buf[8];
3763 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3764 8, byte_order);
3765 store_unsigned_integer (val_buf, 8, byte_order,
3766 find_func_descr (regcache, faddr,
3767 &funcdescaddr));
3768 if (slotnum < rseslots)
3769 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3770 slotnum, val_buf);
3771 else
3772 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3773 slotnum++;
3774 continue;
3775 }
3776
3777 /* Normal slots. */
3778
3779 /* Skip odd slot if necessary... */
3780 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3781 slotnum++;
3782
3783 argoffset = 0;
3784 while (len > 0)
3785 {
3786 gdb_byte val_buf[8];
3787
3788 memset (val_buf, 0, 8);
3789 if (!ia64_struct_type_p (type) && len < 8)
3790 {
3791 /* Integral types are LSB-aligned, so we have to be careful
3792 to insert the argument on the correct side of the buffer.
3793 This is why we use store_unsigned_integer. */
3794 store_unsigned_integer
3795 (val_buf, 8, byte_order,
3796 extract_unsigned_integer (value_contents (arg), len,
3797 byte_order));
3798 }
3799 else
3800 {
3801 /* This is either an 8-byte (or wider) integral type, or an aggregate.
3802 For such an integral type there is no problem; we just
3803 copy the value over.
3804
3805 For aggregates, the only potentially tricky portion
3806 is to write the last one if it is less than 8 bytes.
3807 In this case, the data is Byte0-aligned. Happy news,
3808 this means that we don't need to differentiate the
3809 handling of 8-byte blocks and less-than-8-byte blocks.  */
3810 memcpy (val_buf, value_contents (arg) + argoffset,
3811 (len > 8) ? 8 : len);
3812 }
3813
3814 if (slotnum < rseslots)
3815 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3816 slotnum, val_buf);
3817 else
3818 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3819
3820 argoffset += 8;
3821 len -= 8;
3822 slotnum++;
3823 }
3824
3825 /* Handle floating point types (including HFAs). */
3826 float_elt_type = is_float_or_hfa_type (type);
3827 if (float_elt_type != NULL)
3828 {
3829 argoffset = 0;
3830 len = TYPE_LENGTH (type);
3831 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3832 {
3833 gdb_byte to[IA64_FP_REGISTER_SIZE];
3834 target_float_convert (value_contents (arg) + argoffset,
3835 float_elt_type, to,
3836 ia64_ext_type (gdbarch));
3837 regcache_cooked_write (regcache, floatreg, to);
3838 floatreg++;
3839 argoffset += TYPE_LENGTH (float_elt_type);
3840 len -= TYPE_LENGTH (float_elt_type);
3841 }
3842 }
3843 }
3844
3845 /* Store the struct return value in r8 if necessary. */
3846 if (struct_return)
3847 {
3848 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3849 (ULONGEST) struct_addr);
3850 }
3851
3852 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3853
3854 if (global_pointer != 0)
3855 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3856
3857 /* The following is not necessary on HP-UX, because we're using
3858 a dummy code sequence pushed on the stack to make the call, and
3859 this sequence doesn't need b0 to be set in order for our dummy
3860 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3861 it's needed for other OSes, so we do this unconditionally.  */
3862 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3863
3864 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3865
3866 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3867
3868 return sp;
3869 }
3870
3871 static const struct ia64_infcall_ops ia64_infcall_ops =
3872 {
3873 ia64_allocate_new_rse_frame,
3874 ia64_store_argument_in_slot,
3875 ia64_set_function_addr
3876 };
3877
3878 static struct frame_id
3879 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3880 {
3881 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3882 gdb_byte buf[8];
3883 CORE_ADDR sp, bsp;
3884
3885 get_frame_register (this_frame, sp_regnum, buf);
3886 sp = extract_unsigned_integer (buf, 8, byte_order);
3887
3888 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3889 bsp = extract_unsigned_integer (buf, 8, byte_order);
3890
3891 if (gdbarch_debug >= 1)
3892 fprintf_unfiltered (gdb_stdlog,
3893 "dummy frame id: code %s, stack %s, special %s\n",
3894 paddress (gdbarch, get_frame_pc (this_frame)),
3895 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3896
3897 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3898 }
3899
3900 static CORE_ADDR
3901 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3902 {
3903 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3904 gdb_byte buf[8];
3905 CORE_ADDR ip, psr, pc;
3906
3907 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3908 ip = extract_unsigned_integer (buf, 8, byte_order);
3909 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3910 psr = extract_unsigned_integer (buf, 8, byte_order);
3911
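/* psr.ri (bits 41-42 of the PSR) holds the slot number of the instruction
   to resume; merge it into the low bits of the bundle address to form the
   slot-encoded PC.  */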
3912 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3913 return pc;
3914 }
3915
3916 static int
3917 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3918 {
3919 info->bytes_per_line = SLOT_MULTIPLIER;
3920 return default_print_insn (memaddr, info);
3921 }
3922
3923 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3924
3925 static int
3926 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3927 {
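/* The frame size is the sof field, the low 7 bits of CFM.  */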
3928 return (cfm & 0x7f);
3929 }
3930
3931 static struct gdbarch *
3932 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3933 {
3934 struct gdbarch *gdbarch;
3935 struct gdbarch_tdep *tdep;
3936
3937 /* If there is already a candidate, use it. */
3938 arches = gdbarch_list_lookup_by_info (arches, &info);
3939 if (arches != NULL)
3940 return arches->gdbarch;
3941
3942 tdep = XCNEW (struct gdbarch_tdep);
3943 gdbarch = gdbarch_alloc (&info, tdep);
3944
3945 tdep->size_of_register_frame = ia64_size_of_register_frame;
3946
3947 /* According to the ia64 specs, instructions that store long double
3948 floats in memory use a long-double format different than that
3949 used in the floating registers. The memory format matches the
3950 x86 extended float format which is 80 bits. An OS may choose to
3951 use this format (e.g. GNU/Linux) or choose to use a different
3952 format for storing long doubles (e.g. HPUX). In the latter case,
3953 the setting of the format may be moved/overridden in an
3954 OS-specific tdep file. */
3955 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3956
3957 set_gdbarch_short_bit (gdbarch, 16);
3958 set_gdbarch_int_bit (gdbarch, 32);
3959 set_gdbarch_long_bit (gdbarch, 64);
3960 set_gdbarch_long_long_bit (gdbarch, 64);
3961 set_gdbarch_float_bit (gdbarch, 32);
3962 set_gdbarch_double_bit (gdbarch, 64);
3963 set_gdbarch_long_double_bit (gdbarch, 128);
3964 set_gdbarch_ptr_bit (gdbarch, 64);
3965
3966 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3967 set_gdbarch_num_pseudo_regs (gdbarch,
3968 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3969 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3970 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3971
3972 set_gdbarch_register_name (gdbarch, ia64_register_name);
3973 set_gdbarch_register_type (gdbarch, ia64_register_type);
3974
3975 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3976 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3977 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3978 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3979 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3980 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3981 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3982
3983 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3984
3985 set_gdbarch_return_value (gdbarch, ia64_return_value);
3986
3987 set_gdbarch_memory_insert_breakpoint (gdbarch,
3988 ia64_memory_insert_breakpoint);
3989 set_gdbarch_memory_remove_breakpoint (gdbarch,
3990 ia64_memory_remove_breakpoint);
3991 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3992 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3993 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3994 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3995
3996 /* Settings for calling functions in the inferior. */
3997 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3998 tdep->infcall_ops = ia64_infcall_ops;
3999 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
4000 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
4001
4002 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
4003 #ifdef HAVE_LIBUNWIND_IA64_H
4004 frame_unwind_append_unwinder (gdbarch,
4005 &ia64_libunwind_sigtramp_frame_unwind);
4006 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4007 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4008 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4009 #else
4010 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4011 #endif
4012 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4013 frame_base_set_default (gdbarch, &ia64_frame_base);
4014
4015 /* Settings that should be unnecessary. */
4016 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4017
4018 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4019 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4020 ia64_convert_from_func_ptr_addr);
4021
4022 /* The virtual table contains 16-byte descriptors, not pointers to
4023 descriptors. */
4024 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4025
4026 /* Hook in ABI-specific overrides, if they have been registered. */
4027 gdbarch_init_osabi (info, gdbarch);
4028
4029 return gdbarch;
4030 }
4031
4032 void
4033 _initialize_ia64_tdep (void)
4034 {
4035 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4036 }