1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "doublest.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56        None of this hackery is needed with a modern kernel/libc
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
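/* A minimal illustrative sketch of the redirection described above.  The
   helper name is an assumption made for this example only and is not used
   elsewhere in this file: an access of LEN bytes at ADDR falls in the
   projected kernel unwind table when it lies entirely inside
   KERNEL_START..KERNEL_START+ktab_size, in which case it would be served
   from KTAB rather than from target memory.  */

static int
ktab_covers_range (CORE_ADDR addr, size_t len)
{
  return (ktab != NULL
          && addr >= KERNEL_START
          && addr - KERNEL_START + len <= ktab_size);
}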
72
73 #endif
74
75 /* An enumeration of the different IA-64 instruction types. */
76
77 typedef enum instruction_type
78 {
79 A, /* Integer ALU ; I-unit or M-unit */
80 I, /* Non-ALU integer; I-unit */
81 M, /* Memory ; M-unit */
82 F, /* Floating-point ; F-unit */
83 B, /* Branch ; B-unit */
84 L, /* Extended (L+X) ; I-unit */
85 X, /* Extended (L+X) ; I-unit */
86 undefined /* undefined or reserved */
87 } instruction_type;
88
89 /* We represent IA-64 PC addresses as the value of the instruction
90 pointer or'd with some bit combination in the low nibble which
91 represents the slot number in the bundle addressed by the
92 instruction pointer. The problem is that the Linux kernel
93 multiplies its slot numbers (for exceptions) by one while the
94 disassembler multiplies its slot numbers by 6. In addition, I've
95 heard it said that the simulator uses 1 as the multiplier.
96
97 I've fixed the disassembler so that the bytes_per_line field will
98 be the slot multiplier. If bytes_per_line comes in as zero, it
99    is set to six (which is how it was set up initially); objdump
100 displays pretty disassembly dumps with this value. For our purposes,
101 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
102 never want to also display the raw bytes the way objdump does. */
103
104 #define SLOT_MULTIPLIER 1
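/* A small illustrative sketch of the addressing scheme described above; the
   helper names are assumptions for this example only and are not used
   elsewhere.  With SLOT_MULTIPLIER == 1, a GDB PC such as 0x40000000000012c1
   denotes slot 1 of the bundle at 0x40000000000012c0.  */

static int
ia64_example_slot_of_pc (CORE_ADDR pc)
{
  return (int) (pc & 0x0f) / SLOT_MULTIPLIER;   /* Slot number 0, 1 or 2.  */
}

static CORE_ADDR
ia64_example_bundle_of_pc (CORE_ADDR pc)
{
  return pc & ~(CORE_ADDR) 0x0f;        /* 16-byte aligned bundle address.  */
}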
105
106 /* Length in bytes of an instruction bundle. */
107
108 #define BUNDLE_LEN 16
109
110 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
111
112 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
113 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
114 #endif
115
116 static gdbarch_init_ftype ia64_gdbarch_init;
117
118 static gdbarch_register_name_ftype ia64_register_name;
119 static gdbarch_register_type_ftype ia64_register_type;
120 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
121 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
122 static struct type *is_float_or_hfa_type (struct type *t);
123 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
124 CORE_ADDR faddr);
125
126 #define NUM_IA64_RAW_REGS 462
127
128 static int sp_regnum = IA64_GR12_REGNUM;
129 static int fp_regnum = IA64_VFP_REGNUM;
130 static int lr_regnum = IA64_VRAP_REGNUM;
131
132 /* NOTE: we treat the register stack registers r32-r127 as
133 pseudo-registers because they may not be accessible via the ptrace
134 register get/set interfaces. */
135
136 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
137 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
138 V127_REGNUM = V32_REGNUM + 95,
139 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
140 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
141
142 /* Array of register names; There should be ia64_num_regs strings in
143 the initializer. */
144
145 static char *ia64_register_names[] =
146 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
147 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
148 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
149 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
150 "", "", "", "", "", "", "", "",
151 "", "", "", "", "", "", "", "",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162
163 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
164 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
165 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
166 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
167 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
168 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
169 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
170 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
171 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
172 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
173 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
174 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
175 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
176 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
177 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
178 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
179
180 "", "", "", "", "", "", "", "",
181 "", "", "", "", "", "", "", "",
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188
189 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
190
191 "vfp", "vrap",
192
193 "pr", "ip", "psr", "cfm",
194
195 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
196 "", "", "", "", "", "", "", "",
197 "rsc", "bsp", "bspstore", "rnat",
198 "", "fcr", "", "",
199 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
200 "ccv", "", "", "", "unat", "", "", "",
201 "fpsr", "", "", "", "itc",
202 "", "", "", "", "", "", "", "", "", "",
203 "", "", "", "", "", "", "", "", "",
204 "pfs", "lc", "ec",
205 "", "", "", "", "", "", "", "", "", "",
206 "", "", "", "", "", "", "", "", "", "",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "",
212 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
213 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
214 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
215 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
216 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
217 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
218 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
219 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
220 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
221 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
222 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
223 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
224 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
225 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
226 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
227 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
228
229 "bof",
230
231 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
232 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
233 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
234 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
235 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
236 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
237 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
238 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
239 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
240 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
241 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
242 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
243
244 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
245 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
246 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
247 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
248 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
249 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
250 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
251 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
252 };
253
254 struct ia64_frame_cache
255 {
256 CORE_ADDR base; /* frame pointer base for frame */
257 CORE_ADDR pc; /* function start pc for frame */
258 CORE_ADDR saved_sp; /* stack pointer for frame */
259 CORE_ADDR bsp; /* points at r32 for the current frame */
260 CORE_ADDR cfm; /* cfm value for current frame */
261 CORE_ADDR prev_cfm; /* cfm value for previous frame */
262 int frameless;
263 int sof; /* Size of frame (decoded from cfm value). */
264 int sol; /* Size of locals (decoded from cfm value). */
265 int sor; /* Number of rotating registers (decoded from
266 cfm value). */
267 CORE_ADDR after_prologue;
268 /* Address of first instruction after the last
269 prologue instruction; Note that there may
270 be instructions from the function's body
271 intermingled with the prologue. */
272 int mem_stack_frame_size;
273 /* Size of the memory stack frame (may be zero),
274 or -1 if it has not been determined yet. */
275   int fp_reg;             /* Register number (if any) used as a frame pointer
276 for this frame. 0 if no register is being used
277 as the frame pointer. */
278
279 /* Saved registers. */
280 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
281
282 };
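/* Illustrative sketch only (the helper name is an assumption and is not
   referenced elsewhere): the sof/sol/sor members above are decoded from a
   CFM value using the architected field layout -- size-of-frame in bits
   0..6, size-of-locals in bits 7..13 and size-of-rotating (in multiples of
   eight registers) in bits 14..17.  */

static void
ia64_example_decode_cfm (CORE_ADDR cfm, int *sof, int *sol, int *sor)
{
  *sof = (int) (cfm & 0x7f);
  *sol = (int) ((cfm >> 7) & 0x7f);
  *sor = (int) ((cfm >> 14) & 0xf) * 8;
}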
283
284 static int
285 floatformat_valid (const struct floatformat *fmt, const void *from)
286 {
287 return 1;
288 }
289
290 static const struct floatformat floatformat_ia64_ext_little =
291 {
292 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
293 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
294 };
295
296 static const struct floatformat floatformat_ia64_ext_big =
297 {
298 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
299 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
300 };
301
302 static const struct floatformat *floatformats_ia64_ext[2] =
303 {
304 &floatformat_ia64_ext_big,
305 &floatformat_ia64_ext_little
306 };
307
308 static struct type *
309 ia64_ext_type (struct gdbarch *gdbarch)
310 {
311 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
312
313 if (!tdep->ia64_ext_type)
314 tdep->ia64_ext_type
315 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
316 floatformats_ia64_ext);
317
318 return tdep->ia64_ext_type;
319 }
320
321 static int
322 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
323 struct reggroup *group)
324 {
325 int vector_p;
326 int float_p;
327 int raw_p;
328 if (group == all_reggroup)
329 return 1;
330 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
331 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
332 raw_p = regnum < NUM_IA64_RAW_REGS;
333 if (group == float_reggroup)
334 return float_p;
335 if (group == vector_reggroup)
336 return vector_p;
337 if (group == general_reggroup)
338 return (!vector_p && !float_p);
339 if (group == save_reggroup || group == restore_reggroup)
340 return raw_p;
341 return 0;
342 }
343
344 static const char *
345 ia64_register_name (struct gdbarch *gdbarch, int reg)
346 {
347 return ia64_register_names[reg];
348 }
349
350 struct type *
351 ia64_register_type (struct gdbarch *arch, int reg)
352 {
353 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
354 return ia64_ext_type (arch);
355 else
356 return builtin_type (arch)->builtin_long;
357 }
358
359 static int
360 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
361 {
362 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
363 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
364 return reg;
365 }
366
367
368 /* Extract ``len'' bits from an instruction bundle starting at
369 bit ``from''. */
370
371 static long long
372 extract_bit_field (const gdb_byte *bundle, int from, int len)
373 {
374 long long result = 0LL;
375 int to = from + len;
376 int from_byte = from / 8;
377 int to_byte = to / 8;
378 unsigned char *b = (unsigned char *) bundle;
379 unsigned char c;
380 int lshift;
381 int i;
382
383 c = b[from_byte];
384 if (from_byte == to_byte)
385 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
386 result = c >> (from % 8);
387 lshift = 8 - (from % 8);
388
389 for (i = from_byte+1; i < to_byte; i++)
390 {
391 result |= ((long long) b[i]) << lshift;
392 lshift += 8;
393 }
394
395 if (from_byte < to_byte && (to % 8 != 0))
396 {
397 c = b[to_byte];
398 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
399 result |= ((long long) c) << lshift;
400 }
401
402 return result;
403 }
404
405 /* Replace the specified bits in an instruction bundle. */
406
407 static void
408 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
409 {
410 int to = from + len;
411 int from_byte = from / 8;
412 int to_byte = to / 8;
413 unsigned char *b = (unsigned char *) bundle;
414 unsigned char c;
415
416 if (from_byte == to_byte)
417 {
418 unsigned char left, right;
419 c = b[from_byte];
420 left = (c >> (to % 8)) << (to % 8);
421 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
422 c = (unsigned char) (val & 0xff);
423 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
424 c |= right | left;
425 b[from_byte] = c;
426 }
427 else
428 {
429 int i;
430 c = b[from_byte];
431 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
432 c = c | (val << (from % 8));
433 b[from_byte] = c;
434 val >>= 8 - from % 8;
435
436 for (i = from_byte+1; i < to_byte; i++)
437 {
438 c = val & 0xff;
439 val >>= 8;
440 b[i] = c;
441 }
442
443 if (to % 8 != 0)
444 {
445 unsigned char cv = (unsigned char) val;
446 c = b[to_byte];
447 c = c >> (to % 8) << (to % 8);
448 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
449 b[to_byte] = c;
450 }
451 }
452 }
453
454 /* Return the contents of slot N (for N = 0, 1, or 2) in
455    an instruction bundle.  */
456
457 static long long
458 slotN_contents (gdb_byte *bundle, int slotnum)
459 {
460 return extract_bit_field (bundle, 5+41*slotnum, 41);
461 }
462
463 /* Store an instruction in an instruction bundle. */
464
465 static void
466 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
467 {
468 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
469 }
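/* Illustrative sketch (assumed helper, not referenced elsewhere): a whole
   bundle decodes into the 5-bit template at bit 0 plus three 41-bit slots at
   bit offsets 5, 46 and 87, which is exactly what extract_bit_field and
   slotN_contents above compute.  */

static void
ia64_example_decode_bundle (gdb_byte *bundle, int *templ, long long slot[3])
{
  int i;

  *templ = (int) extract_bit_field (bundle, 0, 5);
  for (i = 0; i < 3; i++)
    slot[i] = slotN_contents (bundle, i);
}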
470
471 static const enum instruction_type template_encoding_table[32][3] =
472 {
473 { M, I, I }, /* 00 */
474 { M, I, I }, /* 01 */
475 { M, I, I }, /* 02 */
476 { M, I, I }, /* 03 */
477 { M, L, X }, /* 04 */
478 { M, L, X }, /* 05 */
479 { undefined, undefined, undefined }, /* 06 */
480 { undefined, undefined, undefined }, /* 07 */
481 { M, M, I }, /* 08 */
482 { M, M, I }, /* 09 */
483 { M, M, I }, /* 0A */
484 { M, M, I }, /* 0B */
485 { M, F, I }, /* 0C */
486 { M, F, I }, /* 0D */
487 { M, M, F }, /* 0E */
488 { M, M, F }, /* 0F */
489 { M, I, B }, /* 10 */
490 { M, I, B }, /* 11 */
491 { M, B, B }, /* 12 */
492 { M, B, B }, /* 13 */
493 { undefined, undefined, undefined }, /* 14 */
494 { undefined, undefined, undefined }, /* 15 */
495 { B, B, B }, /* 16 */
496 { B, B, B }, /* 17 */
497 { M, M, B }, /* 18 */
498 { M, M, B }, /* 19 */
499 { undefined, undefined, undefined }, /* 1A */
500 { undefined, undefined, undefined }, /* 1B */
501 { M, F, B }, /* 1C */
502 { M, F, B }, /* 1D */
503 { undefined, undefined, undefined }, /* 1E */
504 { undefined, undefined, undefined }, /* 1F */
505 };
506
507 /* Fetch and (partially) decode an instruction at ADDR and return the
508 address of the next instruction to fetch. */
509
510 static CORE_ADDR
511 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
512 {
513 gdb_byte bundle[BUNDLE_LEN];
514 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
515 long long template;
516 int val;
517
518 /* Warn about slot numbers greater than 2. We used to generate
519 an error here on the assumption that the user entered an invalid
520 address. But, sometimes GDB itself requests an invalid address.
521 This can (easily) happen when execution stops in a function for
522 which there are no symbols. The prologue scanner will attempt to
523 find the beginning of the function - if the nearest symbol
524 happens to not be aligned on a bundle boundary (16 bytes), the
525 resulting starting address will cause GDB to think that the slot
526 number is too large.
527
528 So we warn about it and set the slot number to zero. It is
529 not necessarily a fatal condition, particularly if debugging
530 at the assembly language level. */
531 if (slotnum > 2)
532 {
533 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
534 "Using slot 0 instead"));
535 slotnum = 0;
536 }
537
538 addr &= ~0x0f;
539
540 val = target_read_memory (addr, bundle, BUNDLE_LEN);
541
542 if (val != 0)
543 return 0;
544
545 *instr = slotN_contents (bundle, slotnum);
546 template = extract_bit_field (bundle, 0, 5);
547 *it = template_encoding_table[(int)template][slotnum];
548
549 if (slotnum == 2 || (slotnum == 1 && *it == L))
550 addr += 16;
551 else
552 addr += (slotnum + 1) * SLOT_MULTIPLIER;
553
554 return addr;
555 }
556
557 /* There are 5 different break instructions (break.i, break.b,
558 break.m, break.f, and break.x), but they all have the same
559 encoding. (The five bit template in the low five bits of the
560 instruction bundle distinguishes one from another.)
561
562 The runtime architecture manual specifies that break instructions
563 used for debugging purposes must have the upper two bits of the 21
564 bit immediate set to a 0 and a 1 respectively. A breakpoint
565 instruction encodes the most significant bit of its 21 bit
566 immediate at bit 36 of the 41 bit instruction. The penultimate msb
567 is at bit 25 which leads to the pattern below.
568
569 Originally, I had this set up to do, e.g, a "break.i 0x80000" But
570 it turns out that 0x80000 was used as the syscall break in the early
571 simulators. So I changed the pattern slightly to do "break.i 0x080001"
572 instead. But that didn't work either (I later found out that this
573 pattern was used by the simulator that I was using.) So I ended up
574 using the pattern seen below.
575
576 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
577    while we need bit-based addressing, as an instruction is 41 bits long and
578    we must not modify/corrupt the adjacent slots in the same bundle.
579    Fortunately we may store a larger region including the adjacent bits, using
580    the original memory content (not the breakpoints possibly already stored there).
581 We need to be careful in ia64_memory_remove_breakpoint to always restore
582 only the specific bits of this instruction ignoring any adjacent stored
583 bits.
584
585 We use the original addressing with the low nibble in the range <0..2> which
586 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
587    as the direct byte offset of SHADOW_CONTENTS. We store the whole BUNDLE_LEN
588    bytes, minus these (at most two) possibly skipped leading bytes, so as not
589    to run into the next bundle.
590
591    If we wanted to store the whole bundle to SHADOW_CONTENTS we would have
592    to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
593    In that case there would be no other place left to store
594    SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
595    SLOTNUM in ia64_memory_remove_breakpoint.
596
597 There is one special case where we need to be extra careful:
598 L-X instructions, which are instructions that occupy 2 slots
599 (The L part is always in slot 1, and the X part is always in
600 slot 2). We must refuse to insert breakpoints for an address
601 that points at slot 2 of a bundle where an L-X instruction is
602 present, since there is logically no instruction at that address.
603 However, to make things more interesting, the opcode of L-X
604 instructions is located in slot 2. This means that, to insert
605 a breakpoint at an address that points to slot 1, we actually
606 need to write the breakpoint in slot 2! Slot 1 is actually
607 the extended operand, so writing the breakpoint there would not
608 have the desired effect. Another side-effect of this issue
609 is that we need to make sure that the shadow contents buffer
610 does save byte 15 of our instruction bundle (this is the tail
611 end of slot 2, which wouldn't be saved if we were to insert
612 the breakpoint in slot 1).
613
614 ia64 16-byte bundle layout:
615 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
616
617 The current addressing used by the code below:
618 original PC placed_address placed_size required covered
619 == bp_tgt->shadow_len reqd \subset covered
620 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
621 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
622 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
623
624 L-X instructions are treated a little specially, as explained above:
625 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
626
627    `objdump -d' and some other tools show slightly unjustified offsets:
628 original PC byte where starts the instruction objdump offset
629 0xABCDE0 0xABCDE0 0xABCDE0
630 0xABCDE1 0xABCDE5 0xABCDE6
631 0xABCDE2 0xABCDEA 0xABCDEC
632 */
633
634 #define IA64_BREAKPOINT 0x00003333300LL
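/* Illustrative sketch of the addressing table above (assumed helper name,
   not used by the real insert/remove code below): PLACED_ADDRESS keeps the
   requested PC, and the shadow covers everything from the slot-number byte
   offset up to the last byte of the 16-byte bundle, i.e. BUNDLE_LEN - slot
   bytes.  */

static int
ia64_example_shadow_len (CORE_ADDR reqstd_address)
{
  int slotnum = (int) (reqstd_address & 0x0f) / SLOT_MULTIPLIER;

  return BUNDLE_LEN - slotnum;  /* 0x10, 0xF or 0xE, as tabulated above.  */
}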
635
636 static int
637 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
638 struct bp_target_info *bp_tgt)
639 {
640 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
641 gdb_byte bundle[BUNDLE_LEN];
642 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
643 long long instr_breakpoint;
644 int val;
645 int template;
646 struct cleanup *cleanup;
647
648 if (slotnum > 2)
649 error (_("Can't insert breakpoint for slot numbers greater than 2."));
650
651 addr &= ~0x0f;
652
653 /* Enable the automatic memory restoration from breakpoints while
654 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
655 Otherwise, we could possibly store into the shadow parts of the adjacent
656 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
657 breakpoint instruction bits region. */
658 cleanup = make_show_memory_breakpoints_cleanup (0);
659 val = target_read_memory (addr, bundle, BUNDLE_LEN);
660 if (val != 0)
661 {
662 do_cleanups (cleanup);
663 return val;
664 }
665
666 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
667 for addressing the SHADOW_CONTENTS placement. */
668 shadow_slotnum = slotnum;
669
670 /* Always cover the last byte of the bundle in case we are inserting
671 a breakpoint on an L-X instruction. */
672 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
673
674 template = extract_bit_field (bundle, 0, 5);
675 if (template_encoding_table[template][slotnum] == X)
676 {
677 /* X unit types can only be used in slot 2, and are actually
678 part of a 2-slot L-X instruction. We cannot break at this
679 address, as this is the second half of an instruction that
680 lives in slot 1 of that bundle. */
681 gdb_assert (slotnum == 2);
682 error (_("Can't insert breakpoint for non-existing slot X"));
683 }
684 if (template_encoding_table[template][slotnum] == L)
685 {
686 /* L unit types can only be used in slot 1. But the associated
687 opcode for that instruction is in slot 2, so bump the slot number
688 accordingly. */
689 gdb_assert (slotnum == 1);
690 slotnum = 2;
691 }
692
693   /* Store the whole bundle, except for the initial bytes skipped according to
694      the slot number, which is interpreted as a byte offset in PLACED_ADDRESS.  */
695 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
696 bp_tgt->shadow_len);
697
698 /* Re-read the same bundle as above except that, this time, read it in order
699 to compute the new bundle inside which we will be inserting the
700 breakpoint. Therefore, disable the automatic memory restoration from
701 breakpoints while we read our instruction bundle. Otherwise, the general
702 restoration mechanism kicks in and we would possibly remove parts of the
703 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
704 the real breakpoint instruction bits region. */
705 make_show_memory_breakpoints_cleanup (1);
706 val = target_read_memory (addr, bundle, BUNDLE_LEN);
707 if (val != 0)
708 {
709 do_cleanups (cleanup);
710 return val;
711 }
712
713   /* Breakpoints already present in the code will get detected and not get
714 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
715 location cannot induce the internal error as they are optimized into
716 a single instance by update_global_location_list. */
717 instr_breakpoint = slotN_contents (bundle, slotnum);
718 if (instr_breakpoint == IA64_BREAKPOINT)
719 internal_error (__FILE__, __LINE__,
720 _("Address %s already contains a breakpoint."),
721 paddress (gdbarch, bp_tgt->placed_address));
722 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
723
724 bp_tgt->placed_size = bp_tgt->shadow_len;
725
726 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
727 bp_tgt->shadow_len);
728
729 do_cleanups (cleanup);
730 return val;
731 }
732
733 static int
734 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
735 struct bp_target_info *bp_tgt)
736 {
737 CORE_ADDR addr = bp_tgt->placed_address;
738 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
739 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
740 long long instr_breakpoint, instr_saved;
741 int val;
742 int template;
743 struct cleanup *cleanup;
744
745 addr &= ~0x0f;
746
747 /* Disable the automatic memory restoration from breakpoints while
748 we read our instruction bundle. Otherwise, the general restoration
749 mechanism kicks in and we would possibly remove parts of the adjacent
750 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
751 breakpoint instruction bits region. */
752 cleanup = make_show_memory_breakpoints_cleanup (1);
753 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
754 if (val != 0)
755 {
756 do_cleanups (cleanup);
757 return val;
758 }
759
760 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
761 for addressing the SHADOW_CONTENTS placement. */
762 shadow_slotnum = slotnum;
763
764 template = extract_bit_field (bundle_mem, 0, 5);
765 if (template_encoding_table[template][slotnum] == X)
766 {
767 /* X unit types can only be used in slot 2, and are actually
768 part of a 2-slot L-X instruction. We refuse to insert
769 breakpoints at this address, so there should be no reason
770 for us attempting to remove one there, except if the program's
771 code somehow got modified in memory. */
772 gdb_assert (slotnum == 2);
773 warning (_("Cannot remove breakpoint at address %s from non-existing "
774 "X-type slot, memory has changed underneath"),
775 paddress (gdbarch, bp_tgt->placed_address));
776 do_cleanups (cleanup);
777 return -1;
778 }
779 if (template_encoding_table[template][slotnum] == L)
780 {
781 /* L unit types can only be used in slot 1. But the breakpoint
782 was actually saved using slot 2, so update the slot number
783 accordingly. */
784 gdb_assert (slotnum == 1);
785 slotnum = 2;
786 }
787
788 gdb_assert (bp_tgt->placed_size == BUNDLE_LEN - shadow_slotnum);
789 gdb_assert (bp_tgt->placed_size == bp_tgt->shadow_len);
790
791 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
792 if (instr_breakpoint != IA64_BREAKPOINT)
793 {
794 warning (_("Cannot remove breakpoint at address %s, "
795 "no break instruction at such address."),
796 paddress (gdbarch, bp_tgt->placed_address));
797 do_cleanups (cleanup);
798 return -1;
799 }
800
801 /* Extract the original saved instruction from SLOTNUM normalizing its
802 bit-shift for INSTR_SAVED. */
803 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
804 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
805 bp_tgt->shadow_len);
806 instr_saved = slotN_contents (bundle_saved, slotnum);
807
808 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
809 and not any of the other ones that are stored in SHADOW_CONTENTS. */
810 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
811 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
812
813 do_cleanups (cleanup);
814 return val;
815 }
816
817 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
818    instruction slot ranges are bit-granular (41 bits), we have to provide an
819 extended range as described for ia64_memory_insert_breakpoint. We also take
820    care to preserve the `break' instruction's 21-bit (or 62-bit) parameter to
821 make a match for permanent breakpoints. */
822
823 static const gdb_byte *
824 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
825 CORE_ADDR *pcptr, int *lenptr)
826 {
827 CORE_ADDR addr = *pcptr;
828 static gdb_byte bundle[BUNDLE_LEN];
829 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
830 long long instr_fetched;
831 int val;
832 int template;
833 struct cleanup *cleanup;
834
835 if (slotnum > 2)
836 error (_("Can't insert breakpoint for slot numbers greater than 2."));
837
838 addr &= ~0x0f;
839
840 /* Enable the automatic memory restoration from breakpoints while
841 we read our instruction bundle to match bp_loc_is_permanent. */
842 cleanup = make_show_memory_breakpoints_cleanup (0);
843 val = target_read_memory (addr, bundle, BUNDLE_LEN);
844 do_cleanups (cleanup);
845
846 /* The memory might be unreachable. This can happen, for instance,
847 when the user inserts a breakpoint at an invalid address. */
848 if (val != 0)
849 return NULL;
850
851 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
852 for addressing the SHADOW_CONTENTS placement. */
853 shadow_slotnum = slotnum;
854
855   /* Always cover the last byte of the bundle for the L-X slot case.  */
856 *lenptr = BUNDLE_LEN - shadow_slotnum;
857
858   /* Check for an L-type instruction in slot 1; if present, bump the slot
859      number up to slot 2.  */
860 template = extract_bit_field (bundle, 0, 5);
861 if (template_encoding_table[template][slotnum] == X)
862 {
863 gdb_assert (slotnum == 2);
864 error (_("Can't insert breakpoint for non-existing slot X"));
865 }
866 if (template_encoding_table[template][slotnum] == L)
867 {
868 gdb_assert (slotnum == 1);
869 slotnum = 2;
870 }
871
872   /* A break instruction has all its opcode bits cleared except for
873      the parameter value.  For an L+X slot pair we are at the X slot (slot 2) so
874 we should not touch the L slot - the upper 41 bits of the parameter. */
875 instr_fetched = slotN_contents (bundle, slotnum);
876 instr_fetched &= 0x1003ffffc0LL;
877 replace_slotN_contents (bundle, instr_fetched, slotnum);
878
879 return bundle + shadow_slotnum;
880 }
881
882 static CORE_ADDR
883 ia64_read_pc (struct regcache *regcache)
884 {
885 ULONGEST psr_value, pc_value;
886 int slot_num;
887
888 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
889 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &pc_value);
890 slot_num = (psr_value >> 41) & 3;
891
892 return pc_value | (slot_num * SLOT_MULTIPLIER);
893 }
894
895 void
896 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
897 {
898 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
899 ULONGEST psr_value;
900
901 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
902 psr_value &= ~(3LL << 41);
903 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
904
905 new_pc &= ~0xfLL;
906
907 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
908 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
909 }
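/* For illustration only (assumed helper, not used elsewhere): the slot
   number travels in the psr.ri field, bits 41..42 of the PSR, and is what
   ia64_read_pc above folds into the low nibble of GDB's PC representation
   and ia64_write_pc splits back out.  */

static int
ia64_example_psr_slot (ULONGEST psr_value)
{
  return (int) ((psr_value >> 41) & 3); /* Slot number 0..2.  */
}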
910
911 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
912
913 /* Returns the address of the slot that's NSLOTS slots away from
914 the address ADDR. NSLOTS may be positive or negative. */
915 static CORE_ADDR
916 rse_address_add(CORE_ADDR addr, int nslots)
917 {
918 CORE_ADDR new_addr;
919 int mandatory_nat_slots = nslots / 63;
920 int direction = nslots < 0 ? -1 : 1;
921
922 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
923
924 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
925 new_addr += 8 * direction;
926
927 if (IS_NaT_COLLECTION_ADDR(new_addr))
928 new_addr += 8 * direction;
929
930 return new_addr;
931 }
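/* Illustrative sketch (assumed helper, not referenced elsewhere): the
   pattern used repeatedly below to locate the start of the current register
   frame ("bof") in the RSE backing store -- back up from BSP by the
   size-of-frame field of CFM, letting rse_address_add account for any
   intervening NaT collection slots.  */

static CORE_ADDR
ia64_example_frame_bof (CORE_ADDR bsp, ULONGEST cfm)
{
  return rse_address_add (bsp, -(int) (cfm & 0x7f));
}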
932
933 static enum register_status
934 ia64_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
935 int regnum, gdb_byte *buf)
936 {
937 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
938 enum register_status status;
939
940 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
941 {
942 #ifdef HAVE_LIBUNWIND_IA64_H
943 /* First try and use the libunwind special reg accessor,
944 otherwise fallback to standard logic. */
945 if (!libunwind_is_initialized ()
946 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
947 #endif
948 {
949 /* The fallback position is to assume that r32-r127 are
950 found sequentially in memory starting at $bof. This
951 isn't always true, but without libunwind, this is the
952 best we can do. */
953 enum register_status status;
954 ULONGEST cfm;
955 ULONGEST bsp;
956 CORE_ADDR reg;
957
958 status = regcache_cooked_read_unsigned (regcache,
959 IA64_BSP_REGNUM, &bsp);
960 if (status != REG_VALID)
961 return status;
962
963 status = regcache_cooked_read_unsigned (regcache,
964 IA64_CFM_REGNUM, &cfm);
965 if (status != REG_VALID)
966 return status;
967
968 /* The bsp points at the end of the register frame so we
969	     subtract the size of the frame from it to get the start of
970	     the register frame.  */
971 bsp = rse_address_add (bsp, -(cfm & 0x7f));
972
973 if ((cfm & 0x7f) > regnum - V32_REGNUM)
974 {
975 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
976 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
977 store_unsigned_integer (buf, register_size (gdbarch, regnum),
978 byte_order, reg);
979 }
980 else
981 store_unsigned_integer (buf, register_size (gdbarch, regnum),
982 byte_order, 0);
983 }
984 }
985 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
986 {
987 ULONGEST unatN_val;
988 ULONGEST unat;
989 status = regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
990 if (status != REG_VALID)
991 return status;
992 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
993 store_unsigned_integer (buf, register_size (gdbarch, regnum),
994 byte_order, unatN_val);
995 }
996 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
997 {
998 ULONGEST natN_val = 0;
999 ULONGEST bsp;
1000 ULONGEST cfm;
1001 CORE_ADDR gr_addr = 0;
1002 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1003 if (status != REG_VALID)
1004 return status;
1005 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1006 if (status != REG_VALID)
1007 return status;
1008
1009 /* The bsp points at the end of the register frame so we
1010 	 subtract the size of the frame from it to get the start of the register frame.  */
1011 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1012
1013 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1014 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1015
1016 if (gr_addr != 0)
1017 {
1018 /* Compute address of nat collection bits. */
1019 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1020 CORE_ADDR nat_collection;
1021 int nat_bit;
1022 /* If our nat collection address is bigger than bsp, we have to get
1023 the nat collection from rnat. Otherwise, we fetch the nat
1024 collection from the computed address. */
1025 if (nat_addr >= bsp)
1026 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1027 &nat_collection);
1028 else
1029 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1030 nat_bit = (gr_addr >> 3) & 0x3f;
1031 natN_val = (nat_collection >> nat_bit) & 1;
1032 }
1033
1034 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1035 byte_order, natN_val);
1036 }
1037 else if (regnum == VBOF_REGNUM)
1038 {
1039 /* A virtual register frame start is provided for user convenience.
1040 	 It can be calculated as bsp - sof (the size of the frame).  */
1041 ULONGEST bsp, vbsp;
1042 ULONGEST cfm;
1043 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1044 if (status != REG_VALID)
1045 return status;
1046 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1047 if (status != REG_VALID)
1048 return status;
1049
1050 /* The bsp points at the end of the register frame so we
1051 	 subtract the size of the frame from it to get the beginning of the frame.  */
1052 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1053 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1054 byte_order, vbsp);
1055 }
1056 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1057 {
1058 ULONGEST pr;
1059 ULONGEST cfm;
1060 ULONGEST prN_val;
1061 status = regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1062 if (status != REG_VALID)
1063 return status;
1064 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1065 if (status != REG_VALID)
1066 return status;
1067
1068 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1069 {
1070 /* Fetch predicate register rename base from current frame
1071 marker for this frame. */
1072 int rrb_pr = (cfm >> 32) & 0x3f;
1073
1074 /* Adjust the register number to account for register rotation. */
1075 regnum = VP16_REGNUM
1076 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1077 }
1078 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1079 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1080 byte_order, prN_val);
1081 }
1082 else
1083 memset (buf, 0, register_size (gdbarch, regnum));
1084
1085 return REG_VALID;
1086 }
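/* Illustrative sketch (assumed helper, not used by the surrounding code):
   the NaT collection lookup performed above and mirrored in the write path
   below.  The collection double-word shared by GR_ADDR is the next address
   whose bits 3..8 are all ones, and the bit for GR_ADDR within that
   double-word is indexed by bits 3..8 of GR_ADDR itself.  */

static void
ia64_example_nat_location (CORE_ADDR gr_addr, CORE_ADDR *nat_addr,
			   int *nat_bit)
{
  *nat_addr = gr_addr | 0x1f8;
  *nat_bit = (int) ((gr_addr >> 3) & 0x3f);
}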
1087
1088 static void
1089 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1090 int regnum, const gdb_byte *buf)
1091 {
1092 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1093
1094 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1095 {
1096 ULONGEST bsp;
1097 ULONGEST cfm;
1098 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1099 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1100
1101 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1102
1103 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1104 {
1105 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1106 write_memory (reg_addr, (void *) buf, 8);
1107 }
1108 }
1109 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1110 {
1111 ULONGEST unatN_val, unat, unatN_mask;
1112 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1113 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1114 regnum),
1115 byte_order);
1116 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1117 if (unatN_val == 0)
1118 unat &= ~unatN_mask;
1119 else if (unatN_val == 1)
1120 unat |= unatN_mask;
1121 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1122 }
1123 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1124 {
1125 ULONGEST natN_val;
1126 ULONGEST bsp;
1127 ULONGEST cfm;
1128 CORE_ADDR gr_addr = 0;
1129 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1130 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1131
1132 /* The bsp points at the end of the register frame so we
1133 	 subtract the size of the frame from it to get the start of the register frame.  */
1134 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1135
1136 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1137 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1138
1139 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1140 regnum),
1141 byte_order);
1142
1143 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1144 {
1145 /* Compute address of nat collection bits. */
1146 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1147 CORE_ADDR nat_collection;
1148 int natN_bit = (gr_addr >> 3) & 0x3f;
1149 ULONGEST natN_mask = (1LL << natN_bit);
1150 /* If our nat collection address is bigger than bsp, we have to get
1151 the nat collection from rnat. Otherwise, we fetch the nat
1152 collection from the computed address. */
1153 if (nat_addr >= bsp)
1154 {
1155 regcache_cooked_read_unsigned (regcache,
1156 IA64_RNAT_REGNUM,
1157 &nat_collection);
1158 if (natN_val)
1159 nat_collection |= natN_mask;
1160 else
1161 nat_collection &= ~natN_mask;
1162 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1163 nat_collection);
1164 }
1165 else
1166 {
1167 gdb_byte nat_buf[8];
1168 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1169 if (natN_val)
1170 nat_collection |= natN_mask;
1171 else
1172 nat_collection &= ~natN_mask;
1173 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1174 byte_order, nat_collection);
1175 write_memory (nat_addr, nat_buf, 8);
1176 }
1177 }
1178 }
1179 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1180 {
1181 ULONGEST pr;
1182 ULONGEST cfm;
1183 ULONGEST prN_val;
1184 ULONGEST prN_mask;
1185
1186 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1187 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1188
1189 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1190 {
1191 /* Fetch predicate register rename base from current frame
1192 marker for this frame. */
1193 int rrb_pr = (cfm >> 32) & 0x3f;
1194
1195 /* Adjust the register number to account for register rotation. */
1196 regnum = VP16_REGNUM
1197 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1198 }
1199 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1200 byte_order);
1201 prN_mask = (1LL << (regnum - VP0_REGNUM));
1202 if (prN_val == 0)
1203 pr &= ~prN_mask;
1204 else if (prN_val == 1)
1205 pr |= prN_mask;
1206 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1207 }
1208 }
1209
1210 /* The ia64 needs to convert between various ieee floating-point formats
1211 and the special ia64 floating point register format. */
1212
1213 static int
1214 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1215 {
1216 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1217 && type != ia64_ext_type (gdbarch));
1218 }
1219
1220 static int
1221 ia64_register_to_value (struct frame_info *frame, int regnum,
1222 struct type *valtype, gdb_byte *out,
1223 int *optimizedp, int *unavailablep)
1224 {
1225 struct gdbarch *gdbarch = get_frame_arch (frame);
1226 gdb_byte in[MAX_REGISTER_SIZE];
1227
1228 /* Convert to TYPE. */
1229 if (!get_frame_register_bytes (frame, regnum, 0,
1230 register_size (gdbarch, regnum),
1231 in, optimizedp, unavailablep))
1232 return 0;
1233
1234 convert_typed_floating (in, ia64_ext_type (gdbarch), out, valtype);
1235 *optimizedp = *unavailablep = 0;
1236 return 1;
1237 }
1238
1239 static void
1240 ia64_value_to_register (struct frame_info *frame, int regnum,
1241 struct type *valtype, const gdb_byte *in)
1242 {
1243 struct gdbarch *gdbarch = get_frame_arch (frame);
1244 gdb_byte out[MAX_REGISTER_SIZE];
1245 convert_typed_floating (in, valtype, out, ia64_ext_type (gdbarch));
1246 put_frame_register (frame, regnum, out);
1247 }
1248
1249
1250 /* Limit the number of skipped non-prologue instructions since examining
1251    the prologue is expensive.  */
1252 static int max_skip_non_prologue_insns = 40;
1253
1254 /* Given PC representing the starting address of a function, and
1255 LIM_PC which is the (sloppy) limit to which to scan when looking
1256 for a prologue, attempt to further refine this limit by using
1257 the line data in the symbol table. If successful, a better guess
1258 on where the prologue ends is returned, otherwise the previous
1259 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1260 which will be set to indicate whether the returned limit may be
1261 used with no further scanning in the event that the function is
1262 frameless. */
1263
1264 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1265 superseded by skip_prologue_using_sal. */
1266
1267 static CORE_ADDR
1268 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1269 {
1270 struct symtab_and_line prologue_sal;
1271 CORE_ADDR start_pc = pc;
1272 CORE_ADDR end_pc;
1273
1274 /* The prologue can not possibly go past the function end itself,
1275 so we can already adjust LIM_PC accordingly. */
1276 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1277 lim_pc = end_pc;
1278
1279 /* Start off not trusting the limit. */
1280 *trust_limit = 0;
1281
1282 prologue_sal = find_pc_line (pc, 0);
1283 if (prologue_sal.line != 0)
1284 {
1285 int i;
1286 CORE_ADDR addr = prologue_sal.end;
1287
1288 /* Handle the case in which compiler's optimizer/scheduler
1289 has moved instructions into the prologue. We scan ahead
1290 in the function looking for address ranges whose corresponding
1291 line number is less than or equal to the first one that we
1292 found for the function. (It can be less than when the
1293 scheduler puts a body instruction before the first prologue
1294 instruction.) */
1295 for (i = 2 * max_skip_non_prologue_insns;
1296 i > 0 && (lim_pc == 0 || addr < lim_pc);
1297 i--)
1298 {
1299 struct symtab_and_line sal;
1300
1301 sal = find_pc_line (addr, 0);
1302 if (sal.line == 0)
1303 break;
1304 if (sal.line <= prologue_sal.line
1305 && sal.symtab == prologue_sal.symtab)
1306 {
1307 prologue_sal = sal;
1308 }
1309 addr = sal.end;
1310 }
1311
1312 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1313 {
1314 lim_pc = prologue_sal.end;
1315 if (start_pc == get_pc_function_start (lim_pc))
1316 *trust_limit = 1;
1317 }
1318 }
1319 return lim_pc;
1320 }
1321
1322 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1323 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1324 || (14 <= (_regnum_) && (_regnum_) <= 31))
1325 #define imm9(_instr_) \
1326 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1327 | (((_instr_) & 0x00008000000LL) >> 20) \
1328 | (((_instr_) & 0x00000001fc0LL) >> 6))
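/* A worked example of the imm9 macro above (illustrative only; the helper
   is an assumption and is never called): the signed 9-bit immediate of the
   post-increment store forms is reassembled from the sign at instruction
   bit 36, bit 7 of the immediate at instruction bit 27, and bits 0..6 of
   the immediate at instruction bits 6..12.  */

static void
ia64_example_imm9 (void)
{
  /* Only the sign bit (instruction bit 36) set: -1 << 8 == -256.  */
  gdb_assert (imm9 (0x01000000000LL) == -256);
  /* Only instruction bits 6..12 set: the immediate is +127.  */
  gdb_assert (imm9 (0x00000001fc0LL) == 127);
}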
1329
1330 /* Allocate and initialize a frame cache. */
1331
1332 static struct ia64_frame_cache *
1333 ia64_alloc_frame_cache (void)
1334 {
1335 struct ia64_frame_cache *cache;
1336 int i;
1337
1338 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1339
1340 /* Base address. */
1341 cache->base = 0;
1342 cache->pc = 0;
1343 cache->cfm = 0;
1344 cache->prev_cfm = 0;
1345 cache->sof = 0;
1346 cache->sol = 0;
1347 cache->sor = 0;
1348 cache->bsp = 0;
1349 cache->fp_reg = 0;
1350 cache->frameless = 1;
1351
1352 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1353 cache->saved_regs[i] = 0;
1354
1355 return cache;
1356 }
1357
1358 static CORE_ADDR
1359 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1360 struct frame_info *this_frame,
1361 struct ia64_frame_cache *cache)
1362 {
1363 CORE_ADDR next_pc;
1364 CORE_ADDR last_prologue_pc = pc;
1365 instruction_type it;
1366 long long instr;
1367 int cfm_reg = 0;
1368 int ret_reg = 0;
1369 int fp_reg = 0;
1370 int unat_save_reg = 0;
1371 int pr_save_reg = 0;
1372 int mem_stack_frame_size = 0;
1373 int spill_reg = 0;
1374 CORE_ADDR spill_addr = 0;
1375 char instores[8];
1376 char infpstores[8];
1377 char reg_contents[256];
1378 int trust_limit;
1379 int frameless = 1;
1380 int i;
1381 CORE_ADDR addr;
1382 gdb_byte buf[8];
1383 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1384
1385 memset (instores, 0, sizeof instores);
1386 memset (infpstores, 0, sizeof infpstores);
1387 memset (reg_contents, 0, sizeof reg_contents);
1388
1389 if (cache->after_prologue != 0
1390 && cache->after_prologue <= lim_pc)
1391 return cache->after_prologue;
1392
1393 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1394 next_pc = fetch_instruction (pc, &it, &instr);
1395
1396 /* We want to check if we have a recognizable function start before we
1397 look ahead for a prologue. */
1398 if (pc < lim_pc && next_pc
1399 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1400 {
1401 /* alloc - start of a regular function. */
1402 int sor = (int) ((instr & 0x00078000000LL) >> 27);
1403 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1404 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1405 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1406
1407 /* Verify that the current cfm matches what we think is the
1408 function start. If we have somehow jumped within a function,
1409 we do not want to interpret the prologue and calculate the
1410 addresses of various registers such as the return address.
1411 We will instead treat the frame as frameless. */
1412 if (!this_frame ||
1413 (sof == (cache->cfm & 0x7f) &&
1414 sol == ((cache->cfm >> 7) & 0x7f)))
1415 frameless = 0;
1416
1417 cfm_reg = rN;
1418 last_prologue_pc = next_pc;
1419 pc = next_pc;
1420 }
1421 else
1422 {
1423 /* Look for a leaf routine. */
1424 if (pc < lim_pc && next_pc
1425 && (it == I || it == M)
1426 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1427 {
1428 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1429 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1430 | ((instr & 0x001f8000000LL) >> 20)
1431 | ((instr & 0x000000fe000LL) >> 13));
1432 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1433 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1434 int qp = (int) (instr & 0x0000000003fLL);
1435 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1436 {
1437 /* mov r2, r12 - beginning of leaf routine. */
1438 fp_reg = rN;
1439 last_prologue_pc = next_pc;
1440 }
1441 }
1442
1443 /* If we don't recognize a regular function or leaf routine, we are
1444 done. */
1445 if (!fp_reg)
1446 {
1447 pc = lim_pc;
1448 if (trust_limit)
1449 last_prologue_pc = lim_pc;
1450 }
1451 }
1452
1453 /* Loop, looking for prologue instructions, keeping track of
1454 where preserved registers were spilled. */
1455 while (pc < lim_pc)
1456 {
1457 next_pc = fetch_instruction (pc, &it, &instr);
1458 if (next_pc == 0)
1459 break;
1460
1461 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1462 {
1463 /* Exit loop upon hitting a non-nop branch instruction. */
1464 if (trust_limit)
1465 lim_pc = pc;
1466 break;
1467 }
1468 else if (((instr & 0x3fLL) != 0LL) &&
1469 (frameless || ret_reg != 0))
1470 {
1471 /* Exit loop upon hitting a predicated instruction if
1472 we already have the return register or if we are frameless. */
1473 if (trust_limit)
1474 lim_pc = pc;
1475 break;
1476 }
1477 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1478 {
1479 /* Move from BR */
1480 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1481 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1482 int qp = (int) (instr & 0x0000000003f);
1483
1484 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1485 {
1486 ret_reg = rN;
1487 last_prologue_pc = next_pc;
1488 }
1489 }
1490 else if ((it == I || it == M)
1491 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1492 {
1493 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1494 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1495 | ((instr & 0x001f8000000LL) >> 20)
1496 | ((instr & 0x000000fe000LL) >> 13));
1497 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1498 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1499 int qp = (int) (instr & 0x0000000003fLL);
1500
1501 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1502 {
1503 /* mov rN, r12 */
1504 fp_reg = rN;
1505 last_prologue_pc = next_pc;
1506 }
1507 else if (qp == 0 && rN == 12 && rM == 12)
1508 {
1509 /* adds r12, -mem_stack_frame_size, r12 */
1510 mem_stack_frame_size -= imm;
1511 last_prologue_pc = next_pc;
1512 }
1513 else if (qp == 0 && rN == 2
1514 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1515 {
1516 gdb_byte buf[MAX_REGISTER_SIZE];
1517 CORE_ADDR saved_sp = 0;
1518 /* adds r2, spilloffset, rFramePointer
1519 or
1520 adds r2, spilloffset, r12
1521
1522 Get ready for stf.spill or st8.spill instructions.
1523 The address to start spilling at is loaded into r2.
1524 FIXME: Why r2? That's what gcc currently uses; it
1525 could well be different for other compilers. */
1526
1527 /* Hmm... whether or not this will work will depend on
1528 where the pc is. If it's still early in the prologue
1529 this'll be wrong. FIXME */
1530 if (this_frame)
1531 {
1532 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1533 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1534 get_frame_register (this_frame, sp_regnum, buf);
1535 saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1536 }
1537 spill_addr = saved_sp
1538 + (rM == 12 ? 0 : mem_stack_frame_size)
1539 + imm;
1540 spill_reg = rN;
1541 last_prologue_pc = next_pc;
1542 }
1543 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1544 rN < 256 && imm == 0)
1545 {
1546 /* mov rN, rM where rM is an input register. */
1547 reg_contents[rN] = rM;
1548 last_prologue_pc = next_pc;
1549 }
1550 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1551 rM == 2)
1552 {
1553 /* mov r12, r2 */
1554 last_prologue_pc = next_pc;
1555 break;
1556 }
1557 }
1558 else if (it == M
1559 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1560 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1561 {
1562 /* stf.spill [rN] = fM, imm9
1563 or
1564 stf.spill [rN] = fM */
1565
1566 int imm = imm9(instr);
1567 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1568 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1569 int qp = (int) (instr & 0x0000000003fLL);
1570 if (qp == 0 && rN == spill_reg && spill_addr != 0
1571 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1572 {
1573 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1574
1575 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1576 spill_addr += imm;
1577 else
1578 spill_addr = 0; /* last one; must be done. */
1579 last_prologue_pc = next_pc;
1580 }
1581 }
1582 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1583 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1584 {
1585 /* mov.m rN = arM
1586 or
1587 mov.i rN = arM */
1588
1589 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1590 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1591 int qp = (int) (instr & 0x0000000003fLL);
1592 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1593 {
1594 /* We have something like "mov.m r3 = ar.unat". Remember the
1595 r3 (or whatever) and watch for a store of this register... */
1596 unat_save_reg = rN;
1597 last_prologue_pc = next_pc;
1598 }
1599 }
1600 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1601 {
1602 /* mov rN = pr */
1603 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1604 int qp = (int) (instr & 0x0000000003fLL);
1605 if (qp == 0 && isScratch (rN))
1606 {
1607 pr_save_reg = rN;
1608 last_prologue_pc = next_pc;
1609 }
1610 }
1611 else if (it == M
1612 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1613 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1614 {
1615 /* st8 [rN] = rM
1616 or
1617 st8 [rN] = rM, imm9 */
1618 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1619 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1620 int qp = (int) (instr & 0x0000000003fLL);
1621 int indirect = rM < 256 ? reg_contents[rM] : 0;
1622 if (qp == 0 && rN == spill_reg && spill_addr != 0
1623 && (rM == unat_save_reg || rM == pr_save_reg))
1624 {
1625 /* We've found a spill of either the UNAT register or the PR
1626 register. (Well, not exactly; what we've actually found is
1627 a spill of the register that UNAT or PR was moved to).
1628 Record that fact and move on... */
1629 if (rM == unat_save_reg)
1630 {
1631 /* Track UNAT register. */
1632 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1633 unat_save_reg = 0;
1634 }
1635 else
1636 {
1637 /* Track PR register. */
1638 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1639 pr_save_reg = 0;
1640 }
1641 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1642 /* st8 [rN] = rM, imm9 */
1643 spill_addr += imm9(instr);
1644 else
1645 spill_addr = 0; /* Must be done spilling. */
1646 last_prologue_pc = next_pc;
1647 }
1648 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1649 {
1650 /* Allow up to one store of each input register. */
1651 instores[rM-32] = 1;
1652 last_prologue_pc = next_pc;
1653 }
1654 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1655 !instores[indirect-32])
1656 {
1657 /* Allow an indirect store of an input register. */
1658 instores[indirect-32] = 1;
1659 last_prologue_pc = next_pc;
1660 }
1661 }
1662 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1663 {
1664 /* One of
1665 st1 [rN] = rM
1666 st2 [rN] = rM
1667 st4 [rN] = rM
1668 st8 [rN] = rM
1669 Note that the st8 case is handled in the clause above.
1670
1671 Advance over stores of input registers. One store per input
1672 register is permitted. */
1673 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1674 int qp = (int) (instr & 0x0000000003fLL);
1675 int indirect = rM < 256 ? reg_contents[rM] : 0;
1676 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1677 {
1678 instores[rM-32] = 1;
1679 last_prologue_pc = next_pc;
1680 }
1681 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1682 !instores[indirect-32])
1683 {
1684 /* Allow an indirect store of an input register. */
1685 instores[indirect-32] = 1;
1686 last_prologue_pc = next_pc;
1687 }
1688 }
1689 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1690 {
1691 /* Either
1692 stfs [rN] = fM
1693 or
1694 stfd [rN] = fM
1695
1696 Advance over stores of floating point input registers. Again
1697 one store per register is permitted. */
1698 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1699 int qp = (int) (instr & 0x0000000003fLL);
1700 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1701 {
1702 infpstores[fM-8] = 1;
1703 last_prologue_pc = next_pc;
1704 }
1705 }
1706 else if (it == M
1707 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1708 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1709 {
1710 /* st8.spill [rN] = rM
1711 or
1712 st8.spill [rN] = rM, imm9 */
1713 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1714 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1715 int qp = (int) (instr & 0x0000000003fLL);
1716 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1717 {
1718 /* We've found a spill of one of the preserved general purpose
1719 regs. Record the spill address and advance the spill
1720 register if appropriate. */
1721 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1722 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1723 /* st8.spill [rN] = rM, imm9 */
1724 spill_addr += imm9(instr);
1725 else
1726 spill_addr = 0; /* Done spilling. */
1727 last_prologue_pc = next_pc;
1728 }
1729 }
1730
1731 pc = next_pc;
1732 }
1733
1734 /* If not frameless and we aren't called by skip_prologue, then we need
1735 to calculate registers for the previous frame which will be needed
1736 later. */
1737
1738 if (!frameless && this_frame)
1739 {
1740 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1741 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1742
1743 /* Extract the size of the rotating portion of the stack
1744 frame and the register rename base from the current
1745 frame marker. */
1746 cfm = cache->cfm;
1747 sor = cache->sor;
1748 sof = cache->sof;
1749 sol = cache->sol;
1750 rrb_gr = (cfm >> 18) & 0x7f;
1751
1752 /* Find the bof (beginning of frame). */
1753 bof = rse_address_add (cache->bsp, -sof);
1754
1755 for (i = 0, addr = bof;
1756 i < sof;
1757 i++, addr += 8)
1758 {
1759 if (IS_NaT_COLLECTION_ADDR (addr))
1760 {
1761 addr += 8;
1762 }
1763 if (i+32 == cfm_reg)
1764 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1765 if (i+32 == ret_reg)
1766 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1767 if (i+32 == fp_reg)
1768 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1769 }
1770
1771 /* For the previous argument registers we require the previous bof.
1772 If we can't find the previous cfm, then we can do nothing. */
1773 cfm = 0;
1774 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1775 {
1776 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1777 8, byte_order);
1778 }
1779 else if (cfm_reg != 0)
1780 {
1781 get_frame_register (this_frame, cfm_reg, buf);
1782 cfm = extract_unsigned_integer (buf, 8, byte_order);
1783 }
1784 cache->prev_cfm = cfm;
1785
1786 if (cfm != 0)
1787 {
1788 sor = ((cfm >> 14) & 0xf) * 8;
1789 sof = (cfm & 0x7f);
1790 sol = (cfm >> 7) & 0x7f;
1791 rrb_gr = (cfm >> 18) & 0x7f;
1792
1793 /* The previous bof only requires subtraction of the sol (size of
1794 locals) due to the overlap between output and input of
1795 subsequent frames. */
1796 bof = rse_address_add (bof, -sol);
1797
1798 for (i = 0, addr = bof;
1799 i < sof;
1800 i++, addr += 8)
1801 {
1802 if (IS_NaT_COLLECTION_ADDR (addr))
1803 {
1804 addr += 8;
1805 }
1806 if (i < sor)
1807 cache->saved_regs[IA64_GR32_REGNUM
1808 + ((i + (sor - rrb_gr)) % sor)]
1809 = addr;
1810 else
1811 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1812 }
1813
1814 }
1815 }
1816
1817 /* Try and trust the lim_pc value whenever possible. */
1818 if (trust_limit && lim_pc >= last_prologue_pc)
1819 last_prologue_pc = lim_pc;
1820
1821 cache->frameless = frameless;
1822 cache->after_prologue = last_prologue_pc;
1823 cache->mem_stack_frame_size = mem_stack_frame_size;
1824 cache->fp_reg = fp_reg;
1825
1826 return last_prologue_pc;
1827 }
1828
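/* Skip over the prologue of the function starting at PC.  The prologue
   is scanned with examine_prologue over at most the next 1024 bytes;
   the address of the first instruction past it is returned.  */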
1829 CORE_ADDR
1830 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1831 {
1832 struct ia64_frame_cache cache;
1833 cache.base = 0;
1834 cache.after_prologue = 0;
1835 cache.cfm = 0;
1836 cache.bsp = 0;
1837
1838 /* Call examine_prologue with 0 as its third (this_frame) argument,
1839 since we don't have a frame to pass.  */
1840 return examine_prologue (pc, pc+1024, 0, &cache);
1841 }
1842
1843
1844 /* Normal frames. */
1845
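/* Build (or return the already-built) frame cache for THIS_FRAME:
   record the saved stack pointer, the bsp and the current frame marker
   fields (sof/sol/sor), then let examine_prologue fill in the locations
   of the saved registers.  */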
1846 static struct ia64_frame_cache *
1847 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1848 {
1849 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1850 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1851 struct ia64_frame_cache *cache;
1852 gdb_byte buf[8];
1853 CORE_ADDR cfm, psr;
1854
1855 if (*this_cache)
1856 return *this_cache;
1857
1858 cache = ia64_alloc_frame_cache ();
1859 *this_cache = cache;
1860
1861 get_frame_register (this_frame, sp_regnum, buf);
1862 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1863
1864 /* We always want the bsp to point to the end of frame.
1865 This way, we can always get the beginning of frame (bof)
1866 by subtracting frame size. */
1867 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1868 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1869
1870 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1871 psr = extract_unsigned_integer (buf, 8, byte_order);
1872
1873 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1874 cfm = extract_unsigned_integer (buf, 8, byte_order);
1875
1876 cache->sof = (cfm & 0x7f);
1877 cache->sol = (cfm >> 7) & 0x7f;
1878 cache->sor = ((cfm >> 14) & 0xf) * 8;
1879
1880 cache->cfm = cfm;
1881
1882 cache->pc = get_frame_func (this_frame);
1883
1884 if (cache->pc != 0)
1885 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1886
1887 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1888
1889 return cache;
1890 }
1891
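/* Implement the this_id unwinder callback for normal frames.  The frame
   id uses the frame base as the stack address, the function start as the
   code address, and the bsp as the special address.  */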
1892 static void
1893 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1894 struct frame_id *this_id)
1895 {
1896 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1897 struct ia64_frame_cache *cache =
1898 ia64_frame_cache (this_frame, this_cache);
1899
1900 /* If this is the outermost frame (base == 0), leave the default null frame id; otherwise build a special frame id.  */
1901 if (cache->base != 0)
1902 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1903 if (gdbarch_debug >= 1)
1904 fprintf_unfiltered (gdb_stdlog,
1905 "regular frame id: code %s, stack %s, "
1906 "special %s, this_frame %s\n",
1907 paddress (gdbarch, this_id->code_addr),
1908 paddress (gdbarch, this_id->stack_addr),
1909 paddress (gdbarch, cache->bsp),
1910 host_address_to_string (this_frame));
1911 }
1912
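/* Implement the prev_register unwinder callback for normal frames:
   return the value REGNUM had in the frame that called THIS_FRAME.  */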
1913 static struct value *
1914 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1915 int regnum)
1916 {
1917 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1918 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1919 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1920 gdb_byte buf[8];
1921
1922 gdb_assert (regnum >= 0);
1923
1924 if (!target_has_registers)
1925 error (_("No registers."));
1926
1927 if (regnum == gdbarch_sp_regnum (gdbarch))
1928 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1929
1930 else if (regnum == IA64_BSP_REGNUM)
1931 {
1932 struct value *val;
1933 CORE_ADDR prev_cfm, bsp, prev_bsp;
1934
1935 /* We want to calculate the previous bsp as the end of the previous
1936 register stack frame. This corresponds to what the hardware bsp
1937 register will be if we pop the frame back which is why we might
1938 have been called. We know the beginning of the current frame is
1939 cache->bsp - cache->sof. This value in the previous frame points
1940 to the start of the output registers. We can calculate the end of
1941 that frame by adding the size of output:
1942 (sof (size of frame) - sol (size of locals)). */
1943 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1944 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1945 8, byte_order);
1946 bsp = rse_address_add (cache->bsp, -(cache->sof));
1947 prev_bsp =
1948 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1949
1950 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1951 }
1952
1953 else if (regnum == IA64_CFM_REGNUM)
1954 {
1955 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1956
1957 if (addr != 0)
1958 return frame_unwind_got_memory (this_frame, regnum, addr);
1959
1960 if (cache->prev_cfm)
1961 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1962
1963 if (cache->frameless)
1964 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1965 IA64_PFS_REGNUM);
1966 return frame_unwind_got_register (this_frame, regnum, 0);
1967 }
1968
1969 else if (regnum == IA64_VFP_REGNUM)
1970 {
1971 /* If the function in question uses an automatic register (r32-r127)
1972 for the frame pointer, it'll be found by ia64_find_saved_register()
1973 above. If the function lacks one of these frame pointers, we can
1974 still provide a value since we know the size of the frame. */
1975 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1976 }
1977
1978 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1979 {
1980 struct value *pr_val;
1981 ULONGEST prN;
1982
1983 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1984 IA64_PR_REGNUM);
1985 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1986 {
1987 /* Fetch predicate register rename base from current frame
1988 marker for this frame. */
1989 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1990
1991 /* Adjust the register number to account for register rotation. */
1992 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1993 }
1994 prN = extract_bit_field (value_contents_all (pr_val),
1995 regnum - VP0_REGNUM, 1);
1996 return frame_unwind_got_constant (this_frame, regnum, prN);
1997 }
1998
1999 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
2000 {
2001 struct value *unat_val;
2002 ULONGEST unatN;
2003 unat_val = ia64_frame_prev_register (this_frame, this_cache,
2004 IA64_UNAT_REGNUM);
2005 unatN = extract_bit_field (value_contents_all (unat_val),
2006 regnum - IA64_NAT0_REGNUM, 1);
2007 return frame_unwind_got_constant (this_frame, regnum, unatN);
2008 }
2009
2010 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2011 {
2012 int natval = 0;
2013 /* Find address of general register corresponding to nat bit we're
2014 interested in. */
2015 CORE_ADDR gr_addr;
2016
2017 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2018
2019 if (gr_addr != 0)
2020 {
2021 /* Compute address of nat collection bits. */
2022 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2023 CORE_ADDR bsp;
2024 CORE_ADDR nat_collection;
2025 int nat_bit;
2026
2027 /* If our nat collection address is bigger than bsp, we have to get
2028 the nat collection from rnat. Otherwise, we fetch the nat
2029 collection from the computed address. */
2030 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2031 bsp = extract_unsigned_integer (buf, 8, byte_order);
2032 if (nat_addr >= bsp)
2033 {
2034 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2035 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2036 }
2037 else
2038 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2039 nat_bit = (gr_addr >> 3) & 0x3f;
2040 natval = (nat_collection >> nat_bit) & 1;
2041 }
2042
2043 return frame_unwind_got_constant (this_frame, regnum, natval);
2044 }
2045
2046 else if (regnum == IA64_IP_REGNUM)
2047 {
2048 CORE_ADDR pc = 0;
2049 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2050
2051 if (addr != 0)
2052 {
2053 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2054 pc = extract_unsigned_integer (buf, 8, byte_order);
2055 }
2056 else if (cache->frameless)
2057 {
2058 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2059 pc = extract_unsigned_integer (buf, 8, byte_order);
2060 }
2061 pc &= ~0xf;
2062 return frame_unwind_got_constant (this_frame, regnum, pc);
2063 }
2064
2065 else if (regnum == IA64_PSR_REGNUM)
2066 {
2067 /* We don't know how to get the complete previous PSR, but we need it
2068 for the slot information when we unwind the pc (pc is formed of IP
2069 register plus slot information from PSR). To get the previous
2070 slot information, we extract it from the low bits of the return address.  */
2071 ULONGEST slot_num = 0;
2072 CORE_ADDR pc = 0;
2073 CORE_ADDR psr = 0;
2074 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2075
2076 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2077 psr = extract_unsigned_integer (buf, 8, byte_order);
2078
2079 if (addr != 0)
2080 {
2081 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2082 pc = extract_unsigned_integer (buf, 8, byte_order);
2083 }
2084 else if (cache->frameless)
2085 {
2086 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2087 pc = extract_unsigned_integer (buf, 8, byte_order);
2088 }
2089 psr &= ~(3LL << 41);
2090 slot_num = pc & 0x3LL;
2091 psr |= (CORE_ADDR)slot_num << 41;
2092 return frame_unwind_got_constant (this_frame, regnum, psr);
2093 }
2094
2095 else if (regnum == IA64_BR0_REGNUM)
2096 {
2097 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2098
2099 if (addr != 0)
2100 return frame_unwind_got_memory (this_frame, regnum, addr);
2101
2102 return frame_unwind_got_constant (this_frame, regnum, 0);
2103 }
2104
2105 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2106 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2107 {
2108 CORE_ADDR addr = 0;
2109
2110 if (regnum >= V32_REGNUM)
2111 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2112 addr = cache->saved_regs[regnum];
2113 if (addr != 0)
2114 return frame_unwind_got_memory (this_frame, regnum, addr);
2115
2116 if (cache->frameless)
2117 {
2118 struct value *reg_val;
2119 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2120
2121 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2122 with the same code above? */
2123 if (regnum >= V32_REGNUM)
2124 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2125 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2126 IA64_CFM_REGNUM);
2127 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2128 8, byte_order);
2129 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2130 IA64_BSP_REGNUM);
2131 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2132 8, byte_order);
2133 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2134
2135 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2136 return frame_unwind_got_memory (this_frame, regnum, addr);
2137 }
2138
2139 return frame_unwind_got_constant (this_frame, regnum, 0);
2140 }
2141
2142 else /* All other registers. */
2143 {
2144 CORE_ADDR addr = 0;
2145
2146 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2147 {
2148 /* Fetch floating point register rename base from current
2149 frame marker for this frame. */
2150 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2151
2152 /* Adjust the floating point register number to account for
2153 register rotation. */
2154 regnum = IA64_FR32_REGNUM
2155 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2156 }
2157
2158 /* If we have stored a memory address, access the register. */
2159 addr = cache->saved_regs[regnum];
2160 if (addr != 0)
2161 return frame_unwind_got_memory (this_frame, regnum, addr);
2162 /* Otherwise, punt and get the current value of the register. */
2163 else
2164 return frame_unwind_got_register (this_frame, regnum, regnum);
2165 }
2166 }
2167
2168 static const struct frame_unwind ia64_frame_unwind =
2169 {
2170 NORMAL_FRAME,
2171 default_frame_unwind_stop_reason,
2172 &ia64_frame_this_id,
2173 &ia64_frame_prev_register,
2174 NULL,
2175 default_frame_sniffer
2176 };
2177
2178 /* Signal trampolines. */
2179
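/* Record the addresses at which the registers of the interrupted frame
   were saved in the sigcontext, using the OS-specific
   sigcontext_register_address callback when one is provided.  */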
2180 static void
2181 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2182 struct ia64_frame_cache *cache)
2183 {
2184 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2185 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2186
2187 if (tdep->sigcontext_register_address)
2188 {
2189 int regno;
2190
2191 cache->saved_regs[IA64_VRAP_REGNUM]
2192 = tdep->sigcontext_register_address (gdbarch, cache->base,
2193 IA64_IP_REGNUM);
2194 cache->saved_regs[IA64_CFM_REGNUM]
2195 = tdep->sigcontext_register_address (gdbarch, cache->base,
2196 IA64_CFM_REGNUM);
2197 cache->saved_regs[IA64_PSR_REGNUM]
2198 = tdep->sigcontext_register_address (gdbarch, cache->base,
2199 IA64_PSR_REGNUM);
2200 cache->saved_regs[IA64_BSP_REGNUM]
2201 = tdep->sigcontext_register_address (gdbarch, cache->base,
2202 IA64_BSP_REGNUM);
2203 cache->saved_regs[IA64_RNAT_REGNUM]
2204 = tdep->sigcontext_register_address (gdbarch, cache->base,
2205 IA64_RNAT_REGNUM);
2206 cache->saved_regs[IA64_CCV_REGNUM]
2207 = tdep->sigcontext_register_address (gdbarch, cache->base,
2208 IA64_CCV_REGNUM);
2209 cache->saved_regs[IA64_UNAT_REGNUM]
2210 = tdep->sigcontext_register_address (gdbarch, cache->base,
2211 IA64_UNAT_REGNUM);
2212 cache->saved_regs[IA64_FPSR_REGNUM]
2213 = tdep->sigcontext_register_address (gdbarch, cache->base,
2214 IA64_FPSR_REGNUM);
2215 cache->saved_regs[IA64_PFS_REGNUM]
2216 = tdep->sigcontext_register_address (gdbarch, cache->base,
2217 IA64_PFS_REGNUM);
2218 cache->saved_regs[IA64_LC_REGNUM]
2219 = tdep->sigcontext_register_address (gdbarch, cache->base,
2220 IA64_LC_REGNUM);
2221
2222 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2223 cache->saved_regs[regno] =
2224 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2225 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2226 cache->saved_regs[regno] =
2227 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2228 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2229 cache->saved_regs[regno] =
2230 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2231 }
2232 }
2233
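/* Build (or return the already-built) frame cache for a signal
   trampoline frame.  */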
2234 static struct ia64_frame_cache *
2235 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2236 {
2237 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2238 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2239 struct ia64_frame_cache *cache;
2240 gdb_byte buf[8];
2241
2242 if (*this_cache)
2243 return *this_cache;
2244
2245 cache = ia64_alloc_frame_cache ();
2246
2247 get_frame_register (this_frame, sp_regnum, buf);
2248 /* Note that frame size is hard-coded below. We cannot calculate it
2249 via prologue examination. */
2250 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2251
2252 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2253 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2254
2255 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2256 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2257 cache->sof = cache->cfm & 0x7f;
2258
2259 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2260
2261 *this_cache = cache;
2262 return cache;
2263 }
2264
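/* Implement the this_id unwinder callback for signal trampoline frames.  */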
2265 static void
2266 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2267 void **this_cache, struct frame_id *this_id)
2268 {
2269 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2270 struct ia64_frame_cache *cache =
2271 ia64_sigtramp_frame_cache (this_frame, this_cache);
2272
2273 (*this_id) = frame_id_build_special (cache->base,
2274 get_frame_pc (this_frame),
2275 cache->bsp);
2276 if (gdbarch_debug >= 1)
2277 fprintf_unfiltered (gdb_stdlog,
2278 "sigtramp frame id: code %s, stack %s, "
2279 "special %s, this_frame %s\n",
2280 paddress (gdbarch, this_id->code_addr),
2281 paddress (gdbarch, this_id->stack_addr),
2282 paddress (gdbarch, cache->bsp),
2283 host_address_to_string (this_frame));
2284 }
2285
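/* Implement the prev_register unwinder callback for signal trampoline
   frames, reading saved registers from the sigcontext addresses recorded
   in the frame cache.  */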
2286 static struct value *
2287 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2288 void **this_cache, int regnum)
2289 {
2290 gdb_byte buf[MAX_REGISTER_SIZE];
2291
2292 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2293 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2294 struct ia64_frame_cache *cache =
2295 ia64_sigtramp_frame_cache (this_frame, this_cache);
2296
2297 gdb_assert (regnum >= 0);
2298
2299 if (!target_has_registers)
2300 error (_("No registers."));
2301
2302 if (regnum == IA64_IP_REGNUM)
2303 {
2304 CORE_ADDR pc = 0;
2305 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2306
2307 if (addr != 0)
2308 {
2309 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2310 pc = extract_unsigned_integer (buf, 8, byte_order);
2311 }
2312 pc &= ~0xf;
2313 return frame_unwind_got_constant (this_frame, regnum, pc);
2314 }
2315
2316 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2317 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2318 {
2319 CORE_ADDR addr = 0;
2320
2321 if (regnum >= V32_REGNUM)
2322 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2323 addr = cache->saved_regs[regnum];
2324 if (addr != 0)
2325 return frame_unwind_got_memory (this_frame, regnum, addr);
2326
2327 return frame_unwind_got_constant (this_frame, regnum, 0);
2328 }
2329
2330 else /* All other registers not listed above. */
2331 {
2332 CORE_ADDR addr = cache->saved_regs[regnum];
2333
2334 if (addr != 0)
2335 return frame_unwind_got_memory (this_frame, regnum, addr);
2336
2337 return frame_unwind_got_constant (this_frame, regnum, 0);
2338 }
2339 }
2340
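/* Return non-zero if THIS_FRAME is a signal trampoline frame, as
   determined by the OS-specific pc_in_sigtramp callback.  */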
2341 static int
2342 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2343 struct frame_info *this_frame,
2344 void **this_cache)
2345 {
2346 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2347 if (tdep->pc_in_sigtramp)
2348 {
2349 CORE_ADDR pc = get_frame_pc (this_frame);
2350
2351 if (tdep->pc_in_sigtramp (pc))
2352 return 1;
2353 }
2354
2355 return 0;
2356 }
2357
2358 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2359 {
2360 SIGTRAMP_FRAME,
2361 default_frame_unwind_stop_reason,
2362 ia64_sigtramp_frame_this_id,
2363 ia64_sigtramp_frame_prev_register,
2364 NULL,
2365 ia64_sigtramp_frame_sniffer
2366 };
2367
2368 \f
2369
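/* Return the frame base address of THIS_FRAME; it is used below for the
   frame base as well as the locals and arguments addresses.  */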
2370 static CORE_ADDR
2371 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2372 {
2373 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2374
2375 return cache->base;
2376 }
2377
2378 static const struct frame_base ia64_frame_base =
2379 {
2380 &ia64_frame_unwind,
2381 ia64_frame_base_address,
2382 ia64_frame_base_address,
2383 ia64_frame_base_address
2384 };
2385
2386 #ifdef HAVE_LIBUNWIND_IA64_H
2387
2388 struct ia64_unwind_table_entry
2389 {
2390 unw_word_t start_offset;
2391 unw_word_t end_offset;
2392 unw_word_t info_offset;
2393 };
2394
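/* Return the RSE slot number (0-63) of backing-store address ADDR.  */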
2395 static __inline__ uint64_t
2396 ia64_rse_slot_num (uint64_t addr)
2397 {
2398 return (addr >> 3) & 0x3f;
2399 }
2400
2401 /* Skip over a designated number of registers in the backing
2402 store, remembering every 64th position is for NAT. */
2403 static __inline__ uint64_t
2404 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2405 {
2406 long delta = ia64_rse_slot_num(addr) + num_regs;
2407
2408 if (num_regs < 0)
2409 delta -= 0x3e;
2410 return addr + ((num_regs + delta/0x3f) << 3);
2411 }
2412
2413 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2414 register number to a libunwind register number. */
2415 static int
2416 ia64_gdb2uw_regnum (int regnum)
2417 {
2418 if (regnum == sp_regnum)
2419 return UNW_IA64_SP;
2420 else if (regnum == IA64_BSP_REGNUM)
2421 return UNW_IA64_BSP;
2422 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2423 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2424 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2425 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2426 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2427 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2428 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2429 return -1;
2430 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2431 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2432 else if (regnum == IA64_PR_REGNUM)
2433 return UNW_IA64_PR;
2434 else if (regnum == IA64_IP_REGNUM)
2435 return UNW_REG_IP;
2436 else if (regnum == IA64_CFM_REGNUM)
2437 return UNW_IA64_CFM;
2438 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2439 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2440 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2441 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2442 else
2443 return -1;
2444 }
2445
2446 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2447 register number to an ia64 gdb register number.  */
2448 static int
2449 ia64_uw2gdb_regnum (int uw_regnum)
2450 {
2451 if (uw_regnum == UNW_IA64_SP)
2452 return sp_regnum;
2453 else if (uw_regnum == UNW_IA64_BSP)
2454 return IA64_BSP_REGNUM;
2455 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2456 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2457 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2458 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2459 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2460 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2461 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2462 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2463 else if (uw_regnum == UNW_IA64_PR)
2464 return IA64_PR_REGNUM;
2465 else if (uw_regnum == UNW_REG_IP)
2466 return IA64_IP_REGNUM;
2467 else if (uw_regnum == UNW_IA64_CFM)
2468 return IA64_CFM_REGNUM;
2469 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2470 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2471 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2472 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2473 else
2474 return -1;
2475 }
2476
2477 /* Gdb ia64-libunwind-tdep callback function to reveal whether a register is
2478 a float register or not. */
2479 static int
2480 ia64_is_fpreg (int uw_regnum)
2481 {
2482 return unw_is_fpreg (uw_regnum);
2483 }
2484
2485 /* Libunwind callback accessor function for general registers. */
2486 static int
2487 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2488 int write, void *arg)
2489 {
2490 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2491 unw_word_t bsp, sof, sol, cfm, psr, ip;
2492 struct frame_info *this_frame = arg;
2493 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2494 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2495 long new_sof, old_sof;
2496 gdb_byte buf[MAX_REGISTER_SIZE];
2497
2498 /* We never call any libunwind routines that need to write registers. */
2499 gdb_assert (!write);
2500
2501 switch (uw_regnum)
2502 {
2503 case UNW_REG_IP:
2504 /* Libunwind expects to see the pc value which means the slot number
2505 from the psr must be merged with the ip word address. */
2506 get_frame_register (this_frame, IA64_IP_REGNUM, buf);
2507 ip = extract_unsigned_integer (buf, 8, byte_order);
2508 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2509 psr = extract_unsigned_integer (buf, 8, byte_order);
2510 *val = ip | ((psr >> 41) & 0x3);
2511 break;
2512
2513 case UNW_IA64_AR_BSP:
2514 /* Libunwind expects to see the beginning of the current
2515 register frame so we must account for the fact that
2516 ptrace() will return a value for bsp that points *after*
2517 the current register frame. */
2518 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2519 bsp = extract_unsigned_integer (buf, 8, byte_order);
2520 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2521 cfm = extract_unsigned_integer (buf, 8, byte_order);
2522 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2523 *val = ia64_rse_skip_regs (bsp, -sof);
2524 break;
2525
2526 case UNW_IA64_AR_BSPSTORE:
2527 /* Libunwind wants bspstore to be after the current register frame.
2528 This is what ptrace() and gdb treat as the regular bsp value.  */
2529 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2530 *val = extract_unsigned_integer (buf, 8, byte_order);
2531 break;
2532
2533 default:
2534 /* For all other registers, just unwind the value directly. */
2535 get_frame_register (this_frame, regnum, buf);
2536 *val = extract_unsigned_integer (buf, 8, byte_order);
2537 break;
2538 }
2539
2540 if (gdbarch_debug >= 1)
2541 fprintf_unfiltered (gdb_stdlog,
2542 " access_reg: from cache: %4s=%s\n",
2543 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2544 ? ia64_register_names[regnum] : "r??"),
2545 paddress (gdbarch, *val));
2546 return 0;
2547 }
2548
2549 /* Libunwind callback accessor function for floating-point registers. */
2550 static int
2551 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2552 unw_fpreg_t *val, int write, void *arg)
2553 {
2554 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2555 struct frame_info *this_frame = arg;
2556
2557 /* We never call any libunwind routines that need to write registers. */
2558 gdb_assert (!write);
2559
2560 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2561
2562 return 0;
2563 }
2564
2565 /* Libunwind callback accessor function for top-level rse registers. */
2566 static int
2567 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2568 unw_word_t *val, int write, void *arg)
2569 {
2570 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2571 unw_word_t bsp, sof, sol, cfm, psr, ip;
2572 struct regcache *regcache = arg;
2573 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2574 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2575 long new_sof, old_sof;
2576 gdb_byte buf[MAX_REGISTER_SIZE];
2577
2578 /* We never call any libunwind routines that need to write registers. */
2579 gdb_assert (!write);
2580
2581 switch (uw_regnum)
2582 {
2583 case UNW_REG_IP:
2584 /* Libunwind expects to see the pc value which means the slot number
2585 from the psr must be merged with the ip word address. */
2586 regcache_cooked_read (regcache, IA64_IP_REGNUM, buf);
2587 ip = extract_unsigned_integer (buf, 8, byte_order);
2588 regcache_cooked_read (regcache, IA64_PSR_REGNUM, buf);
2589 psr = extract_unsigned_integer (buf, 8, byte_order);
2590 *val = ip | ((psr >> 41) & 0x3);
2591 break;
2592
2593 case UNW_IA64_AR_BSP:
2594 /* Libunwind expects to see the beginning of the current
2595 register frame so we must account for the fact that
2596 ptrace() will return a value for bsp that points *after*
2597 the current register frame. */
2598 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2599 bsp = extract_unsigned_integer (buf, 8, byte_order);
2600 regcache_cooked_read (regcache, IA64_CFM_REGNUM, buf);
2601 cfm = extract_unsigned_integer (buf, 8, byte_order);
2602 sof = (cfm & 0x7f);
2603 *val = ia64_rse_skip_regs (bsp, -sof);
2604 break;
2605
2606 case UNW_IA64_AR_BSPSTORE:
2607 /* Libunwind wants bspstore to be after the current register frame.
2608 This is what ptrace() and gdb treat as the regular bsp value.  */
2609 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2610 *val = extract_unsigned_integer (buf, 8, byte_order);
2611 break;
2612
2613 default:
2614 /* For all other registers, just unwind the value directly. */
2615 regcache_cooked_read (regcache, regnum, buf);
2616 *val = extract_unsigned_integer (buf, 8, byte_order);
2617 break;
2618 }
2619
2620 if (gdbarch_debug >= 1)
2621 fprintf_unfiltered (gdb_stdlog,
2622 " access_rse_reg: from cache: %4s=%s\n",
2623 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2624 ? ia64_register_names[regnum] : "r??"),
2625 paddress (gdbarch, *val));
2626
2627 return 0;
2628 }
2629
2630 /* Libunwind callback accessor function for top-level fp registers. */
2631 static int
2632 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2633 unw_fpreg_t *val, int write, void *arg)
2634 {
2635 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2636 struct regcache *regcache = arg;
2637
2638 /* We never call any libunwind routines that need to write registers. */
2639 gdb_assert (!write);
2640
2641 regcache_cooked_read (regcache, regnum, (gdb_byte *) val);
2642
2643 return 0;
2644 }
2645
2646 /* Libunwind callback accessor function for accessing memory. */
2647 static int
2648 ia64_access_mem (unw_addr_space_t as,
2649 unw_word_t addr, unw_word_t *val,
2650 int write, void *arg)
2651 {
2652 if (addr - KERNEL_START < ktab_size)
2653 {
2654 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2655 + (addr - KERNEL_START));
2656
2657 if (write)
2658 *laddr = *val;
2659 else
2660 *val = *laddr;
2661 return 0;
2662 }
2663
2664 /* XXX do we need to normalize byte-order here? */
2665 if (write)
2666 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2667 else
2668 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2669 }
2670
2671 /* Call low-level function to access the kernel unwind table. */
2672 static LONGEST
2673 getunwind_table (gdb_byte **buf_p)
2674 {
2675 LONGEST x;
2676
2677 /* FIXME drow/2005-09-10: This code used to call
2678 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2679 for the currently running ia64-linux kernel. That data should
2680 come from the core file and be accessed via the auxv vector; if
2681 we want to preserve the fallback to the running kernel's table, then
2682 we should find a way to override the corefile layer's
2683 xfer_partial method. */
2684
2685 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2686 NULL, buf_p);
2687
2688 return x;
2689 }
2690
2691 /* Get the kernel unwind table. */
2692 static int
2693 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2694 {
2695 static struct ia64_table_entry *etab;
2696
2697 if (!ktab)
2698 {
2699 gdb_byte *ktab_buf;
2700 LONGEST size;
2701
2702 size = getunwind_table (&ktab_buf);
2703 if (size <= 0)
2704 return -UNW_ENOINFO;
2705
2706 ktab = (struct ia64_table_entry *) ktab_buf;
2707 ktab_size = size;
2708
2709 for (etab = ktab; etab->start_offset; ++etab)
2710 etab->info_offset += KERNEL_START;
2711 }
2712
2713 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2714 return -UNW_ENOINFO;
2715
2716 di->format = UNW_INFO_FORMAT_TABLE;
2717 di->gp = 0;
2718 di->start_ip = ktab[0].start_offset;
2719 di->end_ip = etab[-1].end_offset;
2720 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2721 di->u.ti.segbase = 0;
2722 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2723 di->u.ti.table_data = (unw_word_t *) ktab;
2724
2725 if (gdbarch_debug >= 1)
2726 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2727 "segbase=%s, length=%s, gp=%s\n",
2728 (char *) di->u.ti.name_ptr,
2729 hex_string (di->u.ti.segbase),
2730 pulongest (di->u.ti.table_len),
2731 hex_string (di->gp));
2732 return 0;
2733 }
2734
2735 /* Find the unwind table entry for a specified address. */
2736 static int
2737 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2738 unw_dyn_info_t *dip, void **buf)
2739 {
2740 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2741 Elf_Internal_Ehdr *ehdr;
2742 unw_word_t segbase = 0;
2743 CORE_ADDR load_base;
2744 bfd *bfd;
2745 int i;
2746
2747 bfd = objfile->obfd;
2748
2749 ehdr = elf_tdata (bfd)->elf_header;
2750 phdr = elf_tdata (bfd)->phdr;
2751
2752 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2753
2754 for (i = 0; i < ehdr->e_phnum; ++i)
2755 {
2756 switch (phdr[i].p_type)
2757 {
2758 case PT_LOAD:
2759 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2760 < phdr[i].p_memsz)
2761 p_text = phdr + i;
2762 break;
2763
2764 case PT_IA_64_UNWIND:
2765 p_unwind = phdr + i;
2766 break;
2767
2768 default:
2769 break;
2770 }
2771 }
2772
2773 if (!p_text || !p_unwind)
2774 return -UNW_ENOINFO;
2775
2776 /* Verify that the segment that contains the IP also contains
2777 the static unwind table. If not, we may be in the Linux kernel's
2778 DSO gate page, in which case the unwind table is in another segment.
2779 Otherwise, we are dealing with runtime-generated code, for which we
2780 have no info here. */
2781 segbase = p_text->p_vaddr + load_base;
2782
2783 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2784 {
2785 int ok = 0;
2786 for (i = 0; i < ehdr->e_phnum; ++i)
2787 {
2788 if (phdr[i].p_type == PT_LOAD
2789 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2790 {
2791 ok = 1;
2792 /* Get the segbase from the section containing the
2793 libunwind table. */
2794 segbase = phdr[i].p_vaddr + load_base;
2795 }
2796 }
2797 if (!ok)
2798 return -UNW_ENOINFO;
2799 }
2800
2801 dip->start_ip = p_text->p_vaddr + load_base;
2802 dip->end_ip = dip->start_ip + p_text->p_memsz;
2803 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2804 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2805 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2806 dip->u.rti.segbase = segbase;
2807 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2808 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2809
2810 return 0;
2811 }
2812
2813 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2814 static int
2815 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2816 int need_unwind_info, void *arg)
2817 {
2818 struct obj_section *sec = find_pc_section (ip);
2819 unw_dyn_info_t di;
2820 int ret;
2821 void *buf = NULL;
2822
2823 if (!sec)
2824 {
2825 /* XXX This only works if the host and the target architecture are
2826 both ia64 and if they have (more or less) the same kernel
2827 version. */
2828 if (get_kernel_table (ip, &di) < 0)
2829 return -UNW_ENOINFO;
2830
2831 if (gdbarch_debug >= 1)
2832 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2833 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2834 "length=%s,data=%s)\n",
2835 hex_string (ip), (char *)di.u.ti.name_ptr,
2836 hex_string (di.u.ti.segbase),
2837 hex_string (di.start_ip), hex_string (di.end_ip),
2838 hex_string (di.gp),
2839 pulongest (di.u.ti.table_len),
2840 hex_string ((CORE_ADDR)di.u.ti.table_data));
2841 }
2842 else
2843 {
2844 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2845 if (ret < 0)
2846 return ret;
2847
2848 if (gdbarch_debug >= 1)
2849 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2850 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2851 "length=%s,data=%s)\n",
2852 hex_string (ip), (char *)di.u.rti.name_ptr,
2853 hex_string (di.u.rti.segbase),
2854 hex_string (di.start_ip), hex_string (di.end_ip),
2855 hex_string (di.gp),
2856 pulongest (di.u.rti.table_len),
2857 hex_string (di.u.rti.table_data));
2858 }
2859
2860 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2861 arg);
2862
2863 /* We no longer need the dyn info storage so free it. */
2864 xfree (buf);
2865
2866 return ret;
2867 }
2868
2869 /* Libunwind callback accessor function for cleanup. */
2870 static void
2871 ia64_put_unwind_info (unw_addr_space_t as,
2872 unw_proc_info_t *pip, void *arg)
2873 {
2874 /* Nothing required for now. */
2875 }
2876
2877 /* Libunwind callback accessor function to get head of the dynamic
2878 unwind-info registration list. */
2879 static int
2880 ia64_get_dyn_info_list (unw_addr_space_t as,
2881 unw_word_t *dilap, void *arg)
2882 {
2883 struct obj_section *text_sec;
2884 struct objfile *objfile;
2885 unw_word_t ip, addr;
2886 unw_dyn_info_t di;
2887 int ret;
2888
2889 if (!libunwind_is_initialized ())
2890 return -UNW_ENOINFO;
2891
2892 for (objfile = object_files; objfile; objfile = objfile->next)
2893 {
2894 void *buf = NULL;
2895
2896 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2897 ip = obj_section_addr (text_sec);
2898 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2899 if (ret >= 0)
2900 {
2901 addr = libunwind_find_dyn_list (as, &di, arg);
2902 /* We no longer need the dyn info storage so free it. */
2903 xfree (buf);
2904
2905 if (addr)
2906 {
2907 if (gdbarch_debug >= 1)
2908 fprintf_unfiltered (gdb_stdlog,
2909 "dynamic unwind table in objfile %s "
2910 "at %s (gp=%s)\n",
2911 bfd_get_filename (objfile->obfd),
2912 hex_string (addr), hex_string (di.gp));
2913 *dilap = addr;
2914 return 0;
2915 }
2916 }
2917 }
2918 return -UNW_ENOINFO;
2919 }
2920
2921
2922 /* Frame interface functions for libunwind. */
2923
2924 static void
2925 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2926 struct frame_id *this_id)
2927 {
2928 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2929 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2930 struct frame_id id = outer_frame_id;
2931 gdb_byte buf[8];
2932 CORE_ADDR bsp;
2933
2934 libunwind_frame_this_id (this_frame, this_cache, &id);
2935 if (frame_id_eq (id, outer_frame_id))
2936 {
2937 (*this_id) = outer_frame_id;
2938 return;
2939 }
2940
2941 /* We must add the bsp as the special address for frame comparison
2942 purposes. */
2943 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2944 bsp = extract_unsigned_integer (buf, 8, byte_order);
2945
2946 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2947
2948 if (gdbarch_debug >= 1)
2949 fprintf_unfiltered (gdb_stdlog,
2950 "libunwind frame id: code %s, stack %s, "
2951 "special %s, this_frame %s\n",
2952 paddress (gdbarch, id.code_addr),
2953 paddress (gdbarch, id.stack_addr),
2954 paddress (gdbarch, bsp),
2955 host_address_to_string (this_frame));
2956 }
2957
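/* Implement the prev_register unwinder callback on top of libunwind,
   adding the extraction of single predicate and NaT bits and the
   computation of the previous bsp.  */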
2958 static struct value *
2959 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2960 void **this_cache, int regnum)
2961 {
2962 int reg = regnum;
2963 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2964 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2965 struct value *val;
2966
2967 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2968 reg = IA64_PR_REGNUM;
2969 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2970 reg = IA64_UNAT_REGNUM;
2971
2972 /* Let libunwind do most of the work. */
2973 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2974
2975 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2976 {
2977 ULONGEST prN_val;
2978
2979 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2980 {
2981 int rrb_pr = 0;
2982 ULONGEST cfm;
2983 gdb_byte buf[MAX_REGISTER_SIZE];
2984
2985 /* Fetch predicate register rename base from current frame
2986 marker for this frame. */
2987 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2988 cfm = extract_unsigned_integer (buf, 8, byte_order);
2989 rrb_pr = (cfm >> 32) & 0x3f;
2990
2991 /* Adjust the register number to account for register rotation. */
2992 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2993 }
2994 prN_val = extract_bit_field (value_contents_all (val),
2995 regnum - VP0_REGNUM, 1);
2996 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2997 }
2998
2999 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
3000 {
3001 ULONGEST unatN_val;
3002
3003 unatN_val = extract_bit_field (value_contents_all (val),
3004 regnum - IA64_NAT0_REGNUM, 1);
3005 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
3006 }
3007
3008 else if (regnum == IA64_BSP_REGNUM)
3009 {
3010 struct value *cfm_val;
3011 CORE_ADDR prev_bsp, prev_cfm;
3012
3013 /* We want to calculate the previous bsp as the end of the previous
3014 register stack frame. This corresponds to what the hardware bsp
3015 register will be if we pop the frame back which is why we might
3016 have been called. We know that libunwind will pass us back the
3017 beginning of the current frame so we should just add sof to it. */
3018 prev_bsp = extract_unsigned_integer (value_contents_all (val),
3019 8, byte_order);
3020 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
3021 IA64_CFM_REGNUM);
3022 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
3023 8, byte_order);
3024 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
3025
3026 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
3027 }
3028 else
3029 return val;
3030 }
3031
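/* Return non-zero if libunwind is initialized and the generic libunwind
   frame sniffer accepts THIS_FRAME.  */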
3032 static int
3033 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3034 struct frame_info *this_frame,
3035 void **this_cache)
3036 {
3037 if (libunwind_is_initialized ()
3038 && libunwind_frame_sniffer (self, this_frame, this_cache))
3039 return 1;
3040
3041 return 0;
3042 }
3043
3044 static const struct frame_unwind ia64_libunwind_frame_unwind =
3045 {
3046 NORMAL_FRAME,
3047 default_frame_unwind_stop_reason,
3048 ia64_libunwind_frame_this_id,
3049 ia64_libunwind_frame_prev_register,
3050 NULL,
3051 ia64_libunwind_frame_sniffer,
3052 libunwind_frame_dealloc_cache
3053 };
3054
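/* Implement the this_id unwinder callback for libunwind-based signal
   trampoline frames.  */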
3055 static void
3056 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3057 void **this_cache,
3058 struct frame_id *this_id)
3059 {
3060 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3061 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3062 gdb_byte buf[8];
3063 CORE_ADDR bsp;
3064 struct frame_id id = outer_frame_id;
3065 CORE_ADDR prev_ip;
3066
3067 libunwind_frame_this_id (this_frame, this_cache, &id);
3068 if (frame_id_eq (id, outer_frame_id))
3069 {
3070 (*this_id) = outer_frame_id;
3071 return;
3072 }
3073
3074 /* We must add the bsp as the special address for frame comparison
3075 purposes. */
3076 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3077 bsp = extract_unsigned_integer (buf, 8, byte_order);
3078
3079 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3080 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3081
3082 if (gdbarch_debug >= 1)
3083 fprintf_unfiltered (gdb_stdlog,
3084 "libunwind sigtramp frame id: code %s, "
3085 "stack %s, special %s, this_frame %s\n",
3086 paddress (gdbarch, id.code_addr),
3087 paddress (gdbarch, id.stack_addr),
3088 paddress (gdbarch, bsp),
3089 host_address_to_string (this_frame));
3090 }
3091
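/* Implement the prev_register unwinder callback for libunwind-based
   signal trampoline frames, falling back to the sigcontext method when
   libunwind reports a previous pc of zero.  */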
3092 static struct value *
3093 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3094 void **this_cache, int regnum)
3095 {
3096 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3097 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3098 struct value *prev_ip_val;
3099 CORE_ADDR prev_ip;
3100
3101 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3102 method of getting previous registers. */
3103 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3104 IA64_IP_REGNUM);
3105 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3106 8, byte_order);
3107
3108 if (prev_ip == 0)
3109 {
3110 void *tmp_cache = NULL;
3111 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3112 regnum);
3113 }
3114 else
3115 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3116 }
3117
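/* Sniffer for libunwind-based signal trampoline frames; falls back to
   the sigcontext sniffer when libunwind is not initialized.  */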
3118 static int
3119 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3120 struct frame_info *this_frame,
3121 void **this_cache)
3122 {
3123 if (libunwind_is_initialized ())
3124 {
3125 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3126 return 1;
3127 return 0;
3128 }
3129 else
3130 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3131 }
3132
3133 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3134 {
3135 SIGTRAMP_FRAME,
3136 default_frame_unwind_stop_reason,
3137 ia64_libunwind_sigtramp_frame_this_id,
3138 ia64_libunwind_sigtramp_frame_prev_register,
3139 NULL,
3140 ia64_libunwind_sigtramp_frame_sniffer
3141 };
3142
3143 /* Set of libunwind callback accessor functions.  */
3144 unw_accessors_t ia64_unw_accessors =
3145 {
3146 ia64_find_proc_info_x,
3147 ia64_put_unwind_info,
3148 ia64_get_dyn_info_list,
3149 ia64_access_mem,
3150 ia64_access_reg,
3151 ia64_access_fpreg,
3152 /* resume */
3153 /* get_proc_name */
3154 };
3155
3156 /* Set of special libunwind callback accessor functions for accessing
3157 the rse registers. At the top of the stack, we want libunwind to figure out
3158 how to read r32 - r127. Though usually they are found sequentially in
3159 memory starting from $bof, this is not always true. */
3160 unw_accessors_t ia64_unw_rse_accessors =
3161 {
3162 ia64_find_proc_info_x,
3163 ia64_put_unwind_info,
3164 ia64_get_dyn_info_list,
3165 ia64_access_mem,
3166 ia64_access_rse_reg,
3167 ia64_access_rse_fpreg,
3168 /* resume */
3169 /* get_proc_name */
3170 };
3171
3172 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3173 ia64-libunwind-tdep code to use. */
3174 struct libunwind_descr ia64_libunwind_descr =
3175 {
3176 ia64_gdb2uw_regnum,
3177 ia64_uw2gdb_regnum,
3178 ia64_is_fpreg,
3179 &ia64_unw_accessors,
3180 &ia64_unw_rse_accessors,
3181 };
3182
3183 #endif /* HAVE_LIBUNWIND_IA64_H */
3184
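/* Return non-zero if a value of type TYPE is returned using the struct
   convention rather than in registers.  */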
3185 static int
3186 ia64_use_struct_convention (struct type *type)
3187 {
3188 struct type *float_elt_type;
3189
3190 /* Don't use the struct convention for anything but structure,
3191 union, or array types. */
3192 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3193 || TYPE_CODE (type) == TYPE_CODE_UNION
3194 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3195 return 0;
3196
3197 /* HFAs are structures (or arrays) consisting entirely of floating
3198 point values of the same length. Up to 8 of these are returned
3199 in registers. Don't use the struct convention when this is the
3200 case. */
3201 float_elt_type = is_float_or_hfa_type (type);
3202 if (float_elt_type != NULL
3203 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3204 return 0;
3205
3206 /* Other structs of length 32 or less are returned in r8-r11.
3207 Don't use the struct convention for those either. */
3208 return TYPE_LENGTH (type) > 32;
3209 }
3210
3211 /* Return non-zero if TYPE is a structure or union type. */
3212
3213 static int
3214 ia64_struct_type_p (const struct type *type)
3215 {
3216 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3217 || TYPE_CODE (type) == TYPE_CODE_UNION);
3218 }
3219
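/* Copy the return value of type TYPE out of REGCACHE into VALBUF.
   Floating-point and HFA values are read starting at f8; all other
   values are read starting at r8.  */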
3220 static void
3221 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3222 gdb_byte *valbuf)
3223 {
3224 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3225 struct type *float_elt_type;
3226
3227 float_elt_type = is_float_or_hfa_type (type);
3228 if (float_elt_type != NULL)
3229 {
3230 gdb_byte from[MAX_REGISTER_SIZE];
3231 int offset = 0;
3232 int regnum = IA64_FR8_REGNUM;
3233 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3234
3235 while (n-- > 0)
3236 {
3237 regcache_cooked_read (regcache, regnum, from);
3238 convert_typed_floating (from, ia64_ext_type (gdbarch),
3239 (char *)valbuf + offset, float_elt_type);
3240 offset += TYPE_LENGTH (float_elt_type);
3241 regnum++;
3242 }
3243 }
3244 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3245 {
3246 /* This is an integral value, and its size is less than 8 bytes.
3247 These values are LSB-aligned, so extract the relevant bytes,
3248 and copy them into VALBUF. */
3249 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3250 so I suppose we should also add handling here for integral values
3251 whose size is greater than 8. But I wasn't able to create such
3252 a type, neither in C nor in Ada, so not worrying about these yet. */
3253 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3254 ULONGEST val;
3255
3256 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3257 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3258 }
3259 else
3260 {
3261 ULONGEST val;
3262 int offset = 0;
3263 int regnum = IA64_GR8_REGNUM;
3264 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3265 int n = TYPE_LENGTH (type) / reglen;
3266 int m = TYPE_LENGTH (type) % reglen;
3267
3268 while (n-- > 0)
3269 {
3270 ULONGEST val;
3271 regcache_cooked_read_unsigned (regcache, regnum, &val);
3272 memcpy ((char *)valbuf + offset, &val, reglen);
3273 offset += reglen;
3274 regnum++;
3275 }
3276
3277 if (m)
3278 {
3279 regcache_cooked_read_unsigned (regcache, regnum, &val);
3280 memcpy ((char *)valbuf + offset, &val, m);
3281 }
3282 }
3283 }
3284
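/* Write a return value of type TYPE from VALBUF into REGCACHE, using
   the same register assignment as ia64_extract_return_value.  */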
3285 static void
3286 ia64_store_return_value (struct type *type, struct regcache *regcache,
3287 const gdb_byte *valbuf)
3288 {
3289 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3290 struct type *float_elt_type;
3291
3292 float_elt_type = is_float_or_hfa_type (type);
3293 if (float_elt_type != NULL)
3294 {
3295 gdb_byte to[MAX_REGISTER_SIZE];
3296 int offset = 0;
3297 int regnum = IA64_FR8_REGNUM;
3298 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3299
3300 while (n-- > 0)
3301 {
3302 convert_typed_floating ((char *)valbuf + offset, float_elt_type,
3303 to, ia64_ext_type (gdbarch));
3304 regcache_cooked_write (regcache, regnum, to);
3305 offset += TYPE_LENGTH (float_elt_type);
3306 regnum++;
3307 }
3308 }
3309 else
3310 {
3311 ULONGEST val;
3312 int offset = 0;
3313 int regnum = IA64_GR8_REGNUM;
3314 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3315 int n = TYPE_LENGTH (type) / reglen;
3316 int m = TYPE_LENGTH (type) % reglen;
3317
3318 while (n-- > 0)
3319 {
3320 ULONGEST val;
3321 memcpy (&val, (char *)valbuf + offset, reglen);
3322 regcache_cooked_write_unsigned (regcache, regnum, val);
3323 offset += reglen;
3324 regnum++;
3325 }
3326
3327 if (m)
3328 {
3329 memcpy (&val, (char *)valbuf + offset, m);
3330 regcache_cooked_write_unsigned (regcache, regnum, val);
3331 }
3332 }
3333 }
3334
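/* Handle the return-value conventions for TYPE: dispatch to the
   store/extract helpers above and report whether the struct-return
   convention applies.  */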
3335 static enum return_value_convention
3336 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3337 struct type *valtype, struct regcache *regcache,
3338 gdb_byte *readbuf, const gdb_byte *writebuf)
3339 {
3340 int struct_return = ia64_use_struct_convention (valtype);
3341
3342 if (writebuf != NULL)
3343 {
3344 gdb_assert (!struct_return);
3345 ia64_store_return_value (valtype, regcache, writebuf);
3346 }
3347
3348 if (readbuf != NULL)
3349 {
3350 gdb_assert (!struct_return);
3351 ia64_extract_return_value (valtype, regcache, readbuf);
3352 }
3353
3354 if (struct_return)
3355 return RETURN_VALUE_STRUCT_CONVENTION;
3356 else
3357 return RETURN_VALUE_REGISTER_CONVENTION;
3358 }
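/* A hedged illustration (not part of GDB) of the two conventions
 selected above: sufficiently small aggregates come back in registers
 starting at r8 (or at fr8 when they are HFAs), while aggregates for
 which ia64_use_struct_convention returns true are returned in memory
 addressed through r8. */
#if 0
struct small_ret { long a, b; };   /* Fits in r8-r9.  */
struct hfa_ret   { double x, y; }; /* An HFA, returned in fr8-fr9.  */
#endif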
3359
3360 static int
3361 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3362 {
3363 switch (TYPE_CODE (t))
3364 {
3365 case TYPE_CODE_FLT:
3366 if (*etp)
3367 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3368 else
3369 {
3370 *etp = t;
3371 return 1;
3372 }
3373 break;
3374 case TYPE_CODE_ARRAY:
3375 return
3376 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3377 etp);
3378 break;
3379 case TYPE_CODE_STRUCT:
3380 {
3381 int i;
3382
3383 for (i = 0; i < TYPE_NFIELDS (t); i++)
3384 if (!is_float_or_hfa_type_recurse
3385 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3386 return 0;
3387 return 1;
3388 }
3389 break;
3390 default:
3391 return 0;
3392 break;
3393 }
3394 }
3395
3396 /* Determine if the given type is one of the floating point types or
3397 an HFA (which is a struct, array, or combination thereof whose
3398 bottom-most elements are all of the same floating point type). */
3399
3400 static struct type *
3401 is_float_or_hfa_type (struct type *t)
3402 {
3403 struct type *et = 0;
3404
3405 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3406 }
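/* Hedged examples (not compiled into GDB) of the classification done
 by is_float_or_hfa_type: it returns the element type for plain
 floats and HFAs, and NULL otherwise. */
#if 0
struct hfa3     { double x, y, z; };           /* HFA of three doubles.  */
struct hfa_nest { struct hfa3 a; double b; };  /* Still an HFA of doubles.  */
struct mixed    { double x; float y; };        /* Mixed widths: not an HFA.  */
struct with_int { double x; int n; };          /* Contains an int: not an HFA.  */
#endif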
3407
3408
3409 /* Return 1 if the alignment of T is such that the next even slot
3410 should be used. Return 0 if the next available slot should
3411 be used. (See section 8.5.1 of the IA-64 Software Conventions
3412 and Runtime manual). */
3413
3414 static int
3415 slot_alignment_is_next_even (struct type *t)
3416 {
3417 switch (TYPE_CODE (t))
3418 {
3419 case TYPE_CODE_INT:
3420 case TYPE_CODE_FLT:
3421 if (TYPE_LENGTH (t) > 8)
3422 return 1;
3423 else
3424 return 0;
3425 case TYPE_CODE_ARRAY:
3426 return
3427 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3428 case TYPE_CODE_STRUCT:
3429 {
3430 int i;
3431
3432 for (i = 0; i < TYPE_NFIELDS (t); i++)
3433 if (slot_alignment_is_next_even
3434 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3435 return 1;
3436 return 0;
3437 }
3438 default:
3439 return 0;
3440 }
3441 }
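/* A hedged illustration (not part of GDB) of the rule above: a
 16-byte scalar argument, such as B below on ia64 GNU/Linux where
 long double occupies 16 bytes, must start in an even-numbered
 argument slot. A would take slot 0, slot 1 would be left as padding,
 and B would occupy slots 2 and 3; an 8-byte double in B's place
 would need no padding. */
#if 0
extern void even_slot_example (int a, long double b);
#endif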
3442
3443 /* Attempt to find (and return) the global pointer for the given
3444 function.
3445
3446 This is a rather nasty bit of code that searches for the .dynamic section
3447 in the objfile corresponding to the pc of the function we're trying
3448 to call. Once it finds the addresses at which the .dynamic section
3449 lives in the child process, it scans the Elf64_Dyn entries for a
3450 DT_PLTGOT tag. If it finds one of these, the corresponding
3451 d_un.d_ptr value is the global pointer. */
3452
3453 static CORE_ADDR
3454 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3455 CORE_ADDR faddr)
3456 {
3457 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3458 struct obj_section *faddr_sect;
3459
3460 faddr_sect = find_pc_section (faddr);
3461 if (faddr_sect != NULL)
3462 {
3463 struct obj_section *osect;
3464
3465 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3466 {
3467 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3468 break;
3469 }
3470
3471 if (osect < faddr_sect->objfile->sections_end)
3472 {
3473 CORE_ADDR addr, endaddr;
3474
3475 addr = obj_section_addr (osect);
3476 endaddr = obj_section_endaddr (osect);
3477
3478 while (addr < endaddr)
3479 {
3480 int status;
3481 LONGEST tag;
3482 gdb_byte buf[8];
3483
3484 status = target_read_memory (addr, buf, sizeof (buf));
3485 if (status != 0)
3486 break;
3487 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3488
3489 if (tag == DT_PLTGOT)
3490 {
3491 CORE_ADDR global_pointer;
3492
3493 status = target_read_memory (addr + 8, buf, sizeof (buf));
3494 if (status != 0)
3495 break;
3496 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3497 byte_order);
3498
3499 /* The payoff... */
3500 return global_pointer;
3501 }
3502
3503 if (tag == DT_NULL)
3504 break;
3505
3506 addr += 16;
3507 }
3508 }
3509 }
3510 return 0;
3511 }
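/* A hedged sketch (the authoritative definition is Elf64_Dyn in
 <elf.h>) of the 16-byte .dynamic entries walked by the loop above:
 an 8-byte tag read at ADDR followed by an 8-byte value read at
 ADDR + 8. */
#if 0
typedef struct
{
  int64_t d_tag;    /* E.g. DT_PLTGOT, or DT_NULL at the end of the table.  */
  uint64_t d_val;   /* For DT_PLTGOT, the global pointer value.  */
} elf64_dyn_sketch; /* Assumes <stdint.h> integer types.  */
#endif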
3512
3513 /* Attempt to find (and return) the global pointer for the given
3514 function. We first try the find_global_pointer_from_solib routine
3515 from the gdbarch tdep vector, if provided. If that does not
3516 work, we then try ia64_find_global_pointer_from_dynamic_section. */
3517
3518 static CORE_ADDR
3519 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3520 {
3521 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3522 CORE_ADDR addr = 0;
3523
3524 if (tdep->find_global_pointer_from_solib)
3525 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3526 if (addr == 0)
3527 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3528 return addr;
3529 }
3530
3531 /* Given a function's address, attempt to find (and return) the
3532 corresponding (canonical) function descriptor. Return 0 if
3533 not found. */
3534 static CORE_ADDR
3535 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3536 {
3537 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3538 struct obj_section *faddr_sect;
3539
3540 /* Return early if faddr is already a function descriptor. */
3541 faddr_sect = find_pc_section (faddr);
3542 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3543 return faddr;
3544
3545 if (faddr_sect != NULL)
3546 {
3547 struct obj_section *osect;
3548 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3549 {
3550 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3551 break;
3552 }
3553
3554 if (osect < faddr_sect->objfile->sections_end)
3555 {
3556 CORE_ADDR addr, endaddr;
3557
3558 addr = obj_section_addr (osect);
3559 endaddr = obj_section_endaddr (osect);
3560
3561 while (addr < endaddr)
3562 {
3563 int status;
3564 LONGEST faddr2;
3565 gdb_byte buf[8];
3566
3567 status = target_read_memory (addr, buf, sizeof (buf));
3568 if (status != 0)
3569 break;
3570 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3571
3572 if (faddr == faddr2)
3573 return addr;
3574
3575 addr += 16;
3576 }
3577 }
3578 }
3579 return 0;
3580 }
3581
3582 /* Attempt to find a function descriptor corresponding to the
3583 given address. If none is found, construct one on the
3584 stack using the address at fdaptr. */
3585
3586 static CORE_ADDR
3587 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3588 {
3589 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3590 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3591 CORE_ADDR fdesc;
3592
3593 fdesc = find_extant_func_descr (gdbarch, faddr);
3594
3595 if (fdesc == 0)
3596 {
3597 ULONGEST global_pointer;
3598 gdb_byte buf[16];
3599
3600 fdesc = *fdaptr;
3601 *fdaptr += 16;
3602
3603 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3604
3605 if (global_pointer == 0)
3606 regcache_cooked_read_unsigned (regcache,
3607 IA64_GR1_REGNUM, &global_pointer);
3608
3609 store_unsigned_integer (buf, 8, byte_order, faddr);
3610 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3611
3612 write_memory (fdesc, buf, 16);
3613 }
3614
3615 return fdesc;
3616 }
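/* A hedged sketch (not part of GDB) of the 16-byte descriptor laid
 out by the write_memory call above: the function's entry point
 followed by the global pointer that must be in gr1 when the function
 is called. */
#if 0
struct ia64_fdesc_sketch
{
  uint64_t entry_point;     /* Code address of the function.  */
  uint64_t global_pointer;  /* Loaded into gr1 before the call.  */
};                          /* Assumes <stdint.h> integer types.  */
#endif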
3617
3618 /* Use the following routine when printing out function pointers
3619 so the user can see the function address rather than just the
3620 function descriptor. */
3621 static CORE_ADDR
3622 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3623 struct target_ops *targ)
3624 {
3625 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3626 struct obj_section *s;
3627 gdb_byte buf[8];
3628
3629 s = find_pc_section (addr);
3630
3631 /* Check whether ADDR points to a function descriptor. */
3632 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3633 return read_memory_unsigned_integer (addr, 8, byte_order);
3634
3635 /* Normally, functions live inside a section that is executable.
3636 So, if ADDR points to a non-executable section, then treat it
3637 as a function descriptor and return the target address iff
3638 the target address itself points to a section that is executable.
3639 First check that all 8 bytes of memory there are readable. */
3640 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3641 && target_read_memory (addr, buf, 8) == 0)
3642 {
3643 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3644 struct obj_section *pc_section = find_pc_section (pc);
3645
3646 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3647 return pc;
3648 }
3649
3650 /* There are also descriptors embedded in vtables. */
3651 if (s)
3652 {
3653 struct bound_minimal_symbol minsym;
3654
3655 minsym = lookup_minimal_symbol_by_pc (addr);
3656
3657 if (minsym.minsym
3658 && is_vtable_name (MSYMBOL_LINKAGE_NAME (minsym.minsym)))
3659 return read_memory_unsigned_integer (addr, 8, byte_order);
3660 }
3661
3662 return addr;
3663 }
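/* Usage illustration (hedged; exact output depends on the program):
 with this conversion installed, evaluating a function pointer such
 as "print &main" in GDB follows the .opd descriptor and shows the
 code address of main rather than the address of its descriptor. */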
3664
3665 static CORE_ADDR
3666 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3667 {
3668 return sp & ~0xfLL;
3669 }
3670
3671 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3672
3673 static void
3674 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3675 {
3676 ULONGEST cfm, pfs, new_bsp;
3677
3678 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3679
3680 new_bsp = rse_address_add (bsp, sof);
3681 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3682
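/* Build the ar.pfs value a call would have produced: keep the top two
 bits of the existing PFS (the privilege level field) and install the
 caller's CFM as the new previous frame marker in the low bits.
 (Descriptive note; field layout per the IA-64 architecture manual.) */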
3683 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3684 pfs &= 0xc000000000000000LL;
3685 pfs |= (cfm & 0xffffffffffffLL);
3686 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3687
3688 cfm &= 0xc000000000000000LL;
3689 cfm |= sof;
3690 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3691 }
3692
3693 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3694 ia64. */
3695
3696 static void
3697 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3698 int slotnum, gdb_byte *buf)
3699 {
3700 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3701 }
3702
3703 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3704
3705 static void
3706 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3707 {
3708 /* Nothing needed. */
3709 }
3710
3711 static CORE_ADDR
3712 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3713 struct regcache *regcache, CORE_ADDR bp_addr,
3714 int nargs, struct value **args, CORE_ADDR sp,
3715 int struct_return, CORE_ADDR struct_addr)
3716 {
3717 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3718 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3719 int argno;
3720 struct value *arg;
3721 struct type *type;
3722 int len, argoffset;
3723 int nslots, rseslots, memslots, slotnum, nfuncargs;
3724 int floatreg;
3725 ULONGEST bsp;
3726 CORE_ADDR funcdescaddr, pc, global_pointer;
3727 CORE_ADDR func_addr = find_function_addr (function, NULL);
3728
3729 nslots = 0;
3730 nfuncargs = 0;
3731 /* Count the number of slots needed for the arguments. */
3732 for (argno = 0; argno < nargs; argno++)
3733 {
3734 arg = args[argno];
3735 type = check_typedef (value_type (arg));
3736 len = TYPE_LENGTH (type);
3737
3738 if ((nslots & 1) && slot_alignment_is_next_even (type))
3739 nslots++;
3740
3741 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3742 nfuncargs++;
3743
3744 nslots += (len + 7) / 8;
3745 }
3746
3747 /* Divvy up the slots between the RSE and the memory stack. */
3748 rseslots = (nslots > 8) ? 8 : nslots;
3749 memslots = nslots - rseslots;
3750
3751 /* Allocate a new RSE frame. */
3752 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3753 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3754
3755 /* We will attempt to find function descriptors in the .opd segment,
3756 but if we can't we'll construct them ourselves. That being the
3757 case, we'll need to reserve space on the stack for them. */
3758 funcdescaddr = sp - nfuncargs * 16;
3759 funcdescaddr &= ~0xfLL;
3760
3761 /* Adjust the stack pointer to its new value. The calling conventions
3762 require us to have 16 bytes of scratch, plus whatever space is
3763 necessary for the memory slots and our function descriptors. */
3764 sp = sp - 16 - (memslots + nfuncargs) * 8;
3765 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3766
3767 /* Place the arguments where they belong. The arguments will be
3768 either placed in the RSE backing store or on the memory stack.
3769 In addition, floating point arguments or HFAs are placed in
3770 floating point registers. */
3771 slotnum = 0;
3772 floatreg = IA64_FR8_REGNUM;
3773 for (argno = 0; argno < nargs; argno++)
3774 {
3775 struct type *float_elt_type;
3776
3777 arg = args[argno];
3778 type = check_typedef (value_type (arg));
3779 len = TYPE_LENGTH (type);
3780
3781 /* Special handling for function pointer parameters. */
3782 if (len == 8
3783 && TYPE_CODE (type) == TYPE_CODE_PTR
3784 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3785 {
3786 gdb_byte val_buf[8];
3787 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3788 8, byte_order);
3789 store_unsigned_integer (val_buf, 8, byte_order,
3790 find_func_descr (regcache, faddr,
3791 &funcdescaddr));
3792 if (slotnum < rseslots)
3793 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3794 slotnum, val_buf);
3795 else
3796 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3797 slotnum++;
3798 continue;
3799 }
3800
3801 /* Normal slots. */
3802
3803 /* Skip odd slot if necessary... */
3804 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3805 slotnum++;
3806
3807 argoffset = 0;
3808 while (len > 0)
3809 {
3810 gdb_byte val_buf[8];
3811
3812 memset (val_buf, 0, 8);
3813 if (!ia64_struct_type_p (type) && len < 8)
3814 {
3815 /* Integral types are LSB-aligned, so we have to be careful
3816 to insert the argument on the correct side of the buffer.
3817 This is why we use store_unsigned_integer. */
3818 store_unsigned_integer
3819 (val_buf, 8, byte_order,
3820 extract_unsigned_integer (value_contents (arg), len,
3821 byte_order));
3822 }
3823 else
3824 {
3825 /* This is either an 8-byte integral type, or an aggregate.
3826 For an 8-byte integral type there is no problem; we just
3827 copy the value over.
3828
3829 For aggregates, the only potentially tricky portion
3830 is to write the last one if it is less than 8 bytes.
3831 In this case, the data is Byte0-aligned. Happy news,
3832 this means that we don't need to differentiate the
3833 handling of 8-byte blocks and less-than-8-byte blocks. */
3834 memcpy (val_buf, value_contents (arg) + argoffset,
3835 (len > 8) ? 8 : len);
3836 }
3837
3838 if (slotnum < rseslots)
3839 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3840 slotnum, val_buf);
3841 else
3842 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3843
3844 argoffset += 8;
3845 len -= 8;
3846 slotnum++;
3847 }
3848
3849 /* Handle floating point types (including HFAs). */
3850 float_elt_type = is_float_or_hfa_type (type);
3851 if (float_elt_type != NULL)
3852 {
3853 argoffset = 0;
3854 len = TYPE_LENGTH (type);
3855 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3856 {
3857 gdb_byte to[MAX_REGISTER_SIZE];
3858 convert_typed_floating (value_contents (arg) + argoffset,
3859 float_elt_type, to,
3860 ia64_ext_type (gdbarch));
3861 regcache_cooked_write (regcache, floatreg, to);
3862 floatreg++;
3863 argoffset += TYPE_LENGTH (float_elt_type);
3864 len -= TYPE_LENGTH (float_elt_type);
3865 }
3866 }
3867 }
3868
3869 /* Store the address of the struct return area in r8 if necessary. */
3870 if (struct_return)
3871 {
3872 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3873 (ULONGEST) struct_addr);
3874 }
3875
3876 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3877
3878 if (global_pointer != 0)
3879 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3880
3881 /* The following is not necessary on HP-UX, because we're using
3882 a dummy code sequence pushed on the stack to make the call, and
3883 this sequence doesn't need b0 to be set in order for our dummy
3884 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3885 it's needed for other OSes, so we do this unconditionally. */
3886 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3887
3888 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3889
3890 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3891
3892 return sp;
3893 }
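/* A hedged illustration (not part of GDB) of how the routine above
 distributes arguments: for a callee such as the one below, A..H fill
 the eight RSE slots, I and J spill to the memory stack at sp + 16
 and sp + 24, and a floating-point or HFA argument would in addition
 be copied into fr8 onward. */
#if 0
extern void dummy_call_example (long a, long b, long c, long d,
                                long e, long f, long g, long h,
                                long i, long j);
#endif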
3894
3895 static const struct ia64_infcall_ops ia64_infcall_ops =
3896 {
3897 ia64_allocate_new_rse_frame,
3898 ia64_store_argument_in_slot,
3899 ia64_set_function_addr
3900 };
3901
3902 static struct frame_id
3903 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3904 {
3905 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3906 gdb_byte buf[8];
3907 CORE_ADDR sp, bsp;
3908
3909 get_frame_register (this_frame, sp_regnum, buf);
3910 sp = extract_unsigned_integer (buf, 8, byte_order);
3911
3912 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3913 bsp = extract_unsigned_integer (buf, 8, byte_order);
3914
3915 if (gdbarch_debug >= 1)
3916 fprintf_unfiltered (gdb_stdlog,
3917 "dummy frame id: code %s, stack %s, special %s\n",
3918 paddress (gdbarch, get_frame_pc (this_frame)),
3919 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3920
3921 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3922 }
3923
3924 static CORE_ADDR
3925 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3926 {
3927 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3928 gdb_byte buf[8];
3929 CORE_ADDR ip, psr, pc;
3930
3931 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3932 ip = extract_unsigned_integer (buf, 8, byte_order);
3933 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3934 psr = extract_unsigned_integer (buf, 8, byte_order);
3935
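/* PSR bits 41:42 hold psr.ri, the slot number of the interrupted
 instruction; fold it into the low bits of the bundle address to form
 the slot-encoded PC that GDB uses. */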
3936 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3937 return pc;
3938 }
3939
3940 static int
3941 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3942 {
3943 info->bytes_per_line = SLOT_MULTIPLIER;
3944 return print_insn_ia64 (memaddr, info);
3945 }
3946
3947 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3948
3949 static int
3950 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3951 {
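/* The low 7 bits of CFM hold the sof (size of frame) field. */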
3952 return (cfm & 0x7f);
3953 }
3954
3955 static struct gdbarch *
3956 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3957 {
3958 struct gdbarch *gdbarch;
3959 struct gdbarch_tdep *tdep;
3960
3961 /* If there is already a candidate, use it. */
3962 arches = gdbarch_list_lookup_by_info (arches, &info);
3963 if (arches != NULL)
3964 return arches->gdbarch;
3965
3966 tdep = xzalloc (sizeof (struct gdbarch_tdep));
3967 gdbarch = gdbarch_alloc (&info, tdep);
3968
3969 tdep->size_of_register_frame = ia64_size_of_register_frame;
3970
3971 /* According to the ia64 specs, instructions that store long double
3972 floats in memory use a long-double format different from that
3973 used in the floating registers. The memory format matches the
3974 x86 extended float format which is 80 bits. An OS may choose to
3975 use this format (e.g. GNU/Linux) or choose to use a different
3976 format for storing long doubles (e.g. HP-UX). In the latter case,
3977 the setting of the format may be moved/overridden in an
3978 OS-specific tdep file. */
3979 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3980
3981 set_gdbarch_short_bit (gdbarch, 16);
3982 set_gdbarch_int_bit (gdbarch, 32);
3983 set_gdbarch_long_bit (gdbarch, 64);
3984 set_gdbarch_long_long_bit (gdbarch, 64);
3985 set_gdbarch_float_bit (gdbarch, 32);
3986 set_gdbarch_double_bit (gdbarch, 64);
3987 set_gdbarch_long_double_bit (gdbarch, 128);
3988 set_gdbarch_ptr_bit (gdbarch, 64);
3989
3990 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3991 set_gdbarch_num_pseudo_regs (gdbarch,
3992 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3993 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3994 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3995
3996 set_gdbarch_register_name (gdbarch, ia64_register_name);
3997 set_gdbarch_register_type (gdbarch, ia64_register_type);
3998
3999 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
4000 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
4001 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
4002 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
4003 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
4004 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
4005 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
4006
4007 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
4008
4009 set_gdbarch_return_value (gdbarch, ia64_return_value);
4010
4011 set_gdbarch_memory_insert_breakpoint (gdbarch,
4012 ia64_memory_insert_breakpoint);
4013 set_gdbarch_memory_remove_breakpoint (gdbarch,
4014 ia64_memory_remove_breakpoint);
4015 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
4016 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
4017 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
4018
4019 /* Settings for calling functions in the inferior. */
4020 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
4021 tdep->infcall_ops = ia64_infcall_ops;
4022 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
4023 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
4024
4025 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
4026 #ifdef HAVE_LIBUNWIND_IA64_H
4027 frame_unwind_append_unwinder (gdbarch,
4028 &ia64_libunwind_sigtramp_frame_unwind);
4029 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4030 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4031 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4032 #else
4033 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4034 #endif
4035 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4036 frame_base_set_default (gdbarch, &ia64_frame_base);
4037
4038 /* Settings that should be unnecessary. */
4039 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4040
4041 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4042 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4043 ia64_convert_from_func_ptr_addr);
4044
4045 /* The virtual table contains 16-byte descriptors, not pointers to
4046 descriptors. */
4047 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4048
4049 /* Hook in ABI-specific overrides, if they have been registered. */
4050 gdbarch_init_osabi (info, gdbarch);
4051
4052 return gdbarch;
4053 }
4054
4055 extern initialize_file_ftype _initialize_ia64_tdep; /* -Wmissing-prototypes */
4056
4057 void
4058 _initialize_ia64_tdep (void)
4059 {
4060 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4061 }