1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2019 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libc
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72 static gdb::optional<gdb::byte_vector> ktab_buf;
73
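/* Illustrative sketch, not part of the original file: the redirect
   described in the KERNEL_START comment above boils down to a range
   check against the address range into which the kernel's unwind
   data was projected.  The helper name is hypothetical.  */

static int
ia64_addr_in_projected_ktab (CORE_ADDR addr)
{
  /* Non-zero when ADDR falls inside
     KERNEL_START..KERNEL_START+ktab_size and should therefore be
     served from KTAB instead of target memory.  */
  return (ktab_size != 0
          && addr >= KERNEL_START
          && addr < KERNEL_START + ktab_size);
}
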
74 #endif
75
76 /* An enumeration of the different IA-64 instruction types. */
77
78 typedef enum instruction_type
79 {
80 A, /* Integer ALU ; I-unit or M-unit */
81 I, /* Non-ALU integer; I-unit */
82 M, /* Memory ; M-unit */
83 F, /* Floating-point ; F-unit */
84 B, /* Branch ; B-unit */
85 L, /* Extended (L+X) ; I-unit */
86 X, /* Extended (L+X) ; I-unit */
87 undefined /* undefined or reserved */
88 } instruction_type;
89
90 /* We represent IA-64 PC addresses as the value of the instruction
91 pointer or'd with some bit combination in the low nibble which
92 represents the slot number in the bundle addressed by the
93 instruction pointer. The problem is that the Linux kernel
94 multiplies its slot numbers (for exceptions) by one while the
95 disassembler multiplies its slot numbers by 6. In addition, I've
96 heard it said that the simulator uses 1 as the multiplier.
97
98 I've fixed the disassembler so that the bytes_per_line field will
99 be the slot multiplier. If bytes_per_line comes in as zero, it
100 is set to six (which is how it was set up initially). -- objdump
101 displays pretty disassembly dumps with this value. For our purposes,
102 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
103 never want to also display the raw bytes the way objdump does. */
104
105 #define SLOT_MULTIPLIER 1
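
/* Illustrative sketch, not part of the original file: with
   SLOT_MULTIPLIER == 1, a GDB PC value is just the 16-byte aligned
   bundle address with the slot number (0, 1 or 2) or'd into the low
   nibble, e.g. PC 0xABCDE2 names slot 2 of the bundle at 0xABCDE0.
   The hypothetical helpers below make that decomposition explicit;
   the real code performs the same arithmetic inline.  */

static CORE_ADDR
ia64_pc_to_bundle_addr (CORE_ADDR pc)
{
  /* Mask off the slot number kept in the low nibble.  */
  return pc & ~(CORE_ADDR) 0x0f;
}

static int
ia64_pc_to_slotnum (CORE_ADDR pc)
{
  /* Same decoding as fetch_instruction below.  */
  return (int) (pc & 0x0f) / SLOT_MULTIPLIER;
}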
106
107 /* Length in bytes of an instruction bundle. */
108
109 #define BUNDLE_LEN 16
110
111 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
112
113 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
114 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
115 #endif
116
117 static gdbarch_init_ftype ia64_gdbarch_init;
118
119 static gdbarch_register_name_ftype ia64_register_name;
120 static gdbarch_register_type_ftype ia64_register_type;
121 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
122 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
123 static struct type *is_float_or_hfa_type (struct type *t);
124 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
125 CORE_ADDR faddr);
126
127 #define NUM_IA64_RAW_REGS 462
128
130 /* Size in bytes big enough to hold an FP register. */
130 #define IA64_FP_REGISTER_SIZE 16
131
132 static int sp_regnum = IA64_GR12_REGNUM;
133
134 /* NOTE: we treat the register stack registers r32-r127 as
135 pseudo-registers because they may not be accessible via the ptrace
136 register get/set interfaces. */
137
138 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
139 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
140 V127_REGNUM = V32_REGNUM + 95,
141 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
142 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
143
144 /* Array of register names; there should be ia64_num_regs strings in
145 the initializer. */
146
147 static const char *ia64_register_names[] =
148 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
149 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
150 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
151 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164
165 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
166 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
167 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
168 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
169 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
170 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
171 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
172 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
173 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
174 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
175 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
176 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
177 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
178 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
179 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
180 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
181
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190
191 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
192
193 "vfp", "vrap",
194
195 "pr", "ip", "psr", "cfm",
196
197 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
198 "", "", "", "", "", "", "", "",
199 "rsc", "bsp", "bspstore", "rnat",
200 "", "fcr", "", "",
201 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
202 "ccv", "", "", "", "unat", "", "", "",
203 "fpsr", "", "", "", "itc",
204 "", "", "", "", "", "", "", "", "", "",
205 "", "", "", "", "", "", "", "", "",
206 "pfs", "lc", "ec",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "",
214 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
215 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
216 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
217 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
218 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
219 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
220 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
221 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
222 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
223 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
224 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
225 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
226 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
227 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
228 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
229 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
230
231 "bof",
232
233 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
234 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
235 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
236 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
237 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
238 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
239 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
240 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
241 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
242 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
243 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
244 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
245
246 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
247 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
248 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
249 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
250 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
251 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
252 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
253 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
254 };
255
256 struct ia64_frame_cache
257 {
258 CORE_ADDR base; /* frame pointer base for frame */
259 CORE_ADDR pc; /* function start pc for frame */
260 CORE_ADDR saved_sp; /* stack pointer for frame */
261 CORE_ADDR bsp; /* points at r32 for the current frame */
262 CORE_ADDR cfm; /* cfm value for current frame */
263 CORE_ADDR prev_cfm; /* cfm value for previous frame */
264 int frameless;
265 int sof; /* Size of frame (decoded from cfm value). */
266 int sol; /* Size of locals (decoded from cfm value). */
267 int sor; /* Number of rotating registers (decoded from
268 cfm value). */
269 CORE_ADDR after_prologue;
270 /* Address of first instruction after the last
271 prologue instruction; note that there may
272 be instructions from the function's body
273 intermingled with the prologue. */
274 int mem_stack_frame_size;
275 /* Size of the memory stack frame (may be zero),
276 or -1 if it has not been determined yet. */
277 int fp_reg; /* Register number (if any) used as a frame pointer
278 for this frame. 0 if no register is being used
279 as the frame pointer. */
280
281 /* Saved registers. */
282 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
283
284 };
285
286 static int
287 floatformat_valid (const struct floatformat *fmt, const void *from)
288 {
289 return 1;
290 }
291
292 static const struct floatformat floatformat_ia64_ext_little =
293 {
294 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
295 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
296 };
297
298 static const struct floatformat floatformat_ia64_ext_big =
299 {
300 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
301 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
302 };
303
304 static const struct floatformat *floatformats_ia64_ext[2] =
305 {
306 &floatformat_ia64_ext_big,
307 &floatformat_ia64_ext_little
308 };
309
310 static struct type *
311 ia64_ext_type (struct gdbarch *gdbarch)
312 {
313 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
314
315 if (!tdep->ia64_ext_type)
316 tdep->ia64_ext_type
317 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
318 floatformats_ia64_ext);
319
320 return tdep->ia64_ext_type;
321 }
322
323 static int
324 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
325 struct reggroup *group)
326 {
327 int vector_p;
328 int float_p;
329 int raw_p;
330 if (group == all_reggroup)
331 return 1;
332 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
333 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
334 raw_p = regnum < NUM_IA64_RAW_REGS;
335 if (group == float_reggroup)
336 return float_p;
337 if (group == vector_reggroup)
338 return vector_p;
339 if (group == general_reggroup)
340 return (!vector_p && !float_p);
341 if (group == save_reggroup || group == restore_reggroup)
342 return raw_p;
343 return 0;
344 }
345
346 static const char *
347 ia64_register_name (struct gdbarch *gdbarch, int reg)
348 {
349 return ia64_register_names[reg];
350 }
351
352 struct type *
353 ia64_register_type (struct gdbarch *arch, int reg)
354 {
355 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
356 return ia64_ext_type (arch);
357 else
358 return builtin_type (arch)->builtin_long;
359 }
360
361 static int
362 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
363 {
364 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
365 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
366 return reg;
367 }
368
369
370 /* Extract ``len'' bits from an instruction bundle starting at
371 bit ``from''. */
372
373 static long long
374 extract_bit_field (const gdb_byte *bundle, int from, int len)
375 {
376 long long result = 0LL;
377 int to = from + len;
378 int from_byte = from / 8;
379 int to_byte = to / 8;
380 unsigned char *b = (unsigned char *) bundle;
381 unsigned char c;
382 int lshift;
383 int i;
384
385 c = b[from_byte];
386 if (from_byte == to_byte)
387 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
388 result = c >> (from % 8);
389 lshift = 8 - (from % 8);
390
391 for (i = from_byte+1; i < to_byte; i++)
392 {
393 result |= ((long long) b[i]) << lshift;
394 lshift += 8;
395 }
396
397 if (from_byte < to_byte && (to % 8 != 0))
398 {
399 c = b[to_byte];
400 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
401 result |= ((long long) c) << lshift;
402 }
403
404 return result;
405 }
406
407 /* Replace the specified bits in an instruction bundle. */
408
409 static void
410 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
411 {
412 int to = from + len;
413 int from_byte = from / 8;
414 int to_byte = to / 8;
415 unsigned char *b = (unsigned char *) bundle;
416 unsigned char c;
417
418 if (from_byte == to_byte)
419 {
420 unsigned char left, right;
421 c = b[from_byte];
422 left = (c >> (to % 8)) << (to % 8);
423 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
424 c = (unsigned char) (val & 0xff);
425 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
426 c |= right | left;
427 b[from_byte] = c;
428 }
429 else
430 {
431 int i;
432 c = b[from_byte];
433 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
434 c = c | (val << (from % 8));
435 b[from_byte] = c;
436 val >>= 8 - from % 8;
437
438 for (i = from_byte+1; i < to_byte; i++)
439 {
440 c = val & 0xff;
441 val >>= 8;
442 b[i] = c;
443 }
444
445 if (to % 8 != 0)
446 {
447 unsigned char cv = (unsigned char) val;
448 c = b[to_byte];
449 c = c >> (to % 8) << (to % 8);
450 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
451 b[to_byte] = c;
452 }
453 }
454 }
455
456 /* Return the contents of slot N (for N = 0, 1, or 2) in
457 an instruction bundle. */
458
459 static long long
460 slotN_contents (gdb_byte *bundle, int slotnum)
461 {
462 return extract_bit_field (bundle, 5+41*slotnum, 41);
463 }
464
465 /* Store an instruction in an instruction bundle. */
466
467 static void
468 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
469 {
470 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
471 }
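
/* Illustrative sketch, not part of the original file: a 16-byte
   bundle holds a 5-bit template followed by three 41-bit slots
   (5 + 3 * 41 = 128 bits), so slot N starts at bit 5 + 41 * N.  The
   hypothetical helper below decodes every field of a bundle with the
   accessors above.  */

struct ia64_decoded_bundle
{
  int templ;            /* Template, bits 0..4.  */
  long long slot[3];    /* Slot N, 41 bits starting at bit 5 + 41 * N.  */
};

static void
ia64_decode_bundle (gdb_byte *bundle, struct ia64_decoded_bundle *out)
{
  int i;

  out->templ = (int) extract_bit_field (bundle, 0, 5);
  for (i = 0; i < 3; i++)
    out->slot[i] = slotN_contents (bundle, i);
}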
472
473 static const enum instruction_type template_encoding_table[32][3] =
474 {
475 { M, I, I }, /* 00 */
476 { M, I, I }, /* 01 */
477 { M, I, I }, /* 02 */
478 { M, I, I }, /* 03 */
479 { M, L, X }, /* 04 */
480 { M, L, X }, /* 05 */
481 { undefined, undefined, undefined }, /* 06 */
482 { undefined, undefined, undefined }, /* 07 */
483 { M, M, I }, /* 08 */
484 { M, M, I }, /* 09 */
485 { M, M, I }, /* 0A */
486 { M, M, I }, /* 0B */
487 { M, F, I }, /* 0C */
488 { M, F, I }, /* 0D */
489 { M, M, F }, /* 0E */
490 { M, M, F }, /* 0F */
491 { M, I, B }, /* 10 */
492 { M, I, B }, /* 11 */
493 { M, B, B }, /* 12 */
494 { M, B, B }, /* 13 */
495 { undefined, undefined, undefined }, /* 14 */
496 { undefined, undefined, undefined }, /* 15 */
497 { B, B, B }, /* 16 */
498 { B, B, B }, /* 17 */
499 { M, M, B }, /* 18 */
500 { M, M, B }, /* 19 */
501 { undefined, undefined, undefined }, /* 1A */
502 { undefined, undefined, undefined }, /* 1B */
503 { M, F, B }, /* 1C */
504 { M, F, B }, /* 1D */
505 { undefined, undefined, undefined }, /* 1E */
506 { undefined, undefined, undefined }, /* 1F */
507 };
508
509 /* Fetch and (partially) decode an instruction at ADDR and return the
510 address of the next instruction to fetch. */
511
512 static CORE_ADDR
513 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
514 {
515 gdb_byte bundle[BUNDLE_LEN];
516 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
517 long long templ;
518 int val;
519
520 /* Warn about slot numbers greater than 2. We used to generate
521 an error here on the assumption that the user entered an invalid
522 address. But, sometimes GDB itself requests an invalid address.
523 This can (easily) happen when execution stops in a function for
524 which there are no symbols. The prologue scanner will attempt to
525 find the beginning of the function - if the nearest symbol
526 happens to not be aligned on a bundle boundary (16 bytes), the
527 resulting starting address will cause GDB to think that the slot
528 number is too large.
529
530 So we warn about it and set the slot number to zero. It is
531 not necessarily a fatal condition, particularly if debugging
532 at the assembly language level. */
533 if (slotnum > 2)
534 {
535 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
536 "Using slot 0 instead"));
537 slotnum = 0;
538 }
539
540 addr &= ~0x0f;
541
542 val = target_read_memory (addr, bundle, BUNDLE_LEN);
543
544 if (val != 0)
545 return 0;
546
547 *instr = slotN_contents (bundle, slotnum);
548 templ = extract_bit_field (bundle, 0, 5);
549 *it = template_encoding_table[(int)templ][slotnum];
550
551 if (slotnum == 2 || (slotnum == 1 && *it == L))
552 addr += 16;
553 else
554 addr += (slotnum + 1) * SLOT_MULTIPLIER;
555
556 return addr;
557 }
558
559 /* There are 5 different break instructions (break.i, break.b,
560 break.m, break.f, and break.x), but they all have the same
561 encoding. (The five bit template in the low five bits of the
562 instruction bundle distinguishes one from another.)
563
564 The runtime architecture manual specifies that break instructions
565 used for debugging purposes must have the upper two bits of the 21
566 bit immediate set to a 0 and a 1 respectively. A breakpoint
567 instruction encodes the most significant bit of its 21 bit
568 immediate at bit 36 of the 41 bit instruction. The penultimate msb
569 is at bit 25 which leads to the pattern below.
570
571 Originally, I had this set up to do, e.g., a "break.i 0x80000". But
572 it turns out that 0x80000 was used as the syscall break in the early
573 simulators. So I changed the pattern slightly to do "break.i 0x080001"
574 instead. But that didn't work either (I later found out that this
575 pattern was used by the simulator that I was using.) So I ended up
576 using the pattern seen below.
577
578 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
579 while we need bit-based addressing, as the instruction's length is 41 bits and
580 we must not modify/corrupt the adjacent slots in the same bundle.
581 Fortunately we may store a larger memory area, including the adjacent bits, with
582 the original memory content (not the breakpoints possibly already stored there).
583 We need to be careful in ia64_memory_remove_breakpoint to always restore
584 only the specific bits of this instruction ignoring any adjacent stored
585 bits.
586
587 We use the original addressing with the low nibble in the range <0..2> which
588 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
589 as the direct byte offset of SHADOW_CONTENTS. We store the whole BUNDLE_LEN
590 bytes minus the (up to two) bytes skipped at the start, so as not to spill into
591 the next bundle.
592
593 If we wanted to store the whole bundle to SHADOW_CONTENTS we would have
594 to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
595 In that case there would be no place left to store
596 SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
597 SLOTNUM in ia64_memory_remove_breakpoint.
598
599 There is one special case where we need to be extra careful:
600 L-X instructions, which are instructions that occupy 2 slots
601 (The L part is always in slot 1, and the X part is always in
602 slot 2). We must refuse to insert breakpoints for an address
603 that points at slot 2 of a bundle where an L-X instruction is
604 present, since there is logically no instruction at that address.
605 However, to make things more interesting, the opcode of L-X
606 instructions is located in slot 2. This means that, to insert
607 a breakpoint at an address that points to slot 1, we actually
608 need to write the breakpoint in slot 2! Slot 1 is actually
609 the extended operand, so writing the breakpoint there would not
610 have the desired effect. Another side-effect of this issue
611 is that we need to make sure that the shadow contents buffer
612 does save byte 15 of our instruction bundle (this is the tail
613 end of slot 2, which wouldn't be saved if we were to insert
614 the breakpoint in slot 1).
615
616 ia64 16-byte bundle layout:
617 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
618
619 The current addressing used by the code below:
620 original PC   placed_address   placed_size             required    covered
621                                == bp_tgt->shadow_len   reqd \subset covered
622   0xABCDE0      0xABCDE0       0x10                    <0x0...0x5> <0x0..0xF>
623   0xABCDE1      0xABCDE1       0xF                     <0x5...0xA> <0x1..0xF>
624   0xABCDE2      0xABCDE2       0xE                     <0xA...0xF> <0x2..0xF>
625
626 L-X instructions are treated a little specially, as explained above:
627   0xABCDE1      0xABCDE1       0xF                     <0xA...0xF> <0x1..0xF>
628
629 `objdump -d' and some other tools show somewhat unjustified offsets:
630 original PC   byte where the instruction starts   objdump offset
631   0xABCDE0    0xABCDE0                            0xABCDE0
632   0xABCDE1    0xABCDE5                            0xABCDE6
633   0xABCDE2    0xABCDEA                            0xABCDEC
634 */
635
636 #define IA64_BREAKPOINT 0x00003333300LL
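
/* Illustrative sketch, not part of the original file: the shadow
   placement described above keeps the requested address (slot number
   in its low nibble) as PLACED_ADDRESS and covers the rest of the
   bundle, so the shadow length is simply BUNDLE_LEN minus the slot
   number.  For example, a request for 0xABCDE1 (slot 1) covers bytes
   <0x1..0xF> of the bundle and yields a shadow length of 0xF,
   matching the table above.  The helper name is hypothetical.  */

static int
ia64_breakpoint_shadow_len (CORE_ADDR reqstd_address)
{
  int slotnum = (int) (reqstd_address & 0x0f) / SLOT_MULTIPLIER;

  return BUNDLE_LEN - slotnum;
}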
637
638 static int
639 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
640 struct bp_target_info *bp_tgt)
641 {
642 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
643 gdb_byte bundle[BUNDLE_LEN];
644 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
645 long long instr_breakpoint;
646 int val;
647 int templ;
648
649 if (slotnum > 2)
650 error (_("Can't insert breakpoint for slot numbers greater than 2."));
651
652 addr &= ~0x0f;
653
654 /* Enable the automatic memory restoration from breakpoints while
655 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
656 Otherwise, we could possibly store into the shadow parts of the adjacent
657 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
658 breakpoint instruction bits region. */
659 scoped_restore restore_memory_0
660 = make_scoped_restore_show_memory_breakpoints (0);
661 val = target_read_memory (addr, bundle, BUNDLE_LEN);
662 if (val != 0)
663 return val;
664
665 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
666 for addressing the SHADOW_CONTENTS placement. */
667 shadow_slotnum = slotnum;
668
669 /* Always cover the last byte of the bundle in case we are inserting
670 a breakpoint on an L-X instruction. */
671 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
672
673 templ = extract_bit_field (bundle, 0, 5);
674 if (template_encoding_table[templ][slotnum] == X)
675 {
676 /* X unit types can only be used in slot 2, and are actually
677 part of a 2-slot L-X instruction. We cannot break at this
678 address, as this is the second half of an instruction that
679 lives in slot 1 of that bundle. */
680 gdb_assert (slotnum == 2);
681 error (_("Can't insert breakpoint for non-existing slot X"));
682 }
683 if (template_encoding_table[templ][slotnum] == L)
684 {
685 /* L unit types can only be used in slot 1. But the associated
686 opcode for that instruction is in slot 2, so bump the slot number
687 accordingly. */
688 gdb_assert (slotnum == 1);
689 slotnum = 2;
690 }
691
692 /* Store the whole bundle, except for the initial bytes skipped by the slot
693 number, which is interpreted as a byte offset in PLACED_ADDRESS. */
694 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
695 bp_tgt->shadow_len);
696
697 /* Re-read the same bundle as above except that, this time, read it in order
698 to compute the new bundle inside which we will be inserting the
699 breakpoint. Therefore, disable the automatic memory restoration from
700 breakpoints while we read our instruction bundle. Otherwise, the general
701 restoration mechanism kicks in and we would possibly remove parts of the
702 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
703 the real breakpoint instruction bits region. */
704 scoped_restore restore_memory_1
705 = make_scoped_restore_show_memory_breakpoints (1);
706 val = target_read_memory (addr, bundle, BUNDLE_LEN);
707 if (val != 0)
708 return val;
709
710 /* Breakpoints already present in the code will get detected and not get
711 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
712 location cannot induce the internal error as they are optimized into
713 a single instance by update_global_location_list. */
714 instr_breakpoint = slotN_contents (bundle, slotnum);
715 if (instr_breakpoint == IA64_BREAKPOINT)
716 internal_error (__FILE__, __LINE__,
717 _("Address %s already contains a breakpoint."),
718 paddress (gdbarch, bp_tgt->placed_address));
719 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
720
721 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
722 bp_tgt->shadow_len);
723
724 return val;
725 }
726
727 static int
728 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
729 struct bp_target_info *bp_tgt)
730 {
731 CORE_ADDR addr = bp_tgt->placed_address;
732 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
733 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
734 long long instr_breakpoint, instr_saved;
735 int val;
736 int templ;
737
738 addr &= ~0x0f;
739
740 /* Disable the automatic memory restoration from breakpoints while
741 we read our instruction bundle. Otherwise, the general restoration
742 mechanism kicks in and we would possibly remove parts of the adjacent
743 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
744 breakpoint instruction bits region. */
745 scoped_restore restore_memory_1
746 = make_scoped_restore_show_memory_breakpoints (1);
747 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
748 if (val != 0)
749 return val;
750
751 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
752 for addressing the SHADOW_CONTENTS placement. */
753 shadow_slotnum = slotnum;
754
755 templ = extract_bit_field (bundle_mem, 0, 5);
756 if (template_encoding_table[templ][slotnum] == X)
757 {
758 /* X unit types can only be used in slot 2, and are actually
759 part of a 2-slot L-X instruction. We refuse to insert
760 breakpoints at this address, so there should be no reason
761 for us attempting to remove one there, except if the program's
762 code somehow got modified in memory. */
763 gdb_assert (slotnum == 2);
764 warning (_("Cannot remove breakpoint at address %s from non-existing "
765 "X-type slot, memory has changed underneath"),
766 paddress (gdbarch, bp_tgt->placed_address));
767 return -1;
768 }
769 if (template_encoding_table[templ][slotnum] == L)
770 {
771 /* L unit types can only be used in slot 1. But the breakpoint
772 was actually saved using slot 2, so update the slot number
773 accordingly. */
774 gdb_assert (slotnum == 1);
775 slotnum = 2;
776 }
777
778 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
779
780 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
781 if (instr_breakpoint != IA64_BREAKPOINT)
782 {
783 warning (_("Cannot remove breakpoint at address %s, "
784 "no break instruction at such address."),
785 paddress (gdbarch, bp_tgt->placed_address));
786 return -1;
787 }
788
789 /* Extract the original saved instruction from SLOTNUM normalizing its
790 bit-shift for INSTR_SAVED. */
791 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
792 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
793 bp_tgt->shadow_len);
794 instr_saved = slotN_contents (bundle_saved, slotnum);
795
796 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
797 and not any of the other ones that are stored in SHADOW_CONTENTS. */
798 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
799 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
800
801 return val;
802 }
803
804 /* Implement the breakpoint_kind_from_pc gdbarch method. */
805
806 static int
807 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
808 {
809 /* A placeholder for the gdbarch method breakpoint_kind_from_pc. */
810 return 0;
811 }
812
813 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
814 instruction slot ranges are bit-granular (41 bits), we have to provide an
815 extended range as described for ia64_memory_insert_breakpoint. We also take
816 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
817 make a match for permanent breakpoints. */
818
819 static const gdb_byte *
820 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
821 CORE_ADDR *pcptr, int *lenptr)
822 {
823 CORE_ADDR addr = *pcptr;
824 static gdb_byte bundle[BUNDLE_LEN];
825 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
826 long long instr_fetched;
827 int val;
828 int templ;
829
830 if (slotnum > 2)
831 error (_("Can't insert breakpoint for slot numbers greater than 2."));
832
833 addr &= ~0x0f;
834
835 /* Enable the automatic memory restoration from breakpoints while
836 we read our instruction bundle to match bp_loc_is_permanent. */
837 {
838 scoped_restore restore_memory_0
839 = make_scoped_restore_show_memory_breakpoints (0);
840 val = target_read_memory (addr, bundle, BUNDLE_LEN);
841 }
842
843 /* The memory might be unreachable. This can happen, for instance,
844 when the user inserts a breakpoint at an invalid address. */
845 if (val != 0)
846 return NULL;
847
848 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
849 for addressing the SHADOW_CONTENTS placement. */
850 shadow_slotnum = slotnum;
851
852 /* Always cover the last byte of the bundle for the L-X slot case. */
853 *lenptr = BUNDLE_LEN - shadow_slotnum;
854
855 /* Check for an L-type instruction in slot 1; if present, bump the slot
856 number up to slot 2. */
857 templ = extract_bit_field (bundle, 0, 5);
858 if (template_encoding_table[templ][slotnum] == X)
859 {
860 gdb_assert (slotnum == 2);
861 error (_("Can't insert breakpoint for non-existing slot X"));
862 }
863 if (template_encoding_table[templ][slotnum] == L)
864 {
865 gdb_assert (slotnum == 1);
866 slotnum = 2;
867 }
868
869 /* A break instruction has all its opcode bits cleared except for
870 the parameter value. For an L+X slot pair we are at the X slot (slot 2), so
871 we should not touch the L slot - the upper 41 bits of the parameter. */
872 instr_fetched = slotN_contents (bundle, slotnum);
873 instr_fetched &= 0x1003ffffc0LL;
874 replace_slotN_contents (bundle, instr_fetched, slotnum);
875
876 return bundle + shadow_slotnum;
877 }
878
879 static CORE_ADDR
880 ia64_read_pc (readable_regcache *regcache)
881 {
882 ULONGEST psr_value, pc_value;
883 int slot_num;
884
885 regcache->cooked_read (IA64_PSR_REGNUM, &psr_value);
886 regcache->cooked_read (IA64_IP_REGNUM, &pc_value);
887 slot_num = (psr_value >> 41) & 3;
888
889 return pc_value | (slot_num * SLOT_MULTIPLIER);
890 }
891
892 void
893 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
894 {
895 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
896 ULONGEST psr_value;
897
898 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
899 psr_value &= ~(3LL << 41);
900 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
901
902 new_pc &= ~0xfLL;
903
904 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
905 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
906 }
907
908 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
909
910 /* Returns the address of the slot that's NSLOTS slots away from
911 the address ADDR. NSLOTS may be positive or negative. */
912 static CORE_ADDR
913 rse_address_add(CORE_ADDR addr, int nslots)
914 {
915 CORE_ADDR new_addr;
916 int mandatory_nat_slots = nslots / 63;
917 int direction = nslots < 0 ? -1 : 1;
918
919 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
920
921 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
922 new_addr += 8 * direction;
923
924 if (IS_NaT_COLLECTION_ADDR(new_addr))
925 new_addr += 8 * direction;
926
927 return new_addr;
928 }
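
/* Illustrative sketch, not part of the original file: the pseudo
   register code below locates stacked register r32+N by backing up
   from BSP over the whole current frame (CFM.sof slots) to the
   backing-store address of r32 and then stepping N slots forward
   again.  Both steps go through rse_address_add so that NaT
   collection slots (the 8-byte slot at offset 0x1f8 of every
   0x200-byte block, see IS_NaT_COLLECTION_ADDR above) are skipped.
   The helper name is hypothetical.  */

static CORE_ADDR
rse_stacked_reg_addr (CORE_ADDR bsp, ULONGEST cfm, int n)
{
  /* BSP points just past the current frame; CFM.sof (low 7 bits) is
     the frame's size in slots.  */
  CORE_ADDR r32_addr = rse_address_add (bsp, -((int) (cfm & 0x7f)));

  return rse_address_add (r32_addr, n);
}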
929
930 static enum register_status
931 ia64_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
932 int regnum, gdb_byte *buf)
933 {
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
935 enum register_status status;
936
937 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
938 {
939 #ifdef HAVE_LIBUNWIND_IA64_H
940 /* First try and use the libunwind special reg accessor,
941 otherwise fallback to standard logic. */
942 if (!libunwind_is_initialized ()
943 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
944 #endif
945 {
946 /* The fallback position is to assume that r32-r127 are
947 found sequentially in memory starting at $bof. This
948 isn't always true, but without libunwind, this is the
949 best we can do. */
950 ULONGEST cfm;
951 ULONGEST bsp;
952 CORE_ADDR reg;
953
954 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
955 if (status != REG_VALID)
956 return status;
957
958 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
959 if (status != REG_VALID)
960 return status;
961
962 /* The bsp points at the end of the register frame so we
963 subtract the size of frame from it to get start of
964 register frame. */
965 bsp = rse_address_add (bsp, -(cfm & 0x7f));
966
967 if ((cfm & 0x7f) > regnum - V32_REGNUM)
968 {
969 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
970 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
971 store_unsigned_integer (buf, register_size (gdbarch, regnum),
972 byte_order, reg);
973 }
974 else
975 store_unsigned_integer (buf, register_size (gdbarch, regnum),
976 byte_order, 0);
977 }
978 }
979 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
980 {
981 ULONGEST unatN_val;
982 ULONGEST unat;
983
984 status = regcache->cooked_read (IA64_UNAT_REGNUM, &unat);
985 if (status != REG_VALID)
986 return status;
987 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
988 store_unsigned_integer (buf, register_size (gdbarch, regnum),
989 byte_order, unatN_val);
990 }
991 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
992 {
993 ULONGEST natN_val = 0;
994 ULONGEST bsp;
995 ULONGEST cfm;
996 CORE_ADDR gr_addr = 0;
997
998 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
999 if (status != REG_VALID)
1000 return status;
1001
1002 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1003 if (status != REG_VALID)
1004 return status;
1005
1006 /* The bsp points at the end of the register frame so we
1007 subtract the size of frame from it to get start of register frame. */
1008 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1009
1010 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1011 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1012
1013 if (gr_addr != 0)
1014 {
1015 /* Compute address of nat collection bits. */
1016 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1017 ULONGEST nat_collection;
1018 int nat_bit;
1019 /* If our nat collection address is bigger than bsp, we have to get
1020 the nat collection from rnat. Otherwise, we fetch the nat
1021 collection from the computed address. */
1022 if (nat_addr >= bsp)
1023 regcache->cooked_read (IA64_RNAT_REGNUM, &nat_collection);
1024 else
1025 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1026 nat_bit = (gr_addr >> 3) & 0x3f;
1027 natN_val = (nat_collection >> nat_bit) & 1;
1028 }
1029
1030 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1031 byte_order, natN_val);
1032 }
1033 else if (regnum == VBOF_REGNUM)
1034 {
1035 /* A virtual register frame start is provided for user convenience.
1036 It can be calculated as the bsp - sof (size of frame). */
1037 ULONGEST bsp, vbsp;
1038 ULONGEST cfm;
1039
1040 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1041 if (status != REG_VALID)
1042 return status;
1043 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1044 if (status != REG_VALID)
1045 return status;
1046
1047 /* The bsp points at the end of the register frame so we
1048 subtract the size of frame from it to get beginning of frame. */
1049 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1050 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1051 byte_order, vbsp);
1052 }
1053 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1054 {
1055 ULONGEST pr;
1056 ULONGEST cfm;
1057 ULONGEST prN_val;
1058
1059 status = regcache->cooked_read (IA64_PR_REGNUM, &pr);
1060 if (status != REG_VALID)
1061 return status;
1062 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1063 if (status != REG_VALID)
1064 return status;
1065
1066 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1067 {
1068 /* Fetch predicate register rename base from current frame
1069 marker for this frame. */
1070 int rrb_pr = (cfm >> 32) & 0x3f;
1071
1072 /* Adjust the register number to account for register rotation. */
1073 regnum = VP16_REGNUM
1074 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1075 }
1076 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1077 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1078 byte_order, prN_val);
1079 }
1080 else
1081 memset (buf, 0, register_size (gdbarch, regnum));
1082
1083 return REG_VALID;
1084 }
1085
1086 static void
1087 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1088 int regnum, const gdb_byte *buf)
1089 {
1090 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1091
1092 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1093 {
1094 ULONGEST bsp;
1095 ULONGEST cfm;
1096 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1097 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1098
1099 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1100
1101 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1102 {
1103 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1104 write_memory (reg_addr, buf, 8);
1105 }
1106 }
1107 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1108 {
1109 ULONGEST unatN_val, unat, unatN_mask;
1110 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1111 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1112 regnum),
1113 byte_order);
1114 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1115 if (unatN_val == 0)
1116 unat &= ~unatN_mask;
1117 else if (unatN_val == 1)
1118 unat |= unatN_mask;
1119 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1120 }
1121 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1122 {
1123 ULONGEST natN_val;
1124 ULONGEST bsp;
1125 ULONGEST cfm;
1126 CORE_ADDR gr_addr = 0;
1127 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1128 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1129
1130 /* The bsp points at the end of the register frame so we
1131 subtract the size of frame from it to get start of register frame. */
1132 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1133
1134 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1135 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1136
1137 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1138 regnum),
1139 byte_order);
1140
1141 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1142 {
1143 /* Compute address of nat collection bits. */
1144 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1145 CORE_ADDR nat_collection;
1146 int natN_bit = (gr_addr >> 3) & 0x3f;
1147 ULONGEST natN_mask = (1LL << natN_bit);
1148 /* If our nat collection address is bigger than bsp, we have to get
1149 the nat collection from rnat. Otherwise, we fetch the nat
1150 collection from the computed address. */
1151 if (nat_addr >= bsp)
1152 {
1153 regcache_cooked_read_unsigned (regcache,
1154 IA64_RNAT_REGNUM,
1155 &nat_collection);
1156 if (natN_val)
1157 nat_collection |= natN_mask;
1158 else
1159 nat_collection &= ~natN_mask;
1160 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1161 nat_collection);
1162 }
1163 else
1164 {
1165 gdb_byte nat_buf[8];
1166 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1167 if (natN_val)
1168 nat_collection |= natN_mask;
1169 else
1170 nat_collection &= ~natN_mask;
1171 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1172 byte_order, nat_collection);
1173 write_memory (nat_addr, nat_buf, 8);
1174 }
1175 }
1176 }
1177 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1178 {
1179 ULONGEST pr;
1180 ULONGEST cfm;
1181 ULONGEST prN_val;
1182 ULONGEST prN_mask;
1183
1184 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1185 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1186
1187 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1188 {
1189 /* Fetch predicate register rename base from current frame
1190 marker for this frame. */
1191 int rrb_pr = (cfm >> 32) & 0x3f;
1192
1193 /* Adjust the register number to account for register rotation. */
1194 regnum = VP16_REGNUM
1195 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1196 }
1197 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1198 byte_order);
1199 prN_mask = (1LL << (regnum - VP0_REGNUM));
1200 if (prN_val == 0)
1201 pr &= ~prN_mask;
1202 else if (prN_val == 1)
1203 pr |= prN_mask;
1204 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1205 }
1206 }
1207
1208 /* The ia64 needs to convert between various ieee floating-point formats
1209 and the special ia64 floating point register format. */
1210
1211 static int
1212 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1213 {
1214 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1215 && TYPE_CODE (type) == TYPE_CODE_FLT
1216 && type != ia64_ext_type (gdbarch));
1217 }
1218
1219 static int
1220 ia64_register_to_value (struct frame_info *frame, int regnum,
1221 struct type *valtype, gdb_byte *out,
1222 int *optimizedp, int *unavailablep)
1223 {
1224 struct gdbarch *gdbarch = get_frame_arch (frame);
1225 gdb_byte in[IA64_FP_REGISTER_SIZE];
1226
1227 /* Convert to TYPE. */
1228 if (!get_frame_register_bytes (frame, regnum, 0,
1229 register_size (gdbarch, regnum),
1230 in, optimizedp, unavailablep))
1231 return 0;
1232
1233 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1234 *optimizedp = *unavailablep = 0;
1235 return 1;
1236 }
1237
1238 static void
1239 ia64_value_to_register (struct frame_info *frame, int regnum,
1240 struct type *valtype, const gdb_byte *in)
1241 {
1242 struct gdbarch *gdbarch = get_frame_arch (frame);
1243 gdb_byte out[IA64_FP_REGISTER_SIZE];
1244 target_float_convert (in, valtype, out, ia64_ext_type (gdbarch));
1245 put_frame_register (frame, regnum, out);
1246 }
1247
1248
1249 /* Limit the number of skipped non-prologue instructions since examining
1250 the prologue is expensive. */
1251 static int max_skip_non_prologue_insns = 40;
1252
1253 /* Given PC representing the starting address of a function, and
1254 LIM_PC which is the (sloppy) limit to which to scan when looking
1255 for a prologue, attempt to further refine this limit by using
1256 the line data in the symbol table. If successful, a better guess
1257 on where the prologue ends is returned, otherwise the previous
1258 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1259 which will be set to indicate whether the returned limit may be
1260 used with no further scanning in the event that the function is
1261 frameless. */
1262
1263 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1264 superseded by skip_prologue_using_sal. */
1265
1266 static CORE_ADDR
1267 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1268 {
1269 struct symtab_and_line prologue_sal;
1270 CORE_ADDR start_pc = pc;
1271 CORE_ADDR end_pc;
1272
1273 /* The prologue cannot possibly go past the function end itself,
1274 so we can already adjust LIM_PC accordingly. */
1275 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1276 lim_pc = end_pc;
1277
1278 /* Start off not trusting the limit. */
1279 *trust_limit = 0;
1280
1281 prologue_sal = find_pc_line (pc, 0);
1282 if (prologue_sal.line != 0)
1283 {
1284 int i;
1285 CORE_ADDR addr = prologue_sal.end;
1286
1287 /* Handle the case in which the compiler's optimizer/scheduler
1288 has moved instructions into the prologue. We scan ahead
1289 in the function looking for address ranges whose corresponding
1290 line number is less than or equal to the first one that we
1291 found for the function. (It can be less than when the
1292 scheduler puts a body instruction before the first prologue
1293 instruction.) */
1294 for (i = 2 * max_skip_non_prologue_insns;
1295 i > 0 && (lim_pc == 0 || addr < lim_pc);
1296 i--)
1297 {
1298 struct symtab_and_line sal;
1299
1300 sal = find_pc_line (addr, 0);
1301 if (sal.line == 0)
1302 break;
1303 if (sal.line <= prologue_sal.line
1304 && sal.symtab == prologue_sal.symtab)
1305 {
1306 prologue_sal = sal;
1307 }
1308 addr = sal.end;
1309 }
1310
1311 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1312 {
1313 lim_pc = prologue_sal.end;
1314 if (start_pc == get_pc_function_start (lim_pc))
1315 *trust_limit = 1;
1316 }
1317 }
1318 return lim_pc;
1319 }
1320
1321 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1322 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1323 || (14 <= (_regnum_) && (_regnum_) <= 31))
1324 #define imm9(_instr_) \
1325 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1326 | (((_instr_) & 0x00008000000LL) >> 20) \
1327 | (((_instr_) & 0x00000001fc0LL) >> 6))
1328
1329 /* Allocate and initialize a frame cache. */
1330
1331 static struct ia64_frame_cache *
1332 ia64_alloc_frame_cache (void)
1333 {
1334 struct ia64_frame_cache *cache;
1335 int i;
1336
1337 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1338
1339 /* Base address. */
1340 cache->base = 0;
1341 cache->pc = 0;
1342 cache->cfm = 0;
1343 cache->prev_cfm = 0;
1344 cache->sof = 0;
1345 cache->sol = 0;
1346 cache->sor = 0;
1347 cache->bsp = 0;
1348 cache->fp_reg = 0;
1349 cache->frameless = 1;
1350
1351 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1352 cache->saved_regs[i] = 0;
1353
1354 return cache;
1355 }
1356
1357 static CORE_ADDR
1358 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1359 struct frame_info *this_frame,
1360 struct ia64_frame_cache *cache)
1361 {
1362 CORE_ADDR next_pc;
1363 CORE_ADDR last_prologue_pc = pc;
1364 instruction_type it;
1365 long long instr;
1366 int cfm_reg = 0;
1367 int ret_reg = 0;
1368 int fp_reg = 0;
1369 int unat_save_reg = 0;
1370 int pr_save_reg = 0;
1371 int mem_stack_frame_size = 0;
1372 int spill_reg = 0;
1373 CORE_ADDR spill_addr = 0;
1374 char instores[8];
1375 char infpstores[8];
1376 char reg_contents[256];
1377 int trust_limit;
1378 int frameless = 1;
1379 int i;
1380 CORE_ADDR addr;
1381 gdb_byte buf[8];
1382 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1383
1384 memset (instores, 0, sizeof instores);
1385 memset (infpstores, 0, sizeof infpstores);
1386 memset (reg_contents, 0, sizeof reg_contents);
1387
1388 if (cache->after_prologue != 0
1389 && cache->after_prologue <= lim_pc)
1390 return cache->after_prologue;
1391
1392 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1393 next_pc = fetch_instruction (pc, &it, &instr);
1394
1395 /* We want to check if we have a recognizable function start before we
1396 look ahead for a prologue. */
1397 if (pc < lim_pc && next_pc
1398 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1399 {
1400 /* alloc - start of a regular function. */
1401 int sol_bits = (int) ((instr & 0x00007f00000LL) >> 20);
1402 int sof_bits = (int) ((instr & 0x000000fe000LL) >> 13);
1403 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1404
1405 /* Verify that the current cfm matches what we think is the
1406 function start. If we have somehow jumped within a function,
1407 we do not want to interpret the prologue and calculate the
1408 addresses of various registers such as the return address.
1409 We will instead treat the frame as frameless. */
1410 if (!this_frame ||
1411 (sof_bits == (cache->cfm & 0x7f) &&
1412 sol_bits == ((cache->cfm >> 7) & 0x7f)))
1413 frameless = 0;
1414
1415 cfm_reg = rN;
1416 last_prologue_pc = next_pc;
1417 pc = next_pc;
1418 }
1419 else
1420 {
1421 /* Look for a leaf routine. */
1422 if (pc < lim_pc && next_pc
1423 && (it == I || it == M)
1424 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1425 {
1426 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1427 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1428 | ((instr & 0x001f8000000LL) >> 20)
1429 | ((instr & 0x000000fe000LL) >> 13));
1430 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1431 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1432 int qp = (int) (instr & 0x0000000003fLL);
1433 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1434 {
1435 /* mov r2, r12 - beginning of leaf routine. */
1436 fp_reg = rN;
1437 last_prologue_pc = next_pc;
1438 }
1439 }
1440
1441 /* If we don't recognize a regular function or leaf routine, we are
1442 done. */
1443 if (!fp_reg)
1444 {
1445 pc = lim_pc;
1446 if (trust_limit)
1447 last_prologue_pc = lim_pc;
1448 }
1449 }
1450
1451 /* Loop, looking for prologue instructions, keeping track of
1452 where preserved registers were spilled. */
1453 while (pc < lim_pc)
1454 {
1455 next_pc = fetch_instruction (pc, &it, &instr);
1456 if (next_pc == 0)
1457 break;
1458
1459 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1460 {
1461 /* Exit loop upon hitting a non-nop branch instruction. */
1462 if (trust_limit)
1463 lim_pc = pc;
1464 break;
1465 }
1466 else if (((instr & 0x3fLL) != 0LL) &&
1467 (frameless || ret_reg != 0))
1468 {
1469 /* Exit loop upon hitting a predicated instruction if
1470 we already have the return register or if we are frameless. */
1471 if (trust_limit)
1472 lim_pc = pc;
1473 break;
1474 }
1475 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1476 {
1477 /* Move from BR */
1478 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1479 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1480 int qp = (int) (instr & 0x0000000003f);
1481
1482 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1483 {
1484 ret_reg = rN;
1485 last_prologue_pc = next_pc;
1486 }
1487 }
1488 else if ((it == I || it == M)
1489 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1490 {
1491 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1492 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1493 | ((instr & 0x001f8000000LL) >> 20)
1494 | ((instr & 0x000000fe000LL) >> 13));
1495 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1496 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1497 int qp = (int) (instr & 0x0000000003fLL);
1498
1499 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1500 {
1501 /* mov rN, r12 */
1502 fp_reg = rN;
1503 last_prologue_pc = next_pc;
1504 }
1505 else if (qp == 0 && rN == 12 && rM == 12)
1506 {
1507 /* adds r12, -mem_stack_frame_size, r12 */
1508 mem_stack_frame_size -= imm;
1509 last_prologue_pc = next_pc;
1510 }
1511 else if (qp == 0 && rN == 2
1512 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1513 {
1514 CORE_ADDR saved_sp = 0;
1515 /* adds r2, spilloffset, rFramePointer
1516 or
1517 adds r2, spilloffset, r12
1518
1519 Get ready for stf.spill or st8.spill instructions.
1520 The address to start spilling at is loaded into r2.
1521 FIXME: Why r2? That's what gcc currently uses; it
1522 could well be different for other compilers. */
1523
1524 /* Hmm... whether or not this will work will depend on
1525 where the pc is. If it's still early in the prologue
1526 this'll be wrong. FIXME */
1527 if (this_frame)
1528 saved_sp = get_frame_register_unsigned (this_frame,
1529 sp_regnum);
1530 spill_addr = saved_sp
1531 + (rM == 12 ? 0 : mem_stack_frame_size)
1532 + imm;
1533 spill_reg = rN;
1534 last_prologue_pc = next_pc;
1535 }
1536 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1537 rN < 256 && imm == 0)
1538 {
1539 /* mov rN, rM where rM is an input register. */
1540 reg_contents[rN] = rM;
1541 last_prologue_pc = next_pc;
1542 }
1543 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1544 rM == 2)
1545 {
1546 /* mov r12, r2 */
1547 last_prologue_pc = next_pc;
1548 break;
1549 }
1550 }
1551 else if (it == M
1552 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1553 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1554 {
1555 /* stf.spill [rN] = fM, imm9
1556 or
1557 stf.spill [rN] = fM */
1558
1559 int imm = imm9(instr);
1560 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1561 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1562 int qp = (int) (instr & 0x0000000003fLL);
1563 if (qp == 0 && rN == spill_reg && spill_addr != 0
1564 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1565 {
1566 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1567
1568 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1569 spill_addr += imm;
1570 else
1571 spill_addr = 0; /* last one; must be done. */
1572 last_prologue_pc = next_pc;
1573 }
1574 }
1575 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1576 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1577 {
1578 /* mov.m rN = arM
1579 or
1580 mov.i rN = arM */
1581
1582 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1583 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1584 int qp = (int) (instr & 0x0000000003fLL);
1585 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1586 {
1587 /* We have something like "mov.m r3 = ar.unat". Remember the
1588 r3 (or whatever) and watch for a store of this register... */
1589 unat_save_reg = rN;
1590 last_prologue_pc = next_pc;
1591 }
1592 }
1593 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1594 {
1595 /* mov rN = pr */
1596 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1597 int qp = (int) (instr & 0x0000000003fLL);
1598 if (qp == 0 && isScratch (rN))
1599 {
1600 pr_save_reg = rN;
1601 last_prologue_pc = next_pc;
1602 }
1603 }
1604 else if (it == M
1605 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1606 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1607 {
1608 /* st8 [rN] = rM
1609 or
1610 st8 [rN] = rM, imm9 */
1611 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1612 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1613 int qp = (int) (instr & 0x0000000003fLL);
1614 int indirect = rM < 256 ? reg_contents[rM] : 0;
1615 if (qp == 0 && rN == spill_reg && spill_addr != 0
1616 && (rM == unat_save_reg || rM == pr_save_reg))
1617 {
1618 /* We've found a spill of either the UNAT register or the PR
1619 register. (Well, not exactly; what we've actually found is
1620 a spill of the register that UNAT or PR was moved to).
1621 Record that fact and move on... */
1622 if (rM == unat_save_reg)
1623 {
1624 /* Track UNAT register. */
1625 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1626 unat_save_reg = 0;
1627 }
1628 else
1629 {
1630 /* Track PR register. */
1631 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1632 pr_save_reg = 0;
1633 }
1634 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1635 /* st8 [rN] = rM, imm9 */
1636 spill_addr += imm9(instr);
1637 else
1638 spill_addr = 0; /* Must be done spilling. */
1639 last_prologue_pc = next_pc;
1640 }
1641 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1642 {
1643 /* Allow up to one store of each input register. */
1644 instores[rM-32] = 1;
1645 last_prologue_pc = next_pc;
1646 }
1647 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1648 !instores[indirect-32])
1649 {
1650 /* Allow an indirect store of an input register. */
1651 instores[indirect-32] = 1;
1652 last_prologue_pc = next_pc;
1653 }
1654 }
1655 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1656 {
1657 /* One of
1658 st1 [rN] = rM
1659 st2 [rN] = rM
1660 st4 [rN] = rM
1661 st8 [rN] = rM
1662 Note that the st8 case is handled in the clause above.
1663
1664 Advance over stores of input registers. One store per input
1665 register is permitted. */
1666 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1667 int qp = (int) (instr & 0x0000000003fLL);
1668 int indirect = rM < 256 ? reg_contents[rM] : 0;
1669 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1670 {
1671 instores[rM-32] = 1;
1672 last_prologue_pc = next_pc;
1673 }
1674 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1675 !instores[indirect-32])
1676 {
1677 /* Allow an indirect store of an input register. */
1678 instores[indirect-32] = 1;
1679 last_prologue_pc = next_pc;
1680 }
1681 }
1682 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1683 {
1684 /* Either
1685 stfs [rN] = fM
1686 or
1687 stfd [rN] = fM
1688
1689 Advance over stores of floating point input registers. Again
1690 one store per register is permitted. */
1691 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1692 int qp = (int) (instr & 0x0000000003fLL);
1693 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1694 {
1695 infpstores[fM-8] = 1;
1696 last_prologue_pc = next_pc;
1697 }
1698 }
1699 else if (it == M
1700 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1701 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1702 {
1703 /* st8.spill [rN] = rM
1704 or
1705 st8.spill [rN] = rM, imm9 */
1706 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1707 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1708 int qp = (int) (instr & 0x0000000003fLL);
1709 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1710 {
1711 /* We've found a spill of one of the preserved general purpose
1712 regs. Record the spill address and advance the spill
1713 register if appropriate. */
1714 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1715 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1716 /* st8.spill [rN] = rM, imm9 */
1717 spill_addr += imm9(instr);
1718 else
1719 spill_addr = 0; /* Done spilling. */
1720 last_prologue_pc = next_pc;
1721 }
1722 }
1723
1724 pc = next_pc;
1725 }
1726
1727 /* If not frameless and we aren't called by skip_prologue, then we need
1728 to calculate registers for the previous frame which will be needed
1729 later. */
1730
1731 if (!frameless && this_frame)
1732 {
1733 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1734 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1735
1736 /* Extract the size of the rotating portion of the stack
1737 frame and the register rename base from the current
1738 frame marker. */
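/* For reference (per the Itanium architecture manuals), the CFM
   fields decoded by the shifts below, and elsewhere in this file,
   are: sof in bits 0..6, sol in bits 7..13, sor/8 in bits 14..17,
   rrb.gr in bits 18..24, rrb.fr in bits 25..31 and rrb.pr in
   bits 32..37.  */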
1739 cfm = cache->cfm;
1740 sor = cache->sor;
1741 sof = cache->sof;
1742 sol = cache->sol;
1743 rrb_gr = (cfm >> 18) & 0x7f;
1744
1745 /* Find the bof (beginning of frame). */
1746 bof = rse_address_add (cache->bsp, -sof);
1747
1748 for (i = 0, addr = bof;
1749 i < sof;
1750 i++, addr += 8)
1751 {
1752 if (IS_NaT_COLLECTION_ADDR (addr))
1753 {
1754 addr += 8;
1755 }
1756 if (i+32 == cfm_reg)
1757 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1758 if (i+32 == ret_reg)
1759 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1760 if (i+32 == fp_reg)
1761 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1762 }
1763
1764 /* For the previous argument registers we require the previous bof.
1765 If we can't find the previous cfm, then we can do nothing. */
1766 cfm = 0;
1767 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1768 {
1769 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1770 8, byte_order);
1771 }
1772 else if (cfm_reg != 0)
1773 {
1774 get_frame_register (this_frame, cfm_reg, buf);
1775 cfm = extract_unsigned_integer (buf, 8, byte_order);
1776 }
1777 cache->prev_cfm = cfm;
1778
1779 if (cfm != 0)
1780 {
1781 sor = ((cfm >> 14) & 0xf) * 8;
1782 sof = (cfm & 0x7f);
1783 sol = (cfm >> 7) & 0x7f;
1784 rrb_gr = (cfm >> 18) & 0x7f;
1785
1786 /* The previous bof only requires subtraction of the sol (size of
1787 locals) due to the overlap between output and input of
1788 subsequent frames. */
1789 bof = rse_address_add (bof, -sol);
1790
1791 for (i = 0, addr = bof;
1792 i < sof;
1793 i++, addr += 8)
1794 {
1795 if (IS_NaT_COLLECTION_ADDR (addr))
1796 {
1797 addr += 8;
1798 }
1799 if (i < sor)
1800 cache->saved_regs[IA64_GR32_REGNUM
1801 + ((i + (sor - rrb_gr)) % sor)]
1802 = addr;
1803 else
1804 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1805 }
1806
1807 }
1808 }
1809
1810 /* Try to trust the lim_pc value whenever possible. */
1811 if (trust_limit && lim_pc >= last_prologue_pc)
1812 last_prologue_pc = lim_pc;
1813
1814 cache->frameless = frameless;
1815 cache->after_prologue = last_prologue_pc;
1816 cache->mem_stack_frame_size = mem_stack_frame_size;
1817 cache->fp_reg = fp_reg;
1818
1819 return last_prologue_pc;
1820 }
1821
1822 CORE_ADDR
1823 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1824 {
1825 struct ia64_frame_cache cache;
1826 cache.base = 0;
1827 cache.after_prologue = 0;
1828 cache.cfm = 0;
1829 cache.bsp = 0;
1830
1831 /* Call examine_prologue with 0 as the third argument since we don't
1832 have a next frame pointer to send. */
1833 return examine_prologue (pc, pc+1024, 0, &cache);
1834 }
1835
1836
1837 /* Normal frames. */
1838
1839 static struct ia64_frame_cache *
1840 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1841 {
1842 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1843 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1844 struct ia64_frame_cache *cache;
1845 gdb_byte buf[8];
1846 CORE_ADDR cfm;
1847
1848 if (*this_cache)
1849 return (struct ia64_frame_cache *) *this_cache;
1850
1851 cache = ia64_alloc_frame_cache ();
1852 *this_cache = cache;
1853
1854 get_frame_register (this_frame, sp_regnum, buf);
1855 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1856
1857 /* We always want the bsp to point to the end of frame.
1858 This way, we can always get the beginning of frame (bof)
1859 by subtracting frame size. */
1860 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1861 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1862
1863 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1864
1865 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1866 cfm = extract_unsigned_integer (buf, 8, byte_order);
1867
1868 cache->sof = (cfm & 0x7f);
1869 cache->sol = (cfm >> 7) & 0x7f;
1870 cache->sor = ((cfm >> 14) & 0xf) * 8;
1871
1872 cache->cfm = cfm;
1873
1874 cache->pc = get_frame_func (this_frame);
1875
1876 if (cache->pc != 0)
1877 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1878
1879 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1880
1881 return cache;
1882 }
1883
1884 static void
1885 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1886 struct frame_id *this_id)
1887 {
1888 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1889 struct ia64_frame_cache *cache =
1890 ia64_frame_cache (this_frame, this_cache);
1891
1892 /* If outermost frame, mark with null frame id. */
1893 if (cache->base != 0)
1894 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1895 if (gdbarch_debug >= 1)
1896 fprintf_unfiltered (gdb_stdlog,
1897 "regular frame id: code %s, stack %s, "
1898 "special %s, this_frame %s\n",
1899 paddress (gdbarch, this_id->code_addr),
1900 paddress (gdbarch, this_id->stack_addr),
1901 paddress (gdbarch, cache->bsp),
1902 host_address_to_string (this_frame));
1903 }
1904
1905 static struct value *
1906 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1907 int regnum)
1908 {
1909 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1910 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1911 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1912 gdb_byte buf[8];
1913
1914 gdb_assert (regnum >= 0);
1915
1916 if (!target_has_registers)
1917 error (_("No registers."));
1918
1919 if (regnum == gdbarch_sp_regnum (gdbarch))
1920 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1921
1922 else if (regnum == IA64_BSP_REGNUM)
1923 {
1924 struct value *val;
1925 CORE_ADDR prev_cfm, bsp, prev_bsp;
1926
1927 /* We want to calculate the previous bsp as the end of the previous
1928 register stack frame. This corresponds to what the hardware bsp
1929 register will be if we pop the frame back, which is why we might
1930 have been called. We know the beginning of the current frame is
1931 cache->bsp - cache->sof. This value in the previous frame points
1932 to the start of the output registers. We can calculate the end of
1933 that frame by adding the size of output:
1934 (sof (size of frame) - sol (size of locals)). */
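/* A hypothetical example: if the previous frame has sof = 10 and
   sol = 6, its four output registers overlap the start of the
   current frame, so prev_bsp is the current bof advanced by four
   slots (NAT collection slots are handled by rse_address_add).  */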
1935 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1936 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1937 8, byte_order);
1938 bsp = rse_address_add (cache->bsp, -(cache->sof));
1939 prev_bsp =
1940 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1941
1942 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1943 }
1944
1945 else if (regnum == IA64_CFM_REGNUM)
1946 {
1947 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1948
1949 if (addr != 0)
1950 return frame_unwind_got_memory (this_frame, regnum, addr);
1951
1952 if (cache->prev_cfm)
1953 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1954
1955 if (cache->frameless)
1956 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1957 IA64_PFS_REGNUM);
1958 return frame_unwind_got_register (this_frame, regnum, 0);
1959 }
1960
1961 else if (regnum == IA64_VFP_REGNUM)
1962 {
1963 /* If the function in question uses an automatic register (r32-r127)
1964 for the frame pointer, it'll be found by ia64_find_saved_register()
1965 above. If the function lacks one of these frame pointers, we can
1966 still provide a value since we know the size of the frame. */
1967 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1968 }
1969
1970 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1971 {
1972 struct value *pr_val;
1973 ULONGEST prN;
1974
1975 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1976 IA64_PR_REGNUM);
1977 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1978 {
1979 /* Fetch predicate register rename base from current frame
1980 marker for this frame. */
1981 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1982
1983 /* Adjust the register number to account for register rotation. */
1984 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1985 }
1986 prN = extract_bit_field (value_contents_all (pr_val),
1987 regnum - VP0_REGNUM, 1);
1988 return frame_unwind_got_constant (this_frame, regnum, prN);
1989 }
1990
1991 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1992 {
1993 struct value *unat_val;
1994 ULONGEST unatN;
1995 unat_val = ia64_frame_prev_register (this_frame, this_cache,
1996 IA64_UNAT_REGNUM);
1997 unatN = extract_bit_field (value_contents_all (unat_val),
1998 regnum - IA64_NAT0_REGNUM, 1);
1999 return frame_unwind_got_constant (this_frame, regnum, unatN);
2000 }
2001
2002 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2003 {
2004 int natval = 0;
2005 /* Find address of general register corresponding to nat bit we're
2006 interested in. */
2007 CORE_ADDR gr_addr;
2008
2009 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2010
2011 if (gr_addr != 0)
2012 {
2013 /* Compute address of nat collection bits. */
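/* On the register backing store, a NAT collection occupies the
   slot whose address has bits 3..8 all set, so OR-ing with 0x1f8
   yields the address of the collection that covers GR_ADDR; the
   bit index within it is the register slot number computed below
   as (gr_addr >> 3) & 0x3f.  */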
2014 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2015 CORE_ADDR bsp;
2016 CORE_ADDR nat_collection;
2017 int nat_bit;
2018
2019 /* If our nat collection address is bigger than bsp, we have to get
2020 the nat collection from rnat. Otherwise, we fetch the nat
2021 collection from the computed address. */
2022 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2023 bsp = extract_unsigned_integer (buf, 8, byte_order);
2024 if (nat_addr >= bsp)
2025 {
2026 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2027 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2028 }
2029 else
2030 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2031 nat_bit = (gr_addr >> 3) & 0x3f;
2032 natval = (nat_collection >> nat_bit) & 1;
2033 }
2034
2035 return frame_unwind_got_constant (this_frame, regnum, natval);
2036 }
2037
2038 else if (regnum == IA64_IP_REGNUM)
2039 {
2040 CORE_ADDR pc = 0;
2041 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2042
2043 if (addr != 0)
2044 {
2045 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2046 pc = extract_unsigned_integer (buf, 8, byte_order);
2047 }
2048 else if (cache->frameless)
2049 {
2050 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2051 pc = extract_unsigned_integer (buf, 8, byte_order);
2052 }
2053 pc &= ~0xf;
2054 return frame_unwind_got_constant (this_frame, regnum, pc);
2055 }
2056
2057 else if (regnum == IA64_PSR_REGNUM)
2058 {
2059 /* We don't know how to get the complete previous PSR, but we need it
2060 for the slot information when we unwind the pc (pc is formed of IP
2061 register plus slot information from PSR). To get the previous
2062 slot information, we mask it out of the return address. */
2063 ULONGEST slot_num = 0;
2064 CORE_ADDR pc = 0;
2065 CORE_ADDR psr = 0;
2066 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2067
2068 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2069 psr = extract_unsigned_integer (buf, 8, byte_order);
2070
2071 if (addr != 0)
2072 {
2073 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2074 pc = extract_unsigned_integer (buf, 8, byte_order);
2075 }
2076 else if (cache->frameless)
2077 {
2078 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2079 pc = extract_unsigned_integer (buf, 8, byte_order);
2080 }
2081 psr &= ~(3LL << 41);
2082 slot_num = pc & 0x3LL;
2083 psr |= (CORE_ADDR)slot_num << 41;
2084 return frame_unwind_got_constant (this_frame, regnum, psr);
2085 }
2086
2087 else if (regnum == IA64_BR0_REGNUM)
2088 {
2089 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2090
2091 if (addr != 0)
2092 return frame_unwind_got_memory (this_frame, regnum, addr);
2093
2094 return frame_unwind_got_constant (this_frame, regnum, 0);
2095 }
2096
2097 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2098 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2099 {
2100 CORE_ADDR addr = 0;
2101
2102 if (regnum >= V32_REGNUM)
2103 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2104 addr = cache->saved_regs[regnum];
2105 if (addr != 0)
2106 return frame_unwind_got_memory (this_frame, regnum, addr);
2107
2108 if (cache->frameless)
2109 {
2110 struct value *reg_val;
2111 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2112
2113 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2114 with the same code above? */
2115 if (regnum >= V32_REGNUM)
2116 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2117 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2118 IA64_CFM_REGNUM);
2119 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2120 8, byte_order);
2121 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2122 IA64_BSP_REGNUM);
2123 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2124 8, byte_order);
2125 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2126
2127 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2128 return frame_unwind_got_memory (this_frame, regnum, addr);
2129 }
2130
2131 return frame_unwind_got_constant (this_frame, regnum, 0);
2132 }
2133
2134 else /* All other registers. */
2135 {
2136 CORE_ADDR addr = 0;
2137
2138 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2139 {
2140 /* Fetch floating point register rename base from current
2141 frame marker for this frame. */
2142 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2143
2144 /* Adjust the floating point register number to account for
2145 register rotation. */
2146 regnum = IA64_FR32_REGNUM
2147 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2148 }
2149
2150 /* If we have stored a memory address, access the register. */
2151 addr = cache->saved_regs[regnum];
2152 if (addr != 0)
2153 return frame_unwind_got_memory (this_frame, regnum, addr);
2154 /* Otherwise, punt and get the current value of the register. */
2155 else
2156 return frame_unwind_got_register (this_frame, regnum, regnum);
2157 }
2158 }
2159
2160 static const struct frame_unwind ia64_frame_unwind =
2161 {
2162 NORMAL_FRAME,
2163 default_frame_unwind_stop_reason,
2164 &ia64_frame_this_id,
2165 &ia64_frame_prev_register,
2166 NULL,
2167 default_frame_sniffer
2168 };
2169
2170 /* Signal trampolines. */
2171
2172 static void
2173 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2174 struct ia64_frame_cache *cache)
2175 {
2176 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2177 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2178
2179 if (tdep->sigcontext_register_address)
2180 {
2181 int regno;
2182
2183 cache->saved_regs[IA64_VRAP_REGNUM]
2184 = tdep->sigcontext_register_address (gdbarch, cache->base,
2185 IA64_IP_REGNUM);
2186 cache->saved_regs[IA64_CFM_REGNUM]
2187 = tdep->sigcontext_register_address (gdbarch, cache->base,
2188 IA64_CFM_REGNUM);
2189 cache->saved_regs[IA64_PSR_REGNUM]
2190 = tdep->sigcontext_register_address (gdbarch, cache->base,
2191 IA64_PSR_REGNUM);
2192 cache->saved_regs[IA64_BSP_REGNUM]
2193 = tdep->sigcontext_register_address (gdbarch, cache->base,
2194 IA64_BSP_REGNUM);
2195 cache->saved_regs[IA64_RNAT_REGNUM]
2196 = tdep->sigcontext_register_address (gdbarch, cache->base,
2197 IA64_RNAT_REGNUM);
2198 cache->saved_regs[IA64_CCV_REGNUM]
2199 = tdep->sigcontext_register_address (gdbarch, cache->base,
2200 IA64_CCV_REGNUM);
2201 cache->saved_regs[IA64_UNAT_REGNUM]
2202 = tdep->sigcontext_register_address (gdbarch, cache->base,
2203 IA64_UNAT_REGNUM);
2204 cache->saved_regs[IA64_FPSR_REGNUM]
2205 = tdep->sigcontext_register_address (gdbarch, cache->base,
2206 IA64_FPSR_REGNUM);
2207 cache->saved_regs[IA64_PFS_REGNUM]
2208 = tdep->sigcontext_register_address (gdbarch, cache->base,
2209 IA64_PFS_REGNUM);
2210 cache->saved_regs[IA64_LC_REGNUM]
2211 = tdep->sigcontext_register_address (gdbarch, cache->base,
2212 IA64_LC_REGNUM);
2213
2214 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2215 cache->saved_regs[regno] =
2216 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2217 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2218 cache->saved_regs[regno] =
2219 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2220 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2221 cache->saved_regs[regno] =
2222 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2223 }
2224 }
2225
2226 static struct ia64_frame_cache *
2227 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2228 {
2229 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2230 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2231 struct ia64_frame_cache *cache;
2232 gdb_byte buf[8];
2233
2234 if (*this_cache)
2235 return (struct ia64_frame_cache *) *this_cache;
2236
2237 cache = ia64_alloc_frame_cache ();
2238
2239 get_frame_register (this_frame, sp_regnum, buf);
2240 /* Note that frame size is hard-coded below. We cannot calculate it
2241 via prologue examination. */
2242 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2243
2244 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2245 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2246
2247 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2248 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2249 cache->sof = cache->cfm & 0x7f;
2250
2251 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2252
2253 *this_cache = cache;
2254 return cache;
2255 }
2256
2257 static void
2258 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2259 void **this_cache, struct frame_id *this_id)
2260 {
2261 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2262 struct ia64_frame_cache *cache =
2263 ia64_sigtramp_frame_cache (this_frame, this_cache);
2264
2265 (*this_id) = frame_id_build_special (cache->base,
2266 get_frame_pc (this_frame),
2267 cache->bsp);
2268 if (gdbarch_debug >= 1)
2269 fprintf_unfiltered (gdb_stdlog,
2270 "sigtramp frame id: code %s, stack %s, "
2271 "special %s, this_frame %s\n",
2272 paddress (gdbarch, this_id->code_addr),
2273 paddress (gdbarch, this_id->stack_addr),
2274 paddress (gdbarch, cache->bsp),
2275 host_address_to_string (this_frame));
2276 }
2277
2278 static struct value *
2279 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2280 void **this_cache, int regnum)
2281 {
2282 struct ia64_frame_cache *cache =
2283 ia64_sigtramp_frame_cache (this_frame, this_cache);
2284
2285 gdb_assert (regnum >= 0);
2286
2287 if (!target_has_registers)
2288 error (_("No registers."));
2289
2290 if (regnum == IA64_IP_REGNUM)
2291 {
2292 CORE_ADDR pc = 0;
2293 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2294
2295 if (addr != 0)
2296 {
2297 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2298 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2299 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2300 }
2301 pc &= ~0xf;
2302 return frame_unwind_got_constant (this_frame, regnum, pc);
2303 }
2304
2305 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2306 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2307 {
2308 CORE_ADDR addr = 0;
2309
2310 if (regnum >= V32_REGNUM)
2311 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2312 addr = cache->saved_regs[regnum];
2313 if (addr != 0)
2314 return frame_unwind_got_memory (this_frame, regnum, addr);
2315
2316 return frame_unwind_got_constant (this_frame, regnum, 0);
2317 }
2318
2319 else /* All other registers not listed above. */
2320 {
2321 CORE_ADDR addr = cache->saved_regs[regnum];
2322
2323 if (addr != 0)
2324 return frame_unwind_got_memory (this_frame, regnum, addr);
2325
2326 return frame_unwind_got_constant (this_frame, regnum, 0);
2327 }
2328 }
2329
2330 static int
2331 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2332 struct frame_info *this_frame,
2333 void **this_cache)
2334 {
2335 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2336 if (tdep->pc_in_sigtramp)
2337 {
2338 CORE_ADDR pc = get_frame_pc (this_frame);
2339
2340 if (tdep->pc_in_sigtramp (pc))
2341 return 1;
2342 }
2343
2344 return 0;
2345 }
2346
2347 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2348 {
2349 SIGTRAMP_FRAME,
2350 default_frame_unwind_stop_reason,
2351 ia64_sigtramp_frame_this_id,
2352 ia64_sigtramp_frame_prev_register,
2353 NULL,
2354 ia64_sigtramp_frame_sniffer
2355 };
2356
2357 \f
2358
2359 static CORE_ADDR
2360 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2361 {
2362 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2363
2364 return cache->base;
2365 }
2366
2367 static const struct frame_base ia64_frame_base =
2368 {
2369 &ia64_frame_unwind,
2370 ia64_frame_base_address,
2371 ia64_frame_base_address,
2372 ia64_frame_base_address
2373 };
2374
2375 #ifdef HAVE_LIBUNWIND_IA64_H
2376
2377 struct ia64_unwind_table_entry
2378 {
2379 unw_word_t start_offset;
2380 unw_word_t end_offset;
2381 unw_word_t info_offset;
2382 };
2383
2384 static __inline__ uint64_t
2385 ia64_rse_slot_num (uint64_t addr)
2386 {
2387 return (addr >> 3) & 0x3f;
2388 }
2389
2390 /* Skip over a designated number of registers in the backing
2391 store, remembering that every 64th position is a NAT collection. */
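/* As a hypothetical illustration of the arithmetic below: skipping
   63 registers forward from a slot-0 address crosses exactly one
   NAT collection slot, so the result is addr + 64*8 rather than
   addr + 63*8.  */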
2392 static __inline__ uint64_t
2393 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2394 {
2395 long delta = ia64_rse_slot_num(addr) + num_regs;
2396
2397 if (num_regs < 0)
2398 delta -= 0x3e;
2399 return addr + ((num_regs + delta/0x3f) << 3);
2400 }
2401
2402 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2403 register number to a libunwind register number. */
2404 static int
2405 ia64_gdb2uw_regnum (int regnum)
2406 {
2407 if (regnum == sp_regnum)
2408 return UNW_IA64_SP;
2409 else if (regnum == IA64_BSP_REGNUM)
2410 return UNW_IA64_BSP;
2411 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2412 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2413 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2414 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2415 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2416 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2417 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2418 return -1;
2419 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2420 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2421 else if (regnum == IA64_PR_REGNUM)
2422 return UNW_IA64_PR;
2423 else if (regnum == IA64_IP_REGNUM)
2424 return UNW_REG_IP;
2425 else if (regnum == IA64_CFM_REGNUM)
2426 return UNW_IA64_CFM;
2427 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2428 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2429 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2430 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2431 else
2432 return -1;
2433 }
2434
2435 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2436 register number to an ia64 gdb register number. */
2437 static int
2438 ia64_uw2gdb_regnum (int uw_regnum)
2439 {
2440 if (uw_regnum == UNW_IA64_SP)
2441 return sp_regnum;
2442 else if (uw_regnum == UNW_IA64_BSP)
2443 return IA64_BSP_REGNUM;
2444 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2445 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2446 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2447 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2448 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2449 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2450 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2451 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2452 else if (uw_regnum == UNW_IA64_PR)
2453 return IA64_PR_REGNUM;
2454 else if (uw_regnum == UNW_REG_IP)
2455 return IA64_IP_REGNUM;
2456 else if (uw_regnum == UNW_IA64_CFM)
2457 return IA64_CFM_REGNUM;
2458 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2459 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2460 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2461 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2462 else
2463 return -1;
2464 }
2465
2466 /* Gdb ia64-libunwind-tdep callback function to reveal whether a register
2467 is a float register or not. */
2468 static int
2469 ia64_is_fpreg (int uw_regnum)
2470 {
2471 return unw_is_fpreg (uw_regnum);
2472 }
2473
2474 /* Libunwind callback accessor function for general registers. */
2475 static int
2476 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2477 int write, void *arg)
2478 {
2479 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2480 unw_word_t bsp, sof, cfm, psr, ip;
2481 struct frame_info *this_frame = (struct frame_info *) arg;
2482 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2483
2484 /* We never call any libunwind routines that need to write registers. */
2485 gdb_assert (!write);
2486
2487 switch (uw_regnum)
2488 {
2489 case UNW_REG_IP:
2490 /* Libunwind expects to see the pc value which means the slot number
2491 from the psr must be merged with the ip word address. */
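/* (The slot number is the psr.ri field, bits 41..42 of the PSR,
   hence the shift and mask below.)  */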
2492 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2493 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2494 *val = ip | ((psr >> 41) & 0x3);
2495 break;
2496
2497 case UNW_IA64_AR_BSP:
2498 /* Libunwind expects to see the beginning of the current
2499 register frame so we must account for the fact that
2500 ptrace() will return a value for bsp that points *after*
2501 the current register frame. */
2502 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2503 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2504 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2505 *val = ia64_rse_skip_regs (bsp, -sof);
2506 break;
2507
2508 case UNW_IA64_AR_BSPSTORE:
2509 /* Libunwind wants bspstore to be after the current register frame.
2510 This is what ptrace() and gdb treat as the regular bsp value. */
2511 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2512 break;
2513
2514 default:
2515 /* For all other registers, just unwind the value directly. */
2516 *val = get_frame_register_unsigned (this_frame, regnum);
2517 break;
2518 }
2519
2520 if (gdbarch_debug >= 1)
2521 fprintf_unfiltered (gdb_stdlog,
2522 " access_reg: from cache: %4s=%s\n",
2523 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2524 ? ia64_register_names[regnum] : "r??"),
2525 paddress (gdbarch, *val));
2526 return 0;
2527 }
2528
2529 /* Libunwind callback accessor function for floating-point registers. */
2530 static int
2531 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2532 unw_fpreg_t *val, int write, void *arg)
2533 {
2534 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2535 struct frame_info *this_frame = (struct frame_info *) arg;
2536
2537 /* We never call any libunwind routines that need to write registers. */
2538 gdb_assert (!write);
2539
2540 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2541
2542 return 0;
2543 }
2544
2545 /* Libunwind callback accessor function for top-level rse registers. */
2546 static int
2547 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2548 unw_word_t *val, int write, void *arg)
2549 {
2550 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2551 unw_word_t bsp, sof, cfm, psr, ip;
2552 struct regcache *regcache = (struct regcache *) arg;
2553 struct gdbarch *gdbarch = regcache->arch ();
2554
2555 /* We never call any libunwind routines that need to write registers. */
2556 gdb_assert (!write);
2557
2558 switch (uw_regnum)
2559 {
2560 case UNW_REG_IP:
2561 /* Libunwind expects to see the pc value which means the slot number
2562 from the psr must be merged with the ip word address. */
2563 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2564 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2565 *val = ip | ((psr >> 41) & 0x3);
2566 break;
2567
2568 case UNW_IA64_AR_BSP:
2569 /* Libunwind expects to see the beginning of the current
2570 register frame so we must account for the fact that
2571 ptrace() will return a value for bsp that points *after*
2572 the current register frame. */
2573 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2574 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2575 sof = (cfm & 0x7f);
2576 *val = ia64_rse_skip_regs (bsp, -sof);
2577 break;
2578
2579 case UNW_IA64_AR_BSPSTORE:
2580 /* Libunwind wants bspstore to be after the current register frame.
2581 This is what ptrace() and gdb treat as the regular bsp value. */
2582 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2583 break;
2584
2585 default:
2586 /* For all other registers, just unwind the value directly. */
2587 regcache_cooked_read_unsigned (regcache, regnum, val);
2588 break;
2589 }
2590
2591 if (gdbarch_debug >= 1)
2592 fprintf_unfiltered (gdb_stdlog,
2593 " access_rse_reg: from cache: %4s=%s\n",
2594 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2595 ? ia64_register_names[regnum] : "r??"),
2596 paddress (gdbarch, *val));
2597
2598 return 0;
2599 }
2600
2601 /* Libunwind callback accessor function for top-level fp registers. */
2602 static int
2603 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2604 unw_fpreg_t *val, int write, void *arg)
2605 {
2606 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2607 struct regcache *regcache = (struct regcache *) arg;
2608
2609 /* We never call any libunwind routines that need to write registers. */
2610 gdb_assert (!write);
2611
2612 regcache->cooked_read (regnum, (gdb_byte *) val);
2613
2614 return 0;
2615 }
2616
2617 /* Libunwind callback accessor function for accessing memory. */
2618 static int
2619 ia64_access_mem (unw_addr_space_t as,
2620 unw_word_t addr, unw_word_t *val,
2621 int write, void *arg)
2622 {
2623 if (addr - KERNEL_START < ktab_size)
2624 {
2625 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2626 + (addr - KERNEL_START));
2627
2628 if (write)
2629 *laddr = *val;
2630 else
2631 *val = *laddr;
2632 return 0;
2633 }
2634
2635 /* XXX do we need to normalize byte-order here? */
2636 if (write)
2637 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2638 else
2639 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2640 }
2641
2642 /* Call low-level function to access the kernel unwind table. */
2643 static gdb::optional<gdb::byte_vector>
2644 getunwind_table ()
2645 {
2646 /* FIXME drow/2005-09-10: This code used to call
2647 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2648 for the currently running ia64-linux kernel. That data should
2649 come from the core file and be accessed via the auxv vector; if
2650 we want to preserve the fallback to the running kernel's table, then
2651 we should find a way to override the corefile layer's
2652 xfer_partial method. */
2653
2654 return target_read_alloc (current_top_target (), TARGET_OBJECT_UNWIND_TABLE,
2655 NULL);
2656 }
2657
2658 /* Get the kernel unwind table. */
2659 static int
2660 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2661 {
2662 static struct ia64_table_entry *etab;
2663
2664 if (!ktab)
2665 {
2666 ktab_buf = getunwind_table ();
2667 if (!ktab_buf)
2668 return -UNW_ENOINFO;
2669
2670 ktab = (struct ia64_table_entry *) ktab_buf->data ();
2671 ktab_size = ktab_buf->size ();
2672
2673 for (etab = ktab; etab->start_offset; ++etab)
2674 etab->info_offset += KERNEL_START;
2675 }
2676
2677 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2678 return -UNW_ENOINFO;
2679
2680 di->format = UNW_INFO_FORMAT_TABLE;
2681 di->gp = 0;
2682 di->start_ip = ktab[0].start_offset;
2683 di->end_ip = etab[-1].end_offset;
2684 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2685 di->u.ti.segbase = 0;
2686 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2687 di->u.ti.table_data = (unw_word_t *) ktab;
2688
2689 if (gdbarch_debug >= 1)
2690 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2691 "segbase=%s, length=%s, gp=%s\n",
2692 (char *) di->u.ti.name_ptr,
2693 hex_string (di->u.ti.segbase),
2694 pulongest (di->u.ti.table_len),
2695 hex_string (di->gp));
2696 return 0;
2697 }
2698
2699 /* Find the unwind table entry for a specified address. */
2700 static int
2701 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2702 unw_dyn_info_t *dip, void **buf)
2703 {
2704 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2705 Elf_Internal_Ehdr *ehdr;
2706 unw_word_t segbase = 0;
2707 CORE_ADDR load_base;
2708 bfd *bfd;
2709 int i;
2710
2711 bfd = objfile->obfd;
2712
2713 ehdr = elf_tdata (bfd)->elf_header;
2714 phdr = elf_tdata (bfd)->phdr;
2715
2716 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2717
2718 for (i = 0; i < ehdr->e_phnum; ++i)
2719 {
2720 switch (phdr[i].p_type)
2721 {
2722 case PT_LOAD:
2723 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2724 < phdr[i].p_memsz)
2725 p_text = phdr + i;
2726 break;
2727
2728 case PT_IA_64_UNWIND:
2729 p_unwind = phdr + i;
2730 break;
2731
2732 default:
2733 break;
2734 }
2735 }
2736
2737 if (!p_text || !p_unwind)
2738 return -UNW_ENOINFO;
2739
2740 /* Verify that the segment that contains the IP also contains
2741 the static unwind table. If not, we may be in the Linux kernel's
2742 DSO gate page, in which case the unwind table is in another segment.
2743 Otherwise, we are dealing with runtime-generated code, for which we
2744 have no info here. */
2745 segbase = p_text->p_vaddr + load_base;
2746
2747 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2748 {
2749 int ok = 0;
2750 for (i = 0; i < ehdr->e_phnum; ++i)
2751 {
2752 if (phdr[i].p_type == PT_LOAD
2753 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2754 {
2755 ok = 1;
2756 /* Get the segbase from the section containing the
2757 libunwind table. */
2758 segbase = phdr[i].p_vaddr + load_base;
2759 }
2760 }
2761 if (!ok)
2762 return -UNW_ENOINFO;
2763 }
2764
2765 dip->start_ip = p_text->p_vaddr + load_base;
2766 dip->end_ip = dip->start_ip + p_text->p_memsz;
2767 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2768 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2769 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2770 dip->u.rti.segbase = segbase;
2771 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2772 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2773
2774 return 0;
2775 }
2776
2777 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2778 static int
2779 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2780 int need_unwind_info, void *arg)
2781 {
2782 struct obj_section *sec = find_pc_section (ip);
2783 unw_dyn_info_t di;
2784 int ret;
2785 void *buf = NULL;
2786
2787 if (!sec)
2788 {
2789 /* XXX This only works if the host and the target architecture are
2790 both ia64 and if they have (more or less) the same kernel
2791 version. */
2792 if (get_kernel_table (ip, &di) < 0)
2793 return -UNW_ENOINFO;
2794
2795 if (gdbarch_debug >= 1)
2796 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2797 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2798 "length=%s,data=%s)\n",
2799 hex_string (ip), (char *)di.u.ti.name_ptr,
2800 hex_string (di.u.ti.segbase),
2801 hex_string (di.start_ip), hex_string (di.end_ip),
2802 hex_string (di.gp),
2803 pulongest (di.u.ti.table_len),
2804 hex_string ((CORE_ADDR)di.u.ti.table_data));
2805 }
2806 else
2807 {
2808 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2809 if (ret < 0)
2810 return ret;
2811
2812 if (gdbarch_debug >= 1)
2813 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2814 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2815 "length=%s,data=%s)\n",
2816 hex_string (ip), (char *)di.u.rti.name_ptr,
2817 hex_string (di.u.rti.segbase),
2818 hex_string (di.start_ip), hex_string (di.end_ip),
2819 hex_string (di.gp),
2820 pulongest (di.u.rti.table_len),
2821 hex_string (di.u.rti.table_data));
2822 }
2823
2824 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2825 arg);
2826
2827 /* We no longer need the dyn info storage so free it. */
2828 xfree (buf);
2829
2830 return ret;
2831 }
2832
2833 /* Libunwind callback accessor function for cleanup. */
2834 static void
2835 ia64_put_unwind_info (unw_addr_space_t as,
2836 unw_proc_info_t *pip, void *arg)
2837 {
2838 /* Nothing required for now. */
2839 }
2840
2841 /* Libunwind callback accessor function to get head of the dynamic
2842 unwind-info registration list. */
2843 static int
2844 ia64_get_dyn_info_list (unw_addr_space_t as,
2845 unw_word_t *dilap, void *arg)
2846 {
2847 struct obj_section *text_sec;
2848 struct objfile *objfile;
2849 unw_word_t ip, addr;
2850 unw_dyn_info_t di;
2851 int ret;
2852
2853 if (!libunwind_is_initialized ())
2854 return -UNW_ENOINFO;
2855
2856 for (objfile = object_files; objfile; objfile = objfile->next)
2857 {
2858 void *buf = NULL;
2859
2860 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2861 ip = obj_section_addr (text_sec);
2862 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2863 if (ret >= 0)
2864 {
2865 addr = libunwind_find_dyn_list (as, &di, arg);
2866 /* We no longer need the dyn info storage so free it. */
2867 xfree (buf);
2868
2869 if (addr)
2870 {
2871 if (gdbarch_debug >= 1)
2872 fprintf_unfiltered (gdb_stdlog,
2873 "dynamic unwind table in objfile %s "
2874 "at %s (gp=%s)\n",
2875 bfd_get_filename (objfile->obfd),
2876 hex_string (addr), hex_string (di.gp));
2877 *dilap = addr;
2878 return 0;
2879 }
2880 }
2881 }
2882 return -UNW_ENOINFO;
2883 }
2884
2885
2886 /* Frame interface functions for libunwind. */
2887
2888 static void
2889 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2890 struct frame_id *this_id)
2891 {
2892 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2893 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2894 struct frame_id id = outer_frame_id;
2895 gdb_byte buf[8];
2896 CORE_ADDR bsp;
2897
2898 libunwind_frame_this_id (this_frame, this_cache, &id);
2899 if (frame_id_eq (id, outer_frame_id))
2900 {
2901 (*this_id) = outer_frame_id;
2902 return;
2903 }
2904
2905 /* We must add the bsp as the special address for frame comparison
2906 purposes. */
2907 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2908 bsp = extract_unsigned_integer (buf, 8, byte_order);
2909
2910 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2911
2912 if (gdbarch_debug >= 1)
2913 fprintf_unfiltered (gdb_stdlog,
2914 "libunwind frame id: code %s, stack %s, "
2915 "special %s, this_frame %s\n",
2916 paddress (gdbarch, id.code_addr),
2917 paddress (gdbarch, id.stack_addr),
2918 paddress (gdbarch, bsp),
2919 host_address_to_string (this_frame));
2920 }
2921
2922 static struct value *
2923 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2924 void **this_cache, int regnum)
2925 {
2926 int reg = regnum;
2927 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2928 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2929 struct value *val;
2930
2931 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2932 reg = IA64_PR_REGNUM;
2933 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2934 reg = IA64_UNAT_REGNUM;
2935
2936 /* Let libunwind do most of the work. */
2937 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2938
2939 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2940 {
2941 ULONGEST prN_val;
2942
2943 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2944 {
2945 int rrb_pr = 0;
2946 ULONGEST cfm;
2947
2948 /* Fetch predicate register rename base from current frame
2949 marker for this frame. */
2950 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2951 rrb_pr = (cfm >> 32) & 0x3f;
2952
2953 /* Adjust the register number to account for register rotation. */
2954 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2955 }
2956 prN_val = extract_bit_field (value_contents_all (val),
2957 regnum - VP0_REGNUM, 1);
2958 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2959 }
2960
2961 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2962 {
2963 ULONGEST unatN_val;
2964
2965 unatN_val = extract_bit_field (value_contents_all (val),
2966 regnum - IA64_NAT0_REGNUM, 1);
2967 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2968 }
2969
2970 else if (regnum == IA64_BSP_REGNUM)
2971 {
2972 struct value *cfm_val;
2973 CORE_ADDR prev_bsp, prev_cfm;
2974
2975 /* We want to calculate the previous bsp as the end of the previous
2976 register stack frame. This corresponds to what the hardware bsp
2977 register will be if we pop the frame back, which is why we might
2978 have been called. We know that libunwind will pass us back the
2979 beginning of the current frame so we should just add sof to it. */
2980 prev_bsp = extract_unsigned_integer (value_contents_all (val),
2981 8, byte_order);
2982 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2983 IA64_CFM_REGNUM);
2984 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
2985 8, byte_order);
2986 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2987
2988 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
2989 }
2990 else
2991 return val;
2992 }
2993
2994 static int
2995 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
2996 struct frame_info *this_frame,
2997 void **this_cache)
2998 {
2999 if (libunwind_is_initialized ()
3000 && libunwind_frame_sniffer (self, this_frame, this_cache))
3001 return 1;
3002
3003 return 0;
3004 }
3005
3006 static const struct frame_unwind ia64_libunwind_frame_unwind =
3007 {
3008 NORMAL_FRAME,
3009 default_frame_unwind_stop_reason,
3010 ia64_libunwind_frame_this_id,
3011 ia64_libunwind_frame_prev_register,
3012 NULL,
3013 ia64_libunwind_frame_sniffer,
3014 libunwind_frame_dealloc_cache
3015 };
3016
3017 static void
3018 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3019 void **this_cache,
3020 struct frame_id *this_id)
3021 {
3022 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3023 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3024 gdb_byte buf[8];
3025 CORE_ADDR bsp;
3026 struct frame_id id = outer_frame_id;
3027
3028 libunwind_frame_this_id (this_frame, this_cache, &id);
3029 if (frame_id_eq (id, outer_frame_id))
3030 {
3031 (*this_id) = outer_frame_id;
3032 return;
3033 }
3034
3035 /* We must add the bsp as the special address for frame comparison
3036 purposes. */
3037 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3038 bsp = extract_unsigned_integer (buf, 8, byte_order);
3039
3040 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3041 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3042
3043 if (gdbarch_debug >= 1)
3044 fprintf_unfiltered (gdb_stdlog,
3045 "libunwind sigtramp frame id: code %s, "
3046 "stack %s, special %s, this_frame %s\n",
3047 paddress (gdbarch, id.code_addr),
3048 paddress (gdbarch, id.stack_addr),
3049 paddress (gdbarch, bsp),
3050 host_address_to_string (this_frame));
3051 }
3052
3053 static struct value *
3054 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3055 void **this_cache, int regnum)
3056 {
3057 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3058 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3059 struct value *prev_ip_val;
3060 CORE_ADDR prev_ip;
3061
3062 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3063 method of getting previous registers. */
3064 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3065 IA64_IP_REGNUM);
3066 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3067 8, byte_order);
3068
3069 if (prev_ip == 0)
3070 {
3071 void *tmp_cache = NULL;
3072 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3073 regnum);
3074 }
3075 else
3076 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3077 }
3078
3079 static int
3080 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3081 struct frame_info *this_frame,
3082 void **this_cache)
3083 {
3084 if (libunwind_is_initialized ())
3085 {
3086 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3087 return 1;
3088 return 0;
3089 }
3090 else
3091 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3092 }
3093
3094 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3095 {
3096 SIGTRAMP_FRAME,
3097 default_frame_unwind_stop_reason,
3098 ia64_libunwind_sigtramp_frame_this_id,
3099 ia64_libunwind_sigtramp_frame_prev_register,
3100 NULL,
3101 ia64_libunwind_sigtramp_frame_sniffer
3102 };
3103
3104 /* Set of libunwind callback accessor functions. */
3105 unw_accessors_t ia64_unw_accessors =
3106 {
3107 ia64_find_proc_info_x,
3108 ia64_put_unwind_info,
3109 ia64_get_dyn_info_list,
3110 ia64_access_mem,
3111 ia64_access_reg,
3112 ia64_access_fpreg,
3113 /* resume */
3114 /* get_proc_name */
3115 };
3116
3117 /* Set of special libunwind callback accessor functions specific to accessing
3118 the rse registers. At the top of the stack, we want libunwind to figure out
3119 how to read r32 - r127. Though usually they are found sequentially in
3120 memory starting from $bof, this is not always true. */
3121 unw_accessors_t ia64_unw_rse_accessors =
3122 {
3123 ia64_find_proc_info_x,
3124 ia64_put_unwind_info,
3125 ia64_get_dyn_info_list,
3126 ia64_access_mem,
3127 ia64_access_rse_reg,
3128 ia64_access_rse_fpreg,
3129 /* resume */
3130 /* get_proc_name */
3131 };
3132
3133 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3134 ia64-libunwind-tdep code to use. */
3135 struct libunwind_descr ia64_libunwind_descr =
3136 {
3137 ia64_gdb2uw_regnum,
3138 ia64_uw2gdb_regnum,
3139 ia64_is_fpreg,
3140 &ia64_unw_accessors,
3141 &ia64_unw_rse_accessors,
3142 };
3143
3144 #endif /* HAVE_LIBUNWIND_IA64_H */
3145
3146 static int
3147 ia64_use_struct_convention (struct type *type)
3148 {
3149 struct type *float_elt_type;
3150
3151 /* Don't use the struct convention for anything but structure,
3152 union, or array types. */
3153 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3154 || TYPE_CODE (type) == TYPE_CODE_UNION
3155 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3156 return 0;
3157
3158 /* HFAs are structures (or arrays) consisting entirely of floating
3159 point values of the same length. Up to 8 of these are returned
3160 in registers. Don't use the struct convention when this is the
3161 case. */
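/* As an illustrative example, "struct { double x, y; }" is an HFA
   of two doubles and is returned in f8-f9, so the struct
   convention does not apply to it.  */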
3162 float_elt_type = is_float_or_hfa_type (type);
3163 if (float_elt_type != NULL
3164 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3165 return 0;
3166
3167 /* Other structs of length 32 or less are returned in r8-r11.
3168 Don't use the struct convention for those either. */
3169 return TYPE_LENGTH (type) > 32;
3170 }
3171
3172 /* Return non-zero if TYPE is a structure or union type. */
3173
3174 static int
3175 ia64_struct_type_p (const struct type *type)
3176 {
3177 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3178 || TYPE_CODE (type) == TYPE_CODE_UNION);
3179 }
3180
3181 static void
3182 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3183 gdb_byte *valbuf)
3184 {
3185 struct gdbarch *gdbarch = regcache->arch ();
3186 struct type *float_elt_type;
3187
3188 float_elt_type = is_float_or_hfa_type (type);
3189 if (float_elt_type != NULL)
3190 {
3191 gdb_byte from[IA64_FP_REGISTER_SIZE];
3192 int offset = 0;
3193 int regnum = IA64_FR8_REGNUM;
3194 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3195
3196 while (n-- > 0)
3197 {
3198 regcache->cooked_read (regnum, from);
3199 target_float_convert (from, ia64_ext_type (gdbarch),
3200 valbuf + offset, float_elt_type);
3201 offset += TYPE_LENGTH (float_elt_type);
3202 regnum++;
3203 }
3204 }
3205 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3206 {
3207 /* This is an integral value, and its size is less than 8 bytes.
3208 These values are LSB-aligned, so extract the relevant bytes,
3209 and copy them into VALBUF. */
3210 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3211 so I suppose we should also add handling here for integral values
3212 whose size is greater than 8. But I wasn't able to create such
3213 a type, in either C or Ada, so not worrying about these yet. */
3214 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3215 ULONGEST val;
3216
3217 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3218 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3219 }
3220 else
3221 {
3222 ULONGEST val;
3223 int offset = 0;
3224 int regnum = IA64_GR8_REGNUM;
3225 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3226 int n = TYPE_LENGTH (type) / reglen;
3227 int m = TYPE_LENGTH (type) % reglen;
3228
3229 while (n-- > 0)
3230 {
3231 ULONGEST regval;
3232 regcache_cooked_read_unsigned (regcache, regnum, &regval);
3233 memcpy ((char *)valbuf + offset, &regval, reglen);
3234 offset += reglen;
3235 regnum++;
3236 }
3237
3238 if (m)
3239 {
3240 regcache_cooked_read_unsigned (regcache, regnum, &val);
3241 memcpy ((char *)valbuf + offset, &val, m);
3242 }
3243 }
3244 }
3245
3246 static void
3247 ia64_store_return_value (struct type *type, struct regcache *regcache,
3248 const gdb_byte *valbuf)
3249 {
3250 struct gdbarch *gdbarch = regcache->arch ();
3251 struct type *float_elt_type;
3252
3253 float_elt_type = is_float_or_hfa_type (type);
3254 if (float_elt_type != NULL)
3255 {
3256 gdb_byte to[IA64_FP_REGISTER_SIZE];
3257 int offset = 0;
3258 int regnum = IA64_FR8_REGNUM;
3259 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3260
3261 while (n-- > 0)
3262 {
3263 target_float_convert (valbuf + offset, float_elt_type,
3264 to, ia64_ext_type (gdbarch));
3265 regcache->cooked_write (regnum, to);
3266 offset += TYPE_LENGTH (float_elt_type);
3267 regnum++;
3268 }
3269 }
3270 else
3271 {
3272 int offset = 0;
3273 int regnum = IA64_GR8_REGNUM;
3274 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3275 int n = TYPE_LENGTH (type) / reglen;
3276 int m = TYPE_LENGTH (type) % reglen;
3277
3278 while (n-- > 0)
3279 {
3280 ULONGEST val;
3281 memcpy (&val, (char *)valbuf + offset, reglen);
3282 regcache_cooked_write_unsigned (regcache, regnum, val);
3283 offset += reglen;
3284 regnum++;
3285 }
3286
3287 if (m)
3288 {
3289 ULONGEST val;
3290 memcpy (&val, (char *)valbuf + offset, m);
3291 regcache_cooked_write_unsigned (regcache, regnum, val);
3292 }
3293 }
3294 }
3295
3296 static enum return_value_convention
3297 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3298 struct type *valtype, struct regcache *regcache,
3299 gdb_byte *readbuf, const gdb_byte *writebuf)
3300 {
3301 int struct_return = ia64_use_struct_convention (valtype);
3302
3303 if (writebuf != NULL)
3304 {
3305 gdb_assert (!struct_return);
3306 ia64_store_return_value (valtype, regcache, writebuf);
3307 }
3308
3309 if (readbuf != NULL)
3310 {
3311 gdb_assert (!struct_return);
3312 ia64_extract_return_value (valtype, regcache, readbuf);
3313 }
3314
3315 if (struct_return)
3316 return RETURN_VALUE_STRUCT_CONVENTION;
3317 else
3318 return RETURN_VALUE_REGISTER_CONVENTION;
3319 }
3320
3321 static int
3322 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3323 {
3324 switch (TYPE_CODE (t))
3325 {
3326 case TYPE_CODE_FLT:
3327 if (*etp)
3328 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3329 else
3330 {
3331 *etp = t;
3332 return 1;
3333 }
3334 break;
3335 case TYPE_CODE_ARRAY:
3336 return
3337 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3338 etp);
3339 break;
3340 case TYPE_CODE_STRUCT:
3341 {
3342 int i;
3343
3344 for (i = 0; i < TYPE_NFIELDS (t); i++)
3345 if (!is_float_or_hfa_type_recurse
3346 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3347 return 0;
3348 return 1;
3349 }
3350 break;
3351 default:
3352 return 0;
3353 break;
3354 }
3355 }
3356
3357 /* Determine if the given type is one of the floating point types or
3358 an HFA (which is a struct, array, or combination thereof whose
3359 bottom-most elements are all of the same floating point type). */
3360
3361 static struct type *
3362 is_float_or_hfa_type (struct type *t)
3363 {
3364 struct type *et = 0;
3365
3366 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3367 }
3368
3369
3370 /* Return 1 if the alignment of T is such that the next even slot
3371 should be used. Return 0 if the next available slot should
3372 be used. (See section 8.5.1 of the IA-64 Software Conventions
3373 and Runtime manual). */
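/* For instance (illustrative only), a 16-byte "long double" or
   __int128 argument must start in an even-numbered slot, while an
   8-byte scalar may use the next available slot; the TYPE_LENGTH
   check below encodes this.  */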
3374
3375 static int
3376 slot_alignment_is_next_even (struct type *t)
3377 {
3378 switch (TYPE_CODE (t))
3379 {
3380 case TYPE_CODE_INT:
3381 case TYPE_CODE_FLT:
3382 if (TYPE_LENGTH (t) > 8)
3383 return 1;
3384 else
3385 return 0;
3386 case TYPE_CODE_ARRAY:
3387 return
3388 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3389 case TYPE_CODE_STRUCT:
3390 {
3391 int i;
3392
3393 for (i = 0; i < TYPE_NFIELDS (t); i++)
3394 if (slot_alignment_is_next_even
3395 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3396 return 1;
3397 return 0;
3398 }
3399 default:
3400 return 0;
3401 }
3402 }
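
/* A worked example for the predicate above (illustrative only; the
   prototype below is hypothetical and kept under "#if 0").  */
#if 0
extern void f (int a, long double b);

/* `a' occupies argument slot 0.  Because `b' is a scalar longer than
   8 bytes, slot_alignment_is_next_even returns 1 for it, so slot 1 is
   skipped and `b' occupies slots 2 and 3.  */
#endif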
3403
3404 /* Attempt to find (and return) the global pointer for the given
3405 function.
3406
3407 This is a rather nasty bit of code that searches for the .dynamic section
3408 in the objfile corresponding to the pc of the function we're trying
3409 to call. Once it finds the addresses at which the .dynamic section
3410 lives in the child process, it scans the Elf64_Dyn entries for a
3411 DT_PLTGOT tag. If it finds one of these, the corresponding
3412 d_un.d_ptr value is the global pointer. */
3413
3414 static CORE_ADDR
3415 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3416 CORE_ADDR faddr)
3417 {
3418 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3419 struct obj_section *faddr_sect;
3420
3421 faddr_sect = find_pc_section (faddr);
3422 if (faddr_sect != NULL)
3423 {
3424 struct obj_section *osect;
3425
3426 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3427 {
3428 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3429 break;
3430 }
3431
3432 if (osect < faddr_sect->objfile->sections_end)
3433 {
3434 CORE_ADDR addr, endaddr;
3435
3436 addr = obj_section_addr (osect);
3437 endaddr = obj_section_endaddr (osect);
3438
3439 while (addr < endaddr)
3440 {
3441 int status;
3442 LONGEST tag;
3443 gdb_byte buf[8];
3444
3445 status = target_read_memory (addr, buf, sizeof (buf));
3446 if (status != 0)
3447 break;
3448 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3449
3450 if (tag == DT_PLTGOT)
3451 {
3452 CORE_ADDR global_pointer;
3453
3454 status = target_read_memory (addr + 8, buf, sizeof (buf));
3455 if (status != 0)
3456 break;
3457 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3458 byte_order);
3459
3460 /* The payoff... */
3461 return global_pointer;
3462 }
3463
3464 if (tag == DT_NULL)
3465 break;
3466
3467 addr += 16;
3468 }
3469 }
3470 }
3471 return 0;
3472 }
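
/* The scan above walks 16-byte Elf64_Dyn entries directly in target
   memory.  A hedged sketch of the layout it assumes (standard ELF64;
   hypothetical struct kept under "#if 0" for illustration only).  */
#if 0
struct elf64_dyn_sketch
{
  uint64_t d_tag;	/* Read at ADDR; DT_NULL terminates the scan.  */
  uint64_t d_un;	/* Read at ADDR + 8; the GP when d_tag is DT_PLTGOT.  */
};	/* sizeof == 16, hence the `addr += 16' step above.  */
#endif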
3473
3474 /* Attempt to find (and return) the global pointer for the given
3475 function. We first try the find_global_pointer_from_solib routine
3476 from the gdbarch tdep vector, if provided. And if that does not
3477 work, then we try ia64_find_global_pointer_from_dynamic_section. */
3478
3479 static CORE_ADDR
3480 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3481 {
3482 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3483 CORE_ADDR addr = 0;
3484
3485 if (tdep->find_global_pointer_from_solib)
3486 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3487 if (addr == 0)
3488 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3489 return addr;
3490 }
3491
3492 /* Given a function's address, attempt to find (and return) the
3493 corresponding (canonical) function descriptor. Return 0 if
3494 not found. */
3495 static CORE_ADDR
3496 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3497 {
3498 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3499 struct obj_section *faddr_sect;
3500
3501 /* Return early if faddr is already a function descriptor. */
3502 faddr_sect = find_pc_section (faddr);
3503 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3504 return faddr;
3505
3506 if (faddr_sect != NULL)
3507 {
3508 struct obj_section *osect;
3509 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3510 {
3511 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3512 break;
3513 }
3514
3515 if (osect < faddr_sect->objfile->sections_end)
3516 {
3517 CORE_ADDR addr, endaddr;
3518
3519 addr = obj_section_addr (osect);
3520 endaddr = obj_section_endaddr (osect);
3521
3522 while (addr < endaddr)
3523 {
3524 int status;
3525 LONGEST faddr2;
3526 gdb_byte buf[8];
3527
3528 status = target_read_memory (addr, buf, sizeof (buf));
3529 if (status != 0)
3530 break;
3531 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3532
3533 if (faddr == faddr2)
3534 return addr;
3535
3536 addr += 16;
3537 }
3538 }
3539 }
3540 return 0;
3541 }
3542
3543 /* Attempt to find a function descriptor corresponding to the
3544 given address. If none is found, construct one on the stack
3545 at the address *FDAPTR, and advance *FDAPTR past it. */
3546
3547 static CORE_ADDR
3548 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3549 {
3550 struct gdbarch *gdbarch = regcache->arch ();
3551 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3552 CORE_ADDR fdesc;
3553
3554 fdesc = find_extant_func_descr (gdbarch, faddr);
3555
3556 if (fdesc == 0)
3557 {
3558 ULONGEST global_pointer;
3559 gdb_byte buf[16];
3560
3561 fdesc = *fdaptr;
3562 *fdaptr += 16;
3563
3564 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3565
3566 if (global_pointer == 0)
3567 regcache_cooked_read_unsigned (regcache,
3568 IA64_GR1_REGNUM, &global_pointer);
3569
3570 store_unsigned_integer (buf, 8, byte_order, faddr);
3571 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3572
3573 write_memory (fdesc, buf, 16);
3574 }
3575
3576 return fdesc;
3577 }
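
/* An illustrative sketch of the 16-byte descriptor that
   find_func_descr writes (hypothetical struct, under "#if 0" only;
   the offsets follow the two store_unsigned_integer calls above).  */
#if 0
struct ia64_fdesc_sketch
{
  uint64_t entry_point;	/* fdesc + 0: FADDR itself.  */
  uint64_t gp;		/* fdesc + 8: global pointer for FADDR's module.  */
};

/* Entries in the ".opd" section use the same layout, which is why
   find_extant_func_descr can compare FADDR against the first quadword
   of each 16-byte entry.  */
#endif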
3578
3579 /* Use the following routine when printing out function pointers
3580 so the user can see the function address rather than just the
3581 function descriptor. */
3582 static CORE_ADDR
3583 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3584 struct target_ops *targ)
3585 {
3586 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3587 struct obj_section *s;
3588 gdb_byte buf[8];
3589
3590 s = find_pc_section (addr);
3591
3592 /* Check whether ADDR points to a function descriptor. */
3593 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3594 return read_memory_unsigned_integer (addr, 8, byte_order);
3595
3596 /* Normally, functions live inside a section that is executable.
3597 So, if ADDR points to a non-executable section, then treat it
3598 as a function descriptor and return the target address iff
3599 the target address itself points to a section that is executable.
3600 First check that the whole 8 bytes of memory are readable. */
3601 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3602 && target_read_memory (addr, buf, 8) == 0)
3603 {
3604 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3605 struct obj_section *pc_section = find_pc_section (pc);
3606
3607 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3608 return pc;
3609 }
3610
3611 /* There are also descriptors embedded in vtables. */
3612 if (s)
3613 {
3614 struct bound_minimal_symbol minsym;
3615
3616 minsym = lookup_minimal_symbol_by_pc (addr);
3617
3618 if (minsym.minsym
3619 && is_vtable_name (MSYMBOL_LINKAGE_NAME (minsym.minsym)))
3620 return read_memory_unsigned_integer (addr, 8, byte_order);
3621 }
3622
3623 return addr;
3624 }
3625
3626 static CORE_ADDR
3627 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3628 {
3629 return sp & ~0xfLL;
3630 }
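
/* Illustrative values: an incoming SP of 0x600000000000ffe8 is
   rounded down to 0x600000000000ffe0; the ia64 calling conventions
   require the stack pointer to stay 16-byte aligned.  */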
3631
3632 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3633
3634 static void
3635 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3636 {
3637 ULONGEST cfm, pfs, new_bsp;
3638
3639 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3640
3641 new_bsp = rse_address_add (bsp, sof);
3642 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3643
3644 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3645 pfs &= 0xc000000000000000LL;
3646 pfs |= (cfm & 0xffffffffffffLL);
3647 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3648
3649 cfm &= 0xc000000000000000LL;
3650 cfm |= sof;
3651 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3652 }
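
/* A hedged note on the bit manipulation above: AR.PFS keeps only its
   two topmost bits and receives the caller's CFM in its low 48 bits
   (the previous-frame-marker portion), while CFM keeps its two
   topmost bits and has its low bits replaced by SOF.  The new frame
   therefore has SOF stacked registers, whose backing-store slots
   begin at the BSP value passed in (the arguments are later written
   there via store_argument_in_slot).  */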
3653
3654 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3655 ia64. */
3656
3657 static void
3658 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3659 int slotnum, gdb_byte *buf)
3660 {
3661 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3662 }
3663
3664 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3665
3666 static void
3667 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3668 {
3669 /* Nothing needed. */
3670 }
3671
3672 static CORE_ADDR
3673 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3674 struct regcache *regcache, CORE_ADDR bp_addr,
3675 int nargs, struct value **args, CORE_ADDR sp,
3676 function_call_return_method return_method,
3677 CORE_ADDR struct_addr)
3678 {
3679 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3680 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3681 int argno;
3682 struct value *arg;
3683 struct type *type;
3684 int len, argoffset;
3685 int nslots, rseslots, memslots, slotnum, nfuncargs;
3686 int floatreg;
3687 ULONGEST bsp;
3688 CORE_ADDR funcdescaddr, global_pointer;
3689 CORE_ADDR func_addr = find_function_addr (function, NULL);
3690
3691 nslots = 0;
3692 nfuncargs = 0;
3693 /* Count the number of slots needed for the arguments. */
3694 for (argno = 0; argno < nargs; argno++)
3695 {
3696 arg = args[argno];
3697 type = check_typedef (value_type (arg));
3698 len = TYPE_LENGTH (type);
3699
3700 if ((nslots & 1) && slot_alignment_is_next_even (type))
3701 nslots++;
3702
3703 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3704 nfuncargs++;
3705
3706 nslots += (len + 7) / 8;
3707 }
3708
3709 /* Divvy up the slots between the RSE and the memory stack. */
3710 rseslots = (nslots > 8) ? 8 : nslots;
3711 memslots = nslots - rseslots;
3712
3713 /* Allocate a new RSE frame. */
3714 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3715 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3716
3717 /* We will attempt to find function descriptors in the .opd segment,
3718 but if we can't we'll construct them ourselves. That being the
3719 case, we'll need to reserve space on the stack for them. */
3720 funcdescaddr = sp - nfuncargs * 16;
3721 funcdescaddr &= ~0xfLL;
3722
3723 /* Adjust the stack pointer to its new value. The calling conventions
3724 require us to have 16 bytes of scratch, plus whatever space is
3725 necessary for the memory slots and our function descriptors. */
3726 sp = sp - 16 - (memslots + nfuncargs) * 8;
3727 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3728
3729 /* Place the arguments where they belong. The arguments will be
3730 either placed in the RSE backing store or on the memory stack.
3731 In addition, floating point arguments or HFAs are placed in
3732 floating point registers. */
3733 slotnum = 0;
3734 floatreg = IA64_FR8_REGNUM;
3735 for (argno = 0; argno < nargs; argno++)
3736 {
3737 struct type *float_elt_type;
3738
3739 arg = args[argno];
3740 type = check_typedef (value_type (arg));
3741 len = TYPE_LENGTH (type);
3742
3743 /* Special handling for function parameters. */
3744 if (len == 8
3745 && TYPE_CODE (type) == TYPE_CODE_PTR
3746 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3747 {
3748 gdb_byte val_buf[8];
3749 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3750 8, byte_order);
3751 store_unsigned_integer (val_buf, 8, byte_order,
3752 find_func_descr (regcache, faddr,
3753 &funcdescaddr));
3754 if (slotnum < rseslots)
3755 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3756 slotnum, val_buf);
3757 else
3758 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3759 slotnum++;
3760 continue;
3761 }
3762
3763 /* Normal slots. */
3764
3765 /* Skip odd slot if necessary... */
3766 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3767 slotnum++;
3768
3769 argoffset = 0;
3770 while (len > 0)
3771 {
3772 gdb_byte val_buf[8];
3773
3774 memset (val_buf, 0, 8);
3775 if (!ia64_struct_type_p (type) && len < 8)
3776 {
3777 /* Integral types are LSB-aligned, so we have to be careful
3778 to insert the argument on the correct side of the buffer.
3779 This is why we use store_unsigned_integer. */
3780 store_unsigned_integer
3781 (val_buf, 8, byte_order,
3782 extract_unsigned_integer (value_contents (arg), len,
3783 byte_order));
3784 }
3785 else
3786 {
3787 /* This is either an 8-byte (or larger) integral type, or an aggregate.
3788 For an integral type, there is no problem: we just
3789 copy the value over.
3790
3791 For aggregates, the only potentially tricky portion
3792 is to write the last block if it is less than 8 bytes.
3793 In this case, the data is Byte0-aligned. Happy news,
3794 this means that we don't need to differentiate the
3795 handling of 8-byte blocks and less-than-8-byte blocks. */
3796 memcpy (val_buf, value_contents (arg) + argoffset,
3797 (len > 8) ? 8 : len);
3798 }
3799
3800 if (slotnum < rseslots)
3801 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3802 slotnum, val_buf);
3803 else
3804 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3805
3806 argoffset += 8;
3807 len -= 8;
3808 slotnum++;
3809 }
3810
3811 /* Handle floating point types (including HFAs). */
3812 float_elt_type = is_float_or_hfa_type (type);
3813 if (float_elt_type != NULL)
3814 {
3815 argoffset = 0;
3816 len = TYPE_LENGTH (type);
3817 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3818 {
3819 gdb_byte to[IA64_FP_REGISTER_SIZE];
3820 target_float_convert (value_contents (arg) + argoffset,
3821 float_elt_type, to,
3822 ia64_ext_type (gdbarch));
3823 regcache->cooked_write (floatreg, to);
3824 floatreg++;
3825 argoffset += TYPE_LENGTH (float_elt_type);
3826 len -= TYPE_LENGTH (float_elt_type);
3827 }
3828 }
3829 }
3830
3831 /* Store the struct return value in r8 if necessary. */
3832 if (return_method == return_method_struct)
3833 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3834 (ULONGEST) struct_addr);
3835
3836 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3837
3838 if (global_pointer != 0)
3839 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3840
3841 /* The following is not necessary on HP-UX, because we're using
3842 a dummy code sequence pushed on the stack to make the call, and
3843 this sequence doesn't need b0 to be set in order for our dummy
3844 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3845 it's needed for other OSes, so we do this unconditionally. */
3846 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3847
3848 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3849
3850 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3851
3852 return sp;
3853 }
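
/* A hedged sketch of the memory-stack layout produced above
   (regions follow the arithmetic in ia64_push_dummy_call and are
   listed from high to low addresses):

     just below the original SP:
	up to NFUNCARGS constructed function descriptors, 16 bytes
	each, written upward from FUNCDESCADDR by find_func_descr
     new SP + 16 .. new SP + 16 + MEMSLOTS * 8:
	argument slots that did not fit into the eight RSE slots
     new SP .. new SP + 16:
	the 16-byte scratch area required by the calling convention,
	with the new SP itself kept 16-byte aligned.  */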
3854
3855 static const struct ia64_infcall_ops ia64_infcall_ops =
3856 {
3857 ia64_allocate_new_rse_frame,
3858 ia64_store_argument_in_slot,
3859 ia64_set_function_addr
3860 };
3861
3862 static struct frame_id
3863 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3864 {
3865 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3866 gdb_byte buf[8];
3867 CORE_ADDR sp, bsp;
3868
3869 get_frame_register (this_frame, sp_regnum, buf);
3870 sp = extract_unsigned_integer (buf, 8, byte_order);
3871
3872 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3873 bsp = extract_unsigned_integer (buf, 8, byte_order);
3874
3875 if (gdbarch_debug >= 1)
3876 fprintf_unfiltered (gdb_stdlog,
3877 "dummy frame id: code %s, stack %s, special %s\n",
3878 paddress (gdbarch, get_frame_pc (this_frame)),
3879 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3880
3881 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3882 }
3883
3884 static CORE_ADDR
3885 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3886 {
3887 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3888 gdb_byte buf[8];
3889 CORE_ADDR ip, psr, pc;
3890
3891 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3892 ip = extract_unsigned_integer (buf, 8, byte_order);
3893 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3894 psr = extract_unsigned_integer (buf, 8, byte_order);
3895
3896 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3897 return pc;
3898 }
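
/* A small worked example of the PC encoding computed above
   (hypothetical addresses): with an unwound IP of 0x4000000000000420
   and PSR.ri (bits 41..42) equal to 2, the returned PC is
   0x4000000000000422, i.e. the bundle address with the slot number
   encoded in the low nibble.  */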
3899
3900 static int
3901 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3902 {
3903 info->bytes_per_line = SLOT_MULTIPLIER;
3904 return default_print_insn (memaddr, info);
3905 }
3906
3907 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3908
3909 static int
3910 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3911 {
3912 return (cfm & 0x7f);
3913 }
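
/* (For reference: CFM bits 0..6 form the "size of frame" (sof) field,
   so the 0x7f mask above simply extracts the number of stacked
   registers in the current frame.)  */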
3914
3915 static struct gdbarch *
3916 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3917 {
3918 struct gdbarch *gdbarch;
3919 struct gdbarch_tdep *tdep;
3920
3921 /* If there is already a candidate, use it. */
3922 arches = gdbarch_list_lookup_by_info (arches, &info);
3923 if (arches != NULL)
3924 return arches->gdbarch;
3925
3926 tdep = XCNEW (struct gdbarch_tdep);
3927 gdbarch = gdbarch_alloc (&info, tdep);
3928
3929 tdep->size_of_register_frame = ia64_size_of_register_frame;
3930
3931 /* According to the ia64 specs, instructions that store long double
3932 floats in memory use a long-double format different from that
3933 used in the floating registers. The memory format matches the
3934 x86 extended float format, which is 80 bits. An OS may choose to
3935 use this format (e.g. GNU/Linux) or choose to use a different
3936 format for storing long doubles (e.g. HPUX). In the latter case,
3937 the setting of the format may be moved/overridden in an
3938 OS-specific tdep file. */
3939 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3940
3941 set_gdbarch_short_bit (gdbarch, 16);
3942 set_gdbarch_int_bit (gdbarch, 32);
3943 set_gdbarch_long_bit (gdbarch, 64);
3944 set_gdbarch_long_long_bit (gdbarch, 64);
3945 set_gdbarch_float_bit (gdbarch, 32);
3946 set_gdbarch_double_bit (gdbarch, 64);
3947 set_gdbarch_long_double_bit (gdbarch, 128);
3948 set_gdbarch_ptr_bit (gdbarch, 64);
3949
3950 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3951 set_gdbarch_num_pseudo_regs (gdbarch,
3952 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3953 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3954 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3955
3956 set_gdbarch_register_name (gdbarch, ia64_register_name);
3957 set_gdbarch_register_type (gdbarch, ia64_register_type);
3958
3959 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3960 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3961 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3962 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3963 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3964 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3965 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3966
3967 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3968
3969 set_gdbarch_return_value (gdbarch, ia64_return_value);
3970
3971 set_gdbarch_memory_insert_breakpoint (gdbarch,
3972 ia64_memory_insert_breakpoint);
3973 set_gdbarch_memory_remove_breakpoint (gdbarch,
3974 ia64_memory_remove_breakpoint);
3975 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3976 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3977 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3978 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3979
3980 /* Settings for calling functions in the inferior. */
3981 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3982 tdep->infcall_ops = ia64_infcall_ops;
3983 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3984 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3985
3986 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
3987 #ifdef HAVE_LIBUNWIND_IA64_H
3988 frame_unwind_append_unwinder (gdbarch,
3989 &ia64_libunwind_sigtramp_frame_unwind);
3990 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
3991 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3992 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
3993 #else
3994 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3995 #endif
3996 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
3997 frame_base_set_default (gdbarch, &ia64_frame_base);
3998
3999 /* Settings that should be unnecessary. */
4000 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4001
4002 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4003 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4004 ia64_convert_from_func_ptr_addr);
4005
4006 /* The virtual table contains 16-byte descriptors, not pointers to
4007 descriptors. */
4008 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4009
4010 /* Hook in ABI-specific overrides, if they have been registered. */
4011 gdbarch_init_osabi (info, gdbarch);
4012
4013 return gdbarch;
4014 }
4015
4016 void
4017 _initialize_ia64_tdep (void)
4018 {
4019 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4020 }