gdb/ia64-tdep.c
1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "doublest.h"
32 #include "value.h"
33 #include "gdb_assert.h"
34 #include "objfiles.h"
35 #include "elf/common.h" /* for DT_PLTGOT value */
36 #include "elf-bfd.h"
37 #include "dis-asm.h"
38 #include "infcall.h"
39 #include "osabi.h"
40 #include "ia64-tdep.h"
41 #include "cp-abi.h"
42
43 #ifdef HAVE_LIBUNWIND_IA64_H
44 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
45 #include "ia64-libunwind-tdep.h"
46
47 /* Note: KERNEL_START is supposed to be an address which is not going
48 to ever contain any valid unwind info. For ia64 linux, the choice
49 of 0xc000000000000000 is fairly safe since that's uncached space.
50
51 We use KERNEL_START as follows: after obtaining the kernel's
52 unwind table via getunwind(), we project its unwind data into
53 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
54 when ia64_access_mem() sees a memory access to this
55 address-range, we redirect it to ktab instead.
56
57 None of this hackery is needed with a modern kernel/libc
58 which uses the kernel virtual DSO to provide access to the
59 kernel's unwind info. In that case, ktab_size remains 0 and
60 hence the value of KERNEL_START doesn't matter. */
61
62 #define KERNEL_START 0xc000000000000000ULL
63
64 static size_t ktab_size = 0;
65 struct ia64_table_entry
66 {
67 uint64_t start_offset;
68 uint64_t end_offset;
69 uint64_t info_offset;
70 };
71
72 static struct ia64_table_entry *ktab = NULL;
73
74 #endif
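/* To illustrate the projection described above (a sketch based on the
   comment; ia64_access_mem is the routine referenced there): once the
   kernel's unwind data has been copied into KTAB, a target read of, say,
   8 bytes at KERNEL_START + 0x40 would be served from
   ((gdb_byte *) ktab) + 0x40 instead of from the inferior, provided
   ktab_size != 0 and the offset lies within the projected range.  */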
75
76 /* An enumeration of the different IA-64 instruction types. */
77
78 typedef enum instruction_type
79 {
80 A, /* Integer ALU ; I-unit or M-unit */
81 I, /* Non-ALU integer; I-unit */
82 M, /* Memory ; M-unit */
83 F, /* Floating-point ; F-unit */
84 B, /* Branch ; B-unit */
85 L, /* Extended (L+X) ; I-unit */
86 X, /* Extended (L+X) ; I-unit */
87 undefined /* undefined or reserved */
88 } instruction_type;
89
90 /* We represent IA-64 PC addresses as the value of the instruction
91 pointer or'd with some bit combination in the low nibble which
92 represents the slot number in the bundle addressed by the
93 instruction pointer. The problem is that the Linux kernel
94 multiplies its slot numbers (for exceptions) by one while the
95 disassembler multiplies its slot numbers by 6. In addition, I've
96 heard it said that the simulator uses 1 as the multiplier.
97
98 I've fixed the disassembler so that the bytes_per_line field will
99 be the slot multiplier. If bytes_per_line comes in as zero, it
100 is set to six (which is how it was set up initially). -- objdump
101 displays pretty disassembly dumps with this value. For our purposes,
102 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
103 never want to also display the raw bytes the way objdump does. */
104
105 #define SLOT_MULTIPLIER 1
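/* Worked example of this addressing scheme: with SLOT_MULTIPLIER == 1, the
   PC value 0x4000000000000012 denotes slot 2 of the 16-byte bundle at
   0x4000000000000010.  The decoding used throughout this file is simply

     slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;    yielding 2
     addr &= ~0x0f;                                      yielding the bundle base

   (see fetch_instruction below).  */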
106
107 /* Length in bytes of an instruction bundle. */
108
109 #define BUNDLE_LEN 16
110
111 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
112
113 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
114 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
115 #endif
116
117 static gdbarch_init_ftype ia64_gdbarch_init;
118
119 static gdbarch_register_name_ftype ia64_register_name;
120 static gdbarch_register_type_ftype ia64_register_type;
121 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
122 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
123 static struct type *is_float_or_hfa_type (struct type *t);
124 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
125 CORE_ADDR faddr);
126
127 #define NUM_IA64_RAW_REGS 462
128
129 static int sp_regnum = IA64_GR12_REGNUM;
130 static int fp_regnum = IA64_VFP_REGNUM;
131 static int lr_regnum = IA64_VRAP_REGNUM;
132
133 /* NOTE: we treat the register stack registers r32-r127 as
134 pseudo-registers because they may not be accessible via the ptrace
135 register get/set interfaces. */
136
137 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
138 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
139 V127_REGNUM = V32_REGNUM + 95,
140 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
141 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
142
143 /* Array of register names.  There should be ia64_num_regs strings in
144 the initializer. */
145
146 static char *ia64_register_names[] =
147 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
148 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
149 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
150 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
151 "", "", "", "", "", "", "", "",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163
164 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
165 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
166 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
167 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
168 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
169 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
170 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
171 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
172 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
173 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
174 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
175 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
176 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
177 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
178 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
179 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
180
181 "", "", "", "", "", "", "", "",
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189
190 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
191
192 "vfp", "vrap",
193
194 "pr", "ip", "psr", "cfm",
195
196 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
197 "", "", "", "", "", "", "", "",
198 "rsc", "bsp", "bspstore", "rnat",
199 "", "fcr", "", "",
200 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
201 "ccv", "", "", "", "unat", "", "", "",
202 "fpsr", "", "", "", "itc",
203 "", "", "", "", "", "", "", "", "", "",
204 "", "", "", "", "", "", "", "", "",
205 "pfs", "lc", "ec",
206 "", "", "", "", "", "", "", "", "", "",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "",
213 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
214 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
215 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
216 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
217 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
218 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
219 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
220 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
221 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
222 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
223 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
224 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
225 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
226 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
227 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
228 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
229
230 "bof",
231
232 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
233 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
234 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
235 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
236 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
237 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
238 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
239 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
240 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
241 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
242 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
243 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
244
245 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
246 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
247 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
248 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
249 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
250 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
251 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
252 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
253 };
254
255 struct ia64_frame_cache
256 {
257 CORE_ADDR base; /* frame pointer base for frame */
258 CORE_ADDR pc; /* function start pc for frame */
259 CORE_ADDR saved_sp; /* stack pointer for frame */
260 CORE_ADDR bsp; /* points at r32 for the current frame */
261 CORE_ADDR cfm; /* cfm value for current frame */
262 CORE_ADDR prev_cfm; /* cfm value for previous frame */
263 int frameless;
264 int sof; /* Size of frame (decoded from cfm value). */
265 int sol; /* Size of locals (decoded from cfm value). */
266 int sor; /* Number of rotating registers (decoded from
267 cfm value). */
268 CORE_ADDR after_prologue;
269 /* Address of first instruction after the last
270 prologue instruction.  Note that there may
271 be instructions from the function's body
272 intermingled with the prologue. */
273 int mem_stack_frame_size;
274 /* Size of the memory stack frame (may be zero),
275 or -1 if it has not been determined yet. */
276 int fp_reg; /* Register number (if any) used as a frame pointer
277 for this frame. 0 if no register is being used
278 as the frame pointer. */
279
280 /* Saved registers. */
281 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
282
283 };
284
285 static int
286 floatformat_valid (const struct floatformat *fmt, const void *from)
287 {
288 return 1;
289 }
290
291 static const struct floatformat floatformat_ia64_ext_little =
292 {
293 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
294 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
295 };
296
297 static const struct floatformat floatformat_ia64_ext_big =
298 {
299 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
300 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
301 };
302
303 static const struct floatformat *floatformats_ia64_ext[2] =
304 {
305 &floatformat_ia64_ext_big,
306 &floatformat_ia64_ext_little
307 };
308
309 static struct type *
310 ia64_ext_type (struct gdbarch *gdbarch)
311 {
312 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
313
314 if (!tdep->ia64_ext_type)
315 tdep->ia64_ext_type
316 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
317 floatformats_ia64_ext);
318
319 return tdep->ia64_ext_type;
320 }
321
322 static int
323 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
324 struct reggroup *group)
325 {
326 int vector_p;
327 int float_p;
328 int raw_p;
329 if (group == all_reggroup)
330 return 1;
331 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
332 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
333 raw_p = regnum < NUM_IA64_RAW_REGS;
334 if (group == float_reggroup)
335 return float_p;
336 if (group == vector_reggroup)
337 return vector_p;
338 if (group == general_reggroup)
339 return (!vector_p && !float_p);
340 if (group == save_reggroup || group == restore_reggroup)
341 return raw_p;
342 return 0;
343 }
344
345 static const char *
346 ia64_register_name (struct gdbarch *gdbarch, int reg)
347 {
348 return ia64_register_names[reg];
349 }
350
351 struct type *
352 ia64_register_type (struct gdbarch *arch, int reg)
353 {
354 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
355 return ia64_ext_type (arch);
356 else
357 return builtin_type (arch)->builtin_long;
358 }
359
360 static int
361 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
362 {
363 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
364 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
365 return reg;
366 }
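/* For example, the DWARF register number for the first stacked general
   register, IA64_GR32_REGNUM, is remapped to the V32_REGNUM pseudo
   register, so that r32..r127 coming from the debug info are read through
   the pseudo-register logic below rather than as raw registers.  */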
367
368
369 /* Extract ``len'' bits from an instruction bundle starting at
370 bit ``from''. */
371
372 static long long
373 extract_bit_field (const gdb_byte *bundle, int from, int len)
374 {
375 long long result = 0LL;
376 int to = from + len;
377 int from_byte = from / 8;
378 int to_byte = to / 8;
379 unsigned char *b = (unsigned char *) bundle;
380 unsigned char c;
381 int lshift;
382 int i;
383
384 c = b[from_byte];
385 if (from_byte == to_byte)
386 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
387 result = c >> (from % 8);
388 lshift = 8 - (from % 8);
389
390 for (i = from_byte+1; i < to_byte; i++)
391 {
392 result |= ((long long) b[i]) << lshift;
393 lshift += 8;
394 }
395
396 if (from_byte < to_byte && (to % 8 != 0))
397 {
398 c = b[to_byte];
399 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
400 result |= ((long long) c) << lshift;
401 }
402
403 return result;
404 }
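/* For instance, the 5-bit template field of a bundle is fetched as

     template = extract_bit_field (bundle, 0, 5);

   which, for from == 0 and len == 5, reduces to bundle[0] & 0x1f: bit 0 of
   the bundle is the least significant bit of its first byte.  */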
405
406 /* Replace the specified bits in an instruction bundle. */
407
408 static void
409 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
410 {
411 int to = from + len;
412 int from_byte = from / 8;
413 int to_byte = to / 8;
414 unsigned char *b = (unsigned char *) bundle;
415 unsigned char c;
416
417 if (from_byte == to_byte)
418 {
419 unsigned char left, right;
420 c = b[from_byte];
421 left = (c >> (to % 8)) << (to % 8);
422 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
423 c = (unsigned char) (val & 0xff);
424 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
425 c |= right | left;
426 b[from_byte] = c;
427 }
428 else
429 {
430 int i;
431 c = b[from_byte];
432 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
433 c = c | (val << (from % 8));
434 b[from_byte] = c;
435 val >>= 8 - from % 8;
436
437 for (i = from_byte+1; i < to_byte; i++)
438 {
439 c = val & 0xff;
440 val >>= 8;
441 b[i] = c;
442 }
443
444 if (to % 8 != 0)
445 {
446 unsigned char cv = (unsigned char) val;
447 c = b[to_byte];
448 c = c >> (to % 8) << (to % 8);
449 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
450 b[to_byte] = c;
451 }
452 }
453 }
454
455 /* Return the contents of slot N (for N = 0, 1, or 2) in
456 an instruction bundle. */
457
458 static long long
459 slotN_contents (gdb_byte *bundle, int slotnum)
460 {
461 return extract_bit_field (bundle, 5+41*slotnum, 41);
462 }
463
464 /* Store an instruction in an instruction bundle. */
465
466 static void
467 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
468 {
469 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
470 }
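/* With the 5 + 41*slotnum layout used above, the 128-bit bundle therefore
   decomposes as: bits 0..4 template, bits 5..45 slot 0, bits 46..86 slot 1,
   bits 87..127 slot 2 (matching the bundle diagram in the breakpoint
   comment further below).  */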
471
472 static const enum instruction_type template_encoding_table[32][3] =
473 {
474 { M, I, I }, /* 00 */
475 { M, I, I }, /* 01 */
476 { M, I, I }, /* 02 */
477 { M, I, I }, /* 03 */
478 { M, L, X }, /* 04 */
479 { M, L, X }, /* 05 */
480 { undefined, undefined, undefined }, /* 06 */
481 { undefined, undefined, undefined }, /* 07 */
482 { M, M, I }, /* 08 */
483 { M, M, I }, /* 09 */
484 { M, M, I }, /* 0A */
485 { M, M, I }, /* 0B */
486 { M, F, I }, /* 0C */
487 { M, F, I }, /* 0D */
488 { M, M, F }, /* 0E */
489 { M, M, F }, /* 0F */
490 { M, I, B }, /* 10 */
491 { M, I, B }, /* 11 */
492 { M, B, B }, /* 12 */
493 { M, B, B }, /* 13 */
494 { undefined, undefined, undefined }, /* 14 */
495 { undefined, undefined, undefined }, /* 15 */
496 { B, B, B }, /* 16 */
497 { B, B, B }, /* 17 */
498 { M, M, B }, /* 18 */
499 { M, M, B }, /* 19 */
500 { undefined, undefined, undefined }, /* 1A */
501 { undefined, undefined, undefined }, /* 1B */
502 { M, F, B }, /* 1C */
503 { M, F, B }, /* 1D */
504 { undefined, undefined, undefined }, /* 1E */
505 { undefined, undefined, undefined }, /* 1F */
506 };
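/* Example use of this table: a bundle whose template field is 0x04 or 0x05
   is an { M, L, X } bundle, so slot 1 holds the L part and slot 2 the X part
   of a single two-slot L-X instruction; the breakpoint code below relies on
   this when it refuses slot 2 and redirects slot 1 to slot 2.  */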
507
508 /* Fetch and (partially) decode an instruction at ADDR and return the
509 address of the next instruction to fetch. */
510
511 static CORE_ADDR
512 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
513 {
514 gdb_byte bundle[BUNDLE_LEN];
515 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
516 long long template;
517 int val;
518
519 /* Warn about slot numbers greater than 2. We used to generate
520 an error here on the assumption that the user entered an invalid
521 address. But, sometimes GDB itself requests an invalid address.
522 This can (easily) happen when execution stops in a function for
523 which there are no symbols. The prologue scanner will attempt to
524 find the beginning of the function - if the nearest symbol
525 happens to not be aligned on a bundle boundary (16 bytes), the
526 resulting starting address will cause GDB to think that the slot
527 number is too large.
528
529 So we warn about it and set the slot number to zero. It is
530 not necessarily a fatal condition, particularly if debugging
531 at the assembly language level. */
532 if (slotnum > 2)
533 {
534 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
535 "Using slot 0 instead"));
536 slotnum = 0;
537 }
538
539 addr &= ~0x0f;
540
541 val = target_read_memory (addr, bundle, BUNDLE_LEN);
542
543 if (val != 0)
544 return 0;
545
546 *instr = slotN_contents (bundle, slotnum);
547 template = extract_bit_field (bundle, 0, 5);
548 *it = template_encoding_table[(int)template][slotnum];
549
550 if (slotnum == 2 || (slotnum == 1 && *it == L))
551 addr += 16;
552 else
553 addr += (slotnum + 1) * SLOT_MULTIPLIER;
554
555 return addr;
556 }
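/* Address-advance examples for fetch_instruction, with SLOT_MULTIPLIER == 1:
   fetching slot 0 of a bundle returns the bundle base plus 1 (slot 1 of the
   same bundle); fetching slot 2 returns the base plus 16, i.e. the next
   bundle; and fetching slot 1 of an { M, L, X } bundle also returns the next
   bundle, since the L+X pair consumes both remaining slots.  */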
557
558 /* There are 5 different break instructions (break.i, break.b,
559 break.m, break.f, and break.x), but they all have the same
560 encoding. (The five bit template in the low five bits of the
561 instruction bundle distinguishes one from another.)
562
563 The runtime architecture manual specifies that break instructions
564 used for debugging purposes must have the upper two bits of the 21
565 bit immediate set to a 0 and a 1 respectively. A breakpoint
566 instruction encodes the most significant bit of its 21 bit
567 immediate at bit 36 of the 41 bit instruction. The penultimate msb
568 is at bit 25 which leads to the pattern below.
569
570 Originally, I had this set up to do, e.g., a "break.i 0x80000".  But
571 it turns out that 0x80000 was used as the syscall break in the early
572 simulators. So I changed the pattern slightly to do "break.i 0x080001"
573 instead. But that didn't work either (I later found out that this
574 pattern was used by the simulator that I was using).  So I ended up
575 using the pattern seen below.
576
577 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
578 while we need bit-based addressing as the instruction length is 41 bits and
579 we must not modify/corrupt the adjacent slots in the same bundle.
580 Fortunately we may store a larger memory region, including the adjacent bits,
581 with the original memory content (not any breakpoints already stored there).
582 We need to be careful in ia64_memory_remove_breakpoint to always restore
583 only the specific bits of this instruction, ignoring any adjacent stored
584 bits.
585
586 We use the original addressing with the low nibble in the range <0..2>, which
587 gets incorrectly interpreted by the generic non-ia64 breakpoint_restore_shadows
588 as a direct byte offset into SHADOW_CONTENTS. We store the whole BUNDLE_LEN
589 bytes, minus the (at most two) skipped leading bytes, so as not to run over
590 into the next bundle.
591
592 If we wanted to store the whole bundle to SHADOW_CONTENTS we would have
593 to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
594 In that case there would be no other place left to store
595 SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
596 SLOTNUM in ia64_memory_remove_breakpoint.
597
598 There is one special case where we need to be extra careful:
599 L-X instructions, which are instructions that occupy 2 slots
600 (The L part is always in slot 1, and the X part is always in
601 slot 2). We must refuse to insert breakpoints for an address
602 that points at slot 2 of a bundle where an L-X instruction is
603 present, since there is logically no instruction at that address.
604 However, to make things more interesting, the opcode of L-X
605 instructions is located in slot 2. This means that, to insert
606 a breakpoint at an address that points to slot 1, we actually
607 need to write the breakpoint in slot 2! Slot 1 is actually
608 the extended operand, so writing the breakpoint there would not
609 have the desired effect. Another side-effect of this issue
610 is that we need to make sure that the shadow contents buffer
611 does save byte 15 of our instruction bundle (this is the tail
612 end of slot 2, which wouldn't be saved if we were to insert
613 the breakpoint in slot 1).
614
615 ia64 16-byte bundle layout:
616 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
617
618 The current addressing used by the code below:
619 original PC placed_address placed_size required covered
620 == bp_tgt->shadow_len reqd \subset covered
621 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
622 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
623 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
624
625 L-X instructions are treated a little specially, as explained above:
626 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
627
628 `objdump -d' and some other tools show somewhat unexpected offsets:
629 original PC byte where starts the instruction objdump offset
630 0xABCDE0 0xABCDE0 0xABCDE0
631 0xABCDE1 0xABCDE5 0xABCDE6
632 0xABCDE2 0xABCDEA 0xABCDEC
633 */
634
635 #define IA64_BREAKPOINT 0x00003333300LL
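/* Consistency check of this pattern against the rules above: in the 41-bit
   value 0x00003333300, bit 36 (the most significant immediate bit) is clear
   and bit 25 (the next one) is set, as the runtime architecture requires for
   debugger break instructions; the remaining set bits are an immediate
   chosen to avoid the values already taken by the early simulators, as
   described above.  */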
636
637 static int
638 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
639 struct bp_target_info *bp_tgt)
640 {
641 CORE_ADDR addr = bp_tgt->placed_address;
642 gdb_byte bundle[BUNDLE_LEN];
643 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
644 long long instr_breakpoint;
645 int val;
646 int template;
647 struct cleanup *cleanup;
648
649 if (slotnum > 2)
650 error (_("Can't insert breakpoint for slot numbers greater than 2."));
651
652 addr &= ~0x0f;
653
654 /* Enable the automatic memory restoration from breakpoints while
655 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
656 Otherwise, we could possibly store into the shadow parts of the adjacent
657 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
658 breakpoint instruction bits region. */
659 cleanup = make_show_memory_breakpoints_cleanup (0);
660 val = target_read_memory (addr, bundle, BUNDLE_LEN);
661 if (val != 0)
662 {
663 do_cleanups (cleanup);
664 return val;
665 }
666
667 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
668 for addressing the SHADOW_CONTENTS placement. */
669 shadow_slotnum = slotnum;
670
671 /* Always cover the last byte of the bundle in case we are inserting
672 a breakpoint on an L-X instruction. */
673 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
674
675 template = extract_bit_field (bundle, 0, 5);
676 if (template_encoding_table[template][slotnum] == X)
677 {
678 /* X unit types can only be used in slot 2, and are actually
679 part of a 2-slot L-X instruction. We cannot break at this
680 address, as this is the second half of an instruction that
681 lives in slot 1 of that bundle. */
682 gdb_assert (slotnum == 2);
683 error (_("Can't insert breakpoint for non-existing slot X"));
684 }
685 if (template_encoding_table[template][slotnum] == L)
686 {
687 /* L unit types can only be used in slot 1. But the associated
688 opcode for that instruction is in slot 2, so bump the slot number
689 accordingly. */
690 gdb_assert (slotnum == 1);
691 slotnum = 2;
692 }
693
694 /* Store the whole bundle, except for the initial bytes skipped by the slot
695 number, which is interpreted as a byte offset from PLACED_ADDRESS. */
696 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
697 bp_tgt->shadow_len);
698
699 /* Re-read the same bundle as above except that, this time, read it in order
700 to compute the new bundle inside which we will be inserting the
701 breakpoint. Therefore, disable the automatic memory restoration from
702 breakpoints while we read our instruction bundle. Otherwise, the general
703 restoration mechanism kicks in and we would possibly remove parts of the
704 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
705 the real breakpoint instruction bits region. */
706 make_show_memory_breakpoints_cleanup (1);
707 val = target_read_memory (addr, bundle, BUNDLE_LEN);
708 if (val != 0)
709 {
710 do_cleanups (cleanup);
711 return val;
712 }
713
714 /* Breakpoints already present in the code will get detected and not get
715 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
716 location cannot induce the internal error as they are optimized into
717 a single instance by update_global_location_list. */
718 instr_breakpoint = slotN_contents (bundle, slotnum);
719 if (instr_breakpoint == IA64_BREAKPOINT)
720 internal_error (__FILE__, __LINE__,
721 _("Address %s already contains a breakpoint."),
722 paddress (gdbarch, bp_tgt->placed_address));
723 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
724
725 bp_tgt->placed_size = bp_tgt->shadow_len;
726
727 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
728 bp_tgt->shadow_len);
729
730 do_cleanups (cleanup);
731 return val;
732 }
733
734 static int
735 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
736 struct bp_target_info *bp_tgt)
737 {
738 CORE_ADDR addr = bp_tgt->placed_address;
739 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
740 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
741 long long instr_breakpoint, instr_saved;
742 int val;
743 int template;
744 struct cleanup *cleanup;
745
746 addr &= ~0x0f;
747
748 /* Disable the automatic memory restoration from breakpoints while
749 we read our instruction bundle. Otherwise, the general restoration
750 mechanism kicks in and we would possibly remove parts of the adjacent
751 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
752 breakpoint instruction bits region. */
753 cleanup = make_show_memory_breakpoints_cleanup (1);
754 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
755 if (val != 0)
756 {
757 do_cleanups (cleanup);
758 return val;
759 }
760
761 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
762 for addressing the SHADOW_CONTENTS placement. */
763 shadow_slotnum = slotnum;
764
765 template = extract_bit_field (bundle_mem, 0, 5);
766 if (template_encoding_table[template][slotnum] == X)
767 {
768 /* X unit types can only be used in slot 2, and are actually
769 part of a 2-slot L-X instruction. We refuse to insert
770 breakpoints at this address, so there should be no reason
771 for us attempting to remove one there, except if the program's
772 code somehow got modified in memory. */
773 gdb_assert (slotnum == 2);
774 warning (_("Cannot remove breakpoint at address %s from non-existing "
775 "X-type slot, memory has changed underneath"),
776 paddress (gdbarch, bp_tgt->placed_address));
777 do_cleanups (cleanup);
778 return -1;
779 }
780 if (template_encoding_table[template][slotnum] == L)
781 {
782 /* L unit types can only be used in slot 1. But the breakpoint
783 was actually saved using slot 2, so update the slot number
784 accordingly. */
785 gdb_assert (slotnum == 1);
786 slotnum = 2;
787 }
788
789 gdb_assert (bp_tgt->placed_size == BUNDLE_LEN - shadow_slotnum);
790 gdb_assert (bp_tgt->placed_size == bp_tgt->shadow_len);
791
792 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
793 if (instr_breakpoint != IA64_BREAKPOINT)
794 {
795 warning (_("Cannot remove breakpoint at address %s, "
796 "no break instruction at such address."),
797 paddress (gdbarch, bp_tgt->placed_address));
798 do_cleanups (cleanup);
799 return -1;
800 }
801
802 /* Extract the original saved instruction from SLOTNUM normalizing its
803 bit-shift for INSTR_SAVED. */
804 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
805 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
806 bp_tgt->shadow_len);
807 instr_saved = slotN_contents (bundle_saved, slotnum);
808
809 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
810 and not any of the other ones that are stored in SHADOW_CONTENTS. */
811 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
812 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
813
814 do_cleanups (cleanup);
815 return val;
816 }
817
818 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
819 instruction slot ranges are bit-granular (41 bits), we have to provide an
820 extended range as described for ia64_memory_insert_breakpoint. We also take
821 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
822 make a match for permanent breakpoints. */
823
824 static const gdb_byte *
825 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
826 CORE_ADDR *pcptr, int *lenptr)
827 {
828 CORE_ADDR addr = *pcptr;
829 static gdb_byte bundle[BUNDLE_LEN];
830 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
831 long long instr_fetched;
832 int val;
833 int template;
834 struct cleanup *cleanup;
835
836 if (slotnum > 2)
837 error (_("Can't insert breakpoint for slot numbers greater than 2."));
838
839 addr &= ~0x0f;
840
841 /* Enable the automatic memory restoration from breakpoints while
842 we read our instruction bundle to match bp_loc_is_permanent. */
843 cleanup = make_show_memory_breakpoints_cleanup (0);
844 val = target_read_memory (addr, bundle, BUNDLE_LEN);
845 do_cleanups (cleanup);
846
847 /* The memory might be unreachable. This can happen, for instance,
848 when the user inserts a breakpoint at an invalid address. */
849 if (val != 0)
850 return NULL;
851
852 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
853 for addressing the SHADOW_CONTENTS placement. */
854 shadow_slotnum = slotnum;
855
856 /* Always cover the last byte of the bundle for the L-X slot case. */
857 *lenptr = BUNDLE_LEN - shadow_slotnum;
858
859 /* Check for an L-type instruction in slot 1; if present, bump up the slot
860 number to slot 2. */
861 template = extract_bit_field (bundle, 0, 5);
862 if (template_encoding_table[template][slotnum] == X)
863 {
864 gdb_assert (slotnum == 2);
865 error (_("Can't insert breakpoint for non-existing slot X"));
866 }
867 if (template_encoding_table[template][slotnum] == L)
868 {
869 gdb_assert (slotnum == 1);
870 slotnum = 2;
871 }
872
873 /* A break instruction has all its opcode bits cleared except for
874 the parameter value. For an L+X slot pair we are at the X slot (slot 2) so
875 we should not touch the L slot - the upper 41 bits of the parameter. */
876 instr_fetched = slotN_contents (bundle, slotnum);
877 instr_fetched &= 0x1003ffffc0LL;
878 replace_slotN_contents (bundle, instr_fetched, slotnum);
879
880 return bundle + shadow_slotnum;
881 }
882
883 static CORE_ADDR
884 ia64_read_pc (struct regcache *regcache)
885 {
886 ULONGEST psr_value, pc_value;
887 int slot_num;
888
889 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
890 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &pc_value);
891 slot_num = (psr_value >> 41) & 3;
892
893 return pc_value | (slot_num * SLOT_MULTIPLIER);
894 }
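/* Example: if the raw ip register holds 0x4000000000000020 and psr bits
   41..42 (the current slot) hold 2, the PC reported to the rest of GDB is
   0x4000000000000022, in the slot-in-low-nibble form described at the top
   of this file.  */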
895
896 void
897 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
898 {
899 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
900 ULONGEST psr_value;
901
902 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
903 psr_value &= ~(3LL << 41);
904 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
905
906 new_pc &= ~0xfLL;
907
908 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
909 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
910 }
911
912 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
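/* That is, an address is a NaT collection slot when bits 3..8 are all ones:
   the 64th 8-byte slot of every 0x200-byte region, e.g. 0x11f8, 0x13f8, ...
   IS_NaT_COLLECTION_ADDR (0x11f8) is 1, while IS_NaT_COLLECTION_ADDR (0x1200)
   is 0.  */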
913
914 /* Returns the address of the slot that's NSLOTS slots away from
915 the address ADDR. NSLOTS may be positive or negative. */
916 static CORE_ADDR
917 rse_address_add(CORE_ADDR addr, int nslots)
918 {
919 CORE_ADDR new_addr;
920 int mandatory_nat_slots = nslots / 63;
921 int direction = nslots < 0 ? -1 : 1;
922
923 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
924
925 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
926 new_addr += 8 * direction;
927
928 if (IS_NaT_COLLECTION_ADDR(new_addr))
929 new_addr += 8 * direction;
930
931 return new_addr;
932 }
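/* Worked example: rse_address_add (0x1000, 63) yields 0x1200, not
   0x1000 + 63 * 8 == 0x11f8, because 0x11f8 is a NaT collection slot that the
   register stack engine skips, so the slot 63 slots away lands at 0x1200.
   (mandatory_nat_slots == 63 / 63 == 1, so eight extra bytes are added up
   front, and neither correction above fires for these values.)  */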
933
934 static enum register_status
935 ia64_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
936 int regnum, gdb_byte *buf)
937 {
938 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
939 enum register_status status;
940
941 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
942 {
943 #ifdef HAVE_LIBUNWIND_IA64_H
944 /* First try and use the libunwind special reg accessor,
945 otherwise fallback to standard logic. */
946 if (!libunwind_is_initialized ()
947 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
948 #endif
949 {
950 /* The fallback position is to assume that r32-r127 are
951 found sequentially in memory starting at $bof. This
952 isn't always true, but without libunwind, this is the
953 best we can do. */
954 enum register_status status;
955 ULONGEST cfm;
956 ULONGEST bsp;
957 CORE_ADDR reg;
958
959 status = regcache_cooked_read_unsigned (regcache,
960 IA64_BSP_REGNUM, &bsp);
961 if (status != REG_VALID)
962 return status;
963
964 status = regcache_cooked_read_unsigned (regcache,
965 IA64_CFM_REGNUM, &cfm);
966 if (status != REG_VALID)
967 return status;
968
969 /* The bsp points at the end of the register frame so we
970 subtract the size of frame from it to get start of
971 register frame. */
972 bsp = rse_address_add (bsp, -(cfm & 0x7f));
973
974 if ((cfm & 0x7f) > regnum - V32_REGNUM)
975 {
976 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
977 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
978 store_unsigned_integer (buf, register_size (gdbarch, regnum),
979 byte_order, reg);
980 }
981 else
982 store_unsigned_integer (buf, register_size (gdbarch, regnum),
983 byte_order, 0);
984 }
985 }
986 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
987 {
988 ULONGEST unatN_val;
989 ULONGEST unat;
990 status = regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
991 if (status != REG_VALID)
992 return status;
993 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
994 store_unsigned_integer (buf, register_size (gdbarch, regnum),
995 byte_order, unatN_val);
996 }
997 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
998 {
999 ULONGEST natN_val = 0;
1000 ULONGEST bsp;
1001 ULONGEST cfm;
1002 CORE_ADDR gr_addr = 0;
1003 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1004 if (status != REG_VALID)
1005 return status;
1006 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1007 if (status != REG_VALID)
1008 return status;
1009
1010 /* The bsp points at the end of the register frame so we
1011 subtract the size of frame from it to get start of register frame. */
1012 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1013
1014 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1015 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1016
1017 if (gr_addr != 0)
1018 {
1019 /* Compute address of nat collection bits. */
1020 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1021 CORE_ADDR nat_collection;
1022 int nat_bit;
1023 /* If our nat collection address is bigger than bsp, we have to get
1024 the nat collection from rnat. Otherwise, we fetch the nat
1025 collection from the computed address. */
1026 if (nat_addr >= bsp)
1027 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1028 &nat_collection);
1029 else
1030 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1031 nat_bit = (gr_addr >> 3) & 0x3f;
1032 natN_val = (nat_collection >> nat_bit) & 1;
1033 }
1034
1035 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1036 byte_order, natN_val);
1037 }
1038 else if (regnum == VBOF_REGNUM)
1039 {
1040 /* A virtual register frame start is provided for user convenience.
1041 It can be calculated as bsp - sof (the size of the frame). */
1042 ULONGEST bsp, vbsp;
1043 ULONGEST cfm;
1044 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1045 if (status != REG_VALID)
1046 return status;
1047 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1048 if (status != REG_VALID)
1049 return status;
1050
1051 /* The bsp points at the end of the register frame so we
1052 subtract the size of frame from it to get beginning of frame. */
1053 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1054 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1055 byte_order, vbsp);
1056 }
1057 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1058 {
1059 ULONGEST pr;
1060 ULONGEST cfm;
1061 ULONGEST prN_val;
1062 status = regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1063 if (status != REG_VALID)
1064 return status;
1065 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1066 if (status != REG_VALID)
1067 return status;
1068
1069 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1070 {
1071 /* Fetch predicate register rename base from current frame
1072 marker for this frame. */
1073 int rrb_pr = (cfm >> 32) & 0x3f;
1074
1075 /* Adjust the register number to account for register rotation. */
1076 regnum = VP16_REGNUM
1077 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1078 }
1079 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1080 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1081 byte_order, prN_val);
1082 }
1083 else
1084 memset (buf, 0, register_size (gdbarch, regnum));
1085
1086 return REG_VALID;
1087 }
1088
1089 static void
1090 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1091 int regnum, const gdb_byte *buf)
1092 {
1093 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1094
1095 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1096 {
1097 ULONGEST bsp;
1098 ULONGEST cfm;
1099 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1100 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1101
1102 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1103
1104 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1105 {
1106 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1107 write_memory (reg_addr, (void *) buf, 8);
1108 }
1109 }
1110 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1111 {
1112 ULONGEST unatN_val, unat, unatN_mask;
1113 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1114 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1115 regnum),
1116 byte_order);
1117 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1118 if (unatN_val == 0)
1119 unat &= ~unatN_mask;
1120 else if (unatN_val == 1)
1121 unat |= unatN_mask;
1122 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1123 }
1124 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1125 {
1126 ULONGEST natN_val;
1127 ULONGEST bsp;
1128 ULONGEST cfm;
1129 CORE_ADDR gr_addr = 0;
1130 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1131 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1132
1133 /* The bsp points at the end of the register frame so we
1134 subtract the size of frame from it to get start of register frame. */
1135 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1136
1137 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1138 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1139
1140 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1141 regnum),
1142 byte_order);
1143
1144 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1145 {
1146 /* Compute address of nat collection bits. */
1147 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1148 CORE_ADDR nat_collection;
1149 int natN_bit = (gr_addr >> 3) & 0x3f;
1150 ULONGEST natN_mask = (1LL << natN_bit);
1151 /* If our nat collection address is bigger than bsp, we have to get
1152 the nat collection from rnat. Otherwise, we fetch the nat
1153 collection from the computed address. */
1154 if (nat_addr >= bsp)
1155 {
1156 regcache_cooked_read_unsigned (regcache,
1157 IA64_RNAT_REGNUM,
1158 &nat_collection);
1159 if (natN_val)
1160 nat_collection |= natN_mask;
1161 else
1162 nat_collection &= ~natN_mask;
1163 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1164 nat_collection);
1165 }
1166 else
1167 {
1168 gdb_byte nat_buf[8];
1169 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1170 if (natN_val)
1171 nat_collection |= natN_mask;
1172 else
1173 nat_collection &= ~natN_mask;
1174 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1175 byte_order, nat_collection);
1176 write_memory (nat_addr, nat_buf, 8);
1177 }
1178 }
1179 }
1180 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1181 {
1182 ULONGEST pr;
1183 ULONGEST cfm;
1184 ULONGEST prN_val;
1185 ULONGEST prN_mask;
1186
1187 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1188 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1189
1190 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1191 {
1192 /* Fetch predicate register rename base from current frame
1193 marker for this frame. */
1194 int rrb_pr = (cfm >> 32) & 0x3f;
1195
1196 /* Adjust the register number to account for register rotation. */
1197 regnum = VP16_REGNUM
1198 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1199 }
1200 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1201 byte_order);
1202 prN_mask = (1LL << (regnum - VP0_REGNUM));
1203 if (prN_val == 0)
1204 pr &= ~prN_mask;
1205 else if (prN_val == 1)
1206 pr |= prN_mask;
1207 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1208 }
1209 }
1210
1211 /* The ia64 needs to convert between various IEEE floating-point formats
1212 and the special ia64 floating point register format. */
1213
1214 static int
1215 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1216 {
1217 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1218 && type != ia64_ext_type (gdbarch));
1219 }
1220
1221 static int
1222 ia64_register_to_value (struct frame_info *frame, int regnum,
1223 struct type *valtype, gdb_byte *out,
1224 int *optimizedp, int *unavailablep)
1225 {
1226 struct gdbarch *gdbarch = get_frame_arch (frame);
1227 gdb_byte in[MAX_REGISTER_SIZE];
1228
1229 /* Convert to TYPE. */
1230 if (!get_frame_register_bytes (frame, regnum, 0,
1231 register_size (gdbarch, regnum),
1232 in, optimizedp, unavailablep))
1233 return 0;
1234
1235 convert_typed_floating (in, ia64_ext_type (gdbarch), out, valtype);
1236 *optimizedp = *unavailablep = 0;
1237 return 1;
1238 }
1239
1240 static void
1241 ia64_value_to_register (struct frame_info *frame, int regnum,
1242 struct type *valtype, const gdb_byte *in)
1243 {
1244 struct gdbarch *gdbarch = get_frame_arch (frame);
1245 gdb_byte out[MAX_REGISTER_SIZE];
1246 convert_typed_floating (in, valtype, out, ia64_ext_type (gdbarch));
1247 put_frame_register (frame, regnum, out);
1248 }
1249
1250
1251 /* Limit the number of skipped non-prologue instructions since examining
1252 the prologue is expensive. */
1253 static int max_skip_non_prologue_insns = 40;
1254
1255 /* Given PC representing the starting address of a function, and
1256 LIM_PC which is the (sloppy) limit to which to scan when looking
1257 for a prologue, attempt to further refine this limit by using
1258 the line data in the symbol table. If successful, a better guess
1259 on where the prologue ends is returned, otherwise the previous
1260 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1261 which will be set to indicate whether the returned limit may be
1262 used with no further scanning in the event that the function is
1263 frameless. */
1264
1265 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1266 superseded by skip_prologue_using_sal. */
1267
1268 static CORE_ADDR
1269 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1270 {
1271 struct symtab_and_line prologue_sal;
1272 CORE_ADDR start_pc = pc;
1273 CORE_ADDR end_pc;
1274
1275 /* The prologue can not possibly go past the function end itself,
1276 so we can already adjust LIM_PC accordingly. */
1277 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1278 lim_pc = end_pc;
1279
1280 /* Start off not trusting the limit. */
1281 *trust_limit = 0;
1282
1283 prologue_sal = find_pc_line (pc, 0);
1284 if (prologue_sal.line != 0)
1285 {
1286 int i;
1287 CORE_ADDR addr = prologue_sal.end;
1288
1289 /* Handle the case in which compiler's optimizer/scheduler
1290 has moved instructions into the prologue. We scan ahead
1291 in the function looking for address ranges whose corresponding
1292 line number is less than or equal to the first one that we
1293 found for the function. (It can be less than when the
1294 scheduler puts a body instruction before the first prologue
1295 instruction.) */
1296 for (i = 2 * max_skip_non_prologue_insns;
1297 i > 0 && (lim_pc == 0 || addr < lim_pc);
1298 i--)
1299 {
1300 struct symtab_and_line sal;
1301
1302 sal = find_pc_line (addr, 0);
1303 if (sal.line == 0)
1304 break;
1305 if (sal.line <= prologue_sal.line
1306 && sal.symtab == prologue_sal.symtab)
1307 {
1308 prologue_sal = sal;
1309 }
1310 addr = sal.end;
1311 }
1312
1313 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1314 {
1315 lim_pc = prologue_sal.end;
1316 if (start_pc == get_pc_function_start (lim_pc))
1317 *trust_limit = 1;
1318 }
1319 }
1320 return lim_pc;
1321 }
1322
1323 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1324 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1325 || (14 <= (_regnum_) && (_regnum_) <= 31))
1326 #define imm9(_instr_) \
1327 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1328 | (((_instr_) & 0x00008000000LL) >> 20) \
1329 | (((_instr_) & 0x00000001fc0LL) >> 6))
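/* imm9 reassembles the signed 9-bit immediate of the post-increment store
   forms decoded below: bit 8 comes from instruction bit 36 (and provides the
   sign extension), bit 7 from instruction bit 27, and bits 0..6 from
   instruction bits 6..12.  For example, an instruction with only bit 27 set
   among those fields yields imm9 == 0x80 (128), and one with only bit 36 set
   yields -256.  */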
1330
1331 /* Allocate and initialize a frame cache. */
1332
1333 static struct ia64_frame_cache *
1334 ia64_alloc_frame_cache (void)
1335 {
1336 struct ia64_frame_cache *cache;
1337 int i;
1338
1339 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1340
1341 /* Base address. */
1342 cache->base = 0;
1343 cache->pc = 0;
1344 cache->cfm = 0;
1345 cache->prev_cfm = 0;
1346 cache->sof = 0;
1347 cache->sol = 0;
1348 cache->sor = 0;
1349 cache->bsp = 0;
1350 cache->fp_reg = 0;
1351 cache->frameless = 1;
1352
1353 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1354 cache->saved_regs[i] = 0;
1355
1356 return cache;
1357 }
1358
1359 static CORE_ADDR
1360 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1361 struct frame_info *this_frame,
1362 struct ia64_frame_cache *cache)
1363 {
1364 CORE_ADDR next_pc;
1365 CORE_ADDR last_prologue_pc = pc;
1366 instruction_type it;
1367 long long instr;
1368 int cfm_reg = 0;
1369 int ret_reg = 0;
1370 int fp_reg = 0;
1371 int unat_save_reg = 0;
1372 int pr_save_reg = 0;
1373 int mem_stack_frame_size = 0;
1374 int spill_reg = 0;
1375 CORE_ADDR spill_addr = 0;
1376 char instores[8];
1377 char infpstores[8];
1378 char reg_contents[256];
1379 int trust_limit;
1380 int frameless = 1;
1381 int i;
1382 CORE_ADDR addr;
1383 gdb_byte buf[8];
1384 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1385
1386 memset (instores, 0, sizeof instores);
1387 memset (infpstores, 0, sizeof infpstores);
1388 memset (reg_contents, 0, sizeof reg_contents);
1389
1390 if (cache->after_prologue != 0
1391 && cache->after_prologue <= lim_pc)
1392 return cache->after_prologue;
1393
1394 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1395 next_pc = fetch_instruction (pc, &it, &instr);
1396
1397 /* We want to check if we have a recognizable function start before we
1398 look ahead for a prologue. */
1399 if (pc < lim_pc && next_pc
1400 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1401 {
1402 /* alloc - start of a regular function. */
1403 int sor = (int) ((instr & 0x00078000000LL) >> 27);
1404 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1405 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1406 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1407
1408 /* Verify that the current cfm matches what we think is the
1409 function start. If we have somehow jumped within a function,
1410 we do not want to interpret the prologue and calculate the
1411 addresses of various registers such as the return address.
1412 We will instead treat the frame as frameless. */
1413 if (!this_frame ||
1414 (sof == (cache->cfm & 0x7f) &&
1415 sol == ((cache->cfm >> 7) & 0x7f)))
1416 frameless = 0;
1417
1418 cfm_reg = rN;
1419 last_prologue_pc = next_pc;
1420 pc = next_pc;
1421 }
1422 else
1423 {
1424 /* Look for a leaf routine. */
1425 if (pc < lim_pc && next_pc
1426 && (it == I || it == M)
1427 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1428 {
1429 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1430 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1431 | ((instr & 0x001f8000000LL) >> 20)
1432 | ((instr & 0x000000fe000LL) >> 13));
1433 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1434 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1435 int qp = (int) (instr & 0x0000000003fLL);
1436 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1437 {
1438 /* mov r2, r12 - beginning of leaf routine. */
1439 fp_reg = rN;
1440 last_prologue_pc = next_pc;
1441 }
1442 }
1443
1444 /* If we don't recognize a regular function or leaf routine, we are
1445 done. */
1446 if (!fp_reg)
1447 {
1448 pc = lim_pc;
1449 if (trust_limit)
1450 last_prologue_pc = lim_pc;
1451 }
1452 }
1453
1454 /* Loop, looking for prologue instructions, keeping track of
1455 where preserved registers were spilled. */
1456 while (pc < lim_pc)
1457 {
1458 next_pc = fetch_instruction (pc, &it, &instr);
1459 if (next_pc == 0)
1460 break;
1461
1462 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1463 {
1464 /* Exit loop upon hitting a non-nop branch instruction. */
1465 if (trust_limit)
1466 lim_pc = pc;
1467 break;
1468 }
1469 else if (((instr & 0x3fLL) != 0LL) &&
1470 (frameless || ret_reg != 0))
1471 {
1472 /* Exit loop upon hitting a predicated instruction if
1473 we already have the return register or if we are frameless. */
1474 if (trust_limit)
1475 lim_pc = pc;
1476 break;
1477 }
1478 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1479 {
1480 /* Move from BR */
1481 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1482 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1483 int qp = (int) (instr & 0x0000000003f);
1484
1485 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1486 {
1487 ret_reg = rN;
1488 last_prologue_pc = next_pc;
1489 }
1490 }
1491 else if ((it == I || it == M)
1492 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1493 {
1494 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1495 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1496 | ((instr & 0x001f8000000LL) >> 20)
1497 | ((instr & 0x000000fe000LL) >> 13));
1498 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1499 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1500 int qp = (int) (instr & 0x0000000003fLL);
1501
1502 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1503 {
1504 /* mov rN, r12 */
1505 fp_reg = rN;
1506 last_prologue_pc = next_pc;
1507 }
1508 else if (qp == 0 && rN == 12 && rM == 12)
1509 {
1510 /* adds r12, -mem_stack_frame_size, r12 */
1511 mem_stack_frame_size -= imm;
1512 last_prologue_pc = next_pc;
1513 }
1514 else if (qp == 0 && rN == 2
1515 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1516 {
1517 gdb_byte buf[MAX_REGISTER_SIZE];
1518 CORE_ADDR saved_sp = 0;
1519 /* adds r2, spilloffset, rFramePointer
1520 or
1521 adds r2, spilloffset, r12
1522
1523 Get ready for stf.spill or st8.spill instructions.
1524 The address to start spilling at is loaded into r2.
1525 FIXME: Why r2? That's what gcc currently uses; it
1526 could well be different for other compilers. */
1527
1528 /* Hmm... whether or not this will work will depend on
1529 where the pc is. If it's still early in the prologue
1530 this'll be wrong. FIXME */
1531 if (this_frame)
1532 {
1533 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1534 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1535 get_frame_register (this_frame, sp_regnum, buf);
1536 saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1537 }
1538 spill_addr = saved_sp
1539 + (rM == 12 ? 0 : mem_stack_frame_size)
1540 + imm;
1541 spill_reg = rN;
1542 last_prologue_pc = next_pc;
1543 }
1544 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1545 rN < 256 && imm == 0)
1546 {
1547 /* mov rN, rM where rM is an input register. */
1548 reg_contents[rN] = rM;
1549 last_prologue_pc = next_pc;
1550 }
1551 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1552 rM == 2)
1553 {
1554 /* mov r12, r2 */
1555 last_prologue_pc = next_pc;
1556 break;
1557 }
1558 }
1559 else if (it == M
1560 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1561 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1562 {
1563 /* stf.spill [rN] = fM, imm9
1564 or
1565 stf.spill [rN] = fM */
1566
1567 int imm = imm9(instr);
1568 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1569 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1570 int qp = (int) (instr & 0x0000000003fLL);
1571 if (qp == 0 && rN == spill_reg && spill_addr != 0
1572 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1573 {
1574 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1575
1576 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1577 spill_addr += imm;
1578 else
1579 spill_addr = 0; /* last one; must be done. */
1580 last_prologue_pc = next_pc;
1581 }
1582 }
1583 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1584 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1585 {
1586 /* mov.m rN = arM
1587 or
1588 mov.i rN = arM */
1589
1590 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1591 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1592 int qp = (int) (instr & 0x0000000003fLL);
1593 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1594 {
1595 /* We have something like "mov.m r3 = ar.unat". Remember the
1596 r3 (or whatever) and watch for a store of this register... */
1597 unat_save_reg = rN;
1598 last_prologue_pc = next_pc;
1599 }
1600 }
1601 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1602 {
1603 /* mov rN = pr */
1604 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1605 int qp = (int) (instr & 0x0000000003fLL);
1606 if (qp == 0 && isScratch (rN))
1607 {
1608 pr_save_reg = rN;
1609 last_prologue_pc = next_pc;
1610 }
1611 }
1612 else if (it == M
1613 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1614 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1615 {
1616 /* st8 [rN] = rM
1617 or
1618 st8 [rN] = rM, imm9 */
1619 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1620 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1621 int qp = (int) (instr & 0x0000000003fLL);
1622 int indirect = rM < 256 ? reg_contents[rM] : 0;
1623 if (qp == 0 && rN == spill_reg && spill_addr != 0
1624 && (rM == unat_save_reg || rM == pr_save_reg))
1625 {
1626 /* We've found a spill of either the UNAT register or the PR
1627 register. (Well, not exactly; what we've actually found is
1628 a spill of the register that UNAT or PR was moved to).
1629 Record that fact and move on... */
1630 if (rM == unat_save_reg)
1631 {
1632 /* Track UNAT register. */
1633 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1634 unat_save_reg = 0;
1635 }
1636 else
1637 {
1638 /* Track PR register. */
1639 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1640 pr_save_reg = 0;
1641 }
1642 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1643 /* st8 [rN] = rM, imm9 */
1644 spill_addr += imm9(instr);
1645 else
1646 spill_addr = 0; /* Must be done spilling. */
1647 last_prologue_pc = next_pc;
1648 }
1649 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1650 {
1651 /* Allow up to one store of each input register. */
1652 instores[rM-32] = 1;
1653 last_prologue_pc = next_pc;
1654 }
1655 	  else if (qp == 0 && 32 <= indirect && indirect < 40
1656 		   && !instores[indirect-32])
1657 {
1658 /* Allow an indirect store of an input register. */
1659 instores[indirect-32] = 1;
1660 last_prologue_pc = next_pc;
1661 }
1662 }
1663 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1664 {
1665 /* One of
1666 st1 [rN] = rM
1667 st2 [rN] = rM
1668 st4 [rN] = rM
1669 st8 [rN] = rM
1670 Note that the st8 case is handled in the clause above.
1671
1672 Advance over stores of input registers. One store per input
1673 register is permitted. */
1674 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1675 int qp = (int) (instr & 0x0000000003fLL);
1676 int indirect = rM < 256 ? reg_contents[rM] : 0;
1677 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1678 {
1679 instores[rM-32] = 1;
1680 last_prologue_pc = next_pc;
1681 }
1682 	  else if (qp == 0 && 32 <= indirect && indirect < 40
1683 		   && !instores[indirect-32])
1684 {
1685 /* Allow an indirect store of an input register. */
1686 instores[indirect-32] = 1;
1687 last_prologue_pc = next_pc;
1688 }
1689 }
1690 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1691 {
1692 /* Either
1693 stfs [rN] = fM
1694 or
1695 stfd [rN] = fM
1696
1697 Advance over stores of floating point input registers. Again
1698 one store per register is permitted. */
1699 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1700 int qp = (int) (instr & 0x0000000003fLL);
1701 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1702 {
1703 infpstores[fM-8] = 1;
1704 last_prologue_pc = next_pc;
1705 }
1706 }
1707 else if (it == M
1708 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1709 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1710 {
1711 /* st8.spill [rN] = rM
1712 or
1713 st8.spill [rN] = rM, imm9 */
1714 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1715 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1716 int qp = (int) (instr & 0x0000000003fLL);
1717 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1718 {
1719 /* We've found a spill of one of the preserved general purpose
1720 regs. Record the spill address and advance the spill
1721 register if appropriate. */
1722 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1723 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1724 /* st8.spill [rN] = rM, imm9 */
1725 spill_addr += imm9(instr);
1726 else
1727 spill_addr = 0; /* Done spilling. */
1728 last_prologue_pc = next_pc;
1729 }
1730 }
1731
1732 pc = next_pc;
1733 }
1734
1735 /* If not frameless and we aren't called by skip_prologue, then we need
1736 to calculate registers for the previous frame which will be needed
1737 later. */
1738
1739 if (!frameless && this_frame)
1740 {
1741 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1742 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1743
1744 /* Extract the size of the rotating portion of the stack
1745 frame and the register rename base from the current
1746 frame marker. */
1747 cfm = cache->cfm;
1748 sor = cache->sor;
1749 sof = cache->sof;
1750 sol = cache->sol;
1751 rrb_gr = (cfm >> 18) & 0x7f;
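      /* Illustrative note (not part of the original comments): the CFM
         fields decoded here follow the architected layout, sof in bits
         0..6, sol in bits 7..13, sor in bits 14..17 (in units of 8
         registers) and rrb.gr in bits 18..24.  For example, cfm == 0x420a
         would give sof = 10, sol = 4 and sor = 8: a ten-register frame
         with four locals and eight rotating registers.  */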
1752
1753 /* Find the bof (beginning of frame). */
1754 bof = rse_address_add (cache->bsp, -sof);
1755
1756 for (i = 0, addr = bof;
1757 i < sof;
1758 i++, addr += 8)
1759 {
1760 if (IS_NaT_COLLECTION_ADDR (addr))
1761 {
1762 addr += 8;
1763 }
1764 if (i+32 == cfm_reg)
1765 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1766 if (i+32 == ret_reg)
1767 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1768 if (i+32 == fp_reg)
1769 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1770 }
1771
1772 /* For the previous argument registers we require the previous bof.
1773 If we can't find the previous cfm, then we can do nothing. */
1774 cfm = 0;
1775 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1776 {
1777 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1778 8, byte_order);
1779 }
1780 else if (cfm_reg != 0)
1781 {
1782 get_frame_register (this_frame, cfm_reg, buf);
1783 cfm = extract_unsigned_integer (buf, 8, byte_order);
1784 }
1785 cache->prev_cfm = cfm;
1786
1787 if (cfm != 0)
1788 {
1789 sor = ((cfm >> 14) & 0xf) * 8;
1790 sof = (cfm & 0x7f);
1791 sol = (cfm >> 7) & 0x7f;
1792 rrb_gr = (cfm >> 18) & 0x7f;
1793
1794 /* The previous bof only requires subtraction of the sol (size of
1795 locals) due to the overlap between output and input of
1796 subsequent frames. */
1797 bof = rse_address_add (bof, -sol);
1798
1799 for (i = 0, addr = bof;
1800 i < sof;
1801 i++, addr += 8)
1802 {
1803 if (IS_NaT_COLLECTION_ADDR (addr))
1804 {
1805 addr += 8;
1806 }
1807 if (i < sor)
1808 cache->saved_regs[IA64_GR32_REGNUM
1809 + ((i + (sor - rrb_gr)) % sor)]
1810 = addr;
1811 else
1812 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1813 }
1814
1815 }
1816 }
1817
1818   /* Try to trust the lim_pc value whenever possible.  */
1819 if (trust_limit && lim_pc >= last_prologue_pc)
1820 last_prologue_pc = lim_pc;
1821
1822 cache->frameless = frameless;
1823 cache->after_prologue = last_prologue_pc;
1824 cache->mem_stack_frame_size = mem_stack_frame_size;
1825 cache->fp_reg = fp_reg;
1826
1827 return last_prologue_pc;
1828 }
1829
1830 CORE_ADDR
1831 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1832 {
1833 struct ia64_frame_cache cache;
1834 cache.base = 0;
1835 cache.after_prologue = 0;
1836 cache.cfm = 0;
1837 cache.bsp = 0;
1838
1839   /* Call examine_prologue with 0 as the third argument since we don't
1840      have a this_frame pointer to pass.  */
1841 return examine_prologue (pc, pc+1024, 0, &cache);
1842 }
1843
1844
1845 /* Normal frames. */
1846
1847 static struct ia64_frame_cache *
1848 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1849 {
1850 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1851 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1852 struct ia64_frame_cache *cache;
1853 gdb_byte buf[8];
1854 CORE_ADDR cfm, psr;
1855
1856 if (*this_cache)
1857 return *this_cache;
1858
1859 cache = ia64_alloc_frame_cache ();
1860 *this_cache = cache;
1861
1862 get_frame_register (this_frame, sp_regnum, buf);
1863 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1864
1865 /* We always want the bsp to point to the end of frame.
1866 This way, we can always get the beginning of frame (bof)
1867 by subtracting frame size. */
1868 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1869 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1870
1871 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1872 psr = extract_unsigned_integer (buf, 8, byte_order);
1873
1874 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1875 cfm = extract_unsigned_integer (buf, 8, byte_order);
1876
1877 cache->sof = (cfm & 0x7f);
1878 cache->sol = (cfm >> 7) & 0x7f;
1879 cache->sor = ((cfm >> 14) & 0xf) * 8;
1880
1881 cache->cfm = cfm;
1882
1883 cache->pc = get_frame_func (this_frame);
1884
1885 if (cache->pc != 0)
1886 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1887
1888 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1889
1890 return cache;
1891 }
1892
1893 static void
1894 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1895 struct frame_id *this_id)
1896 {
1897 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1898 struct ia64_frame_cache *cache =
1899 ia64_frame_cache (this_frame, this_cache);
1900
1901   /* The outermost frame (base == 0) keeps the null frame id.  */
1902 if (cache->base != 0)
1903 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1904 if (gdbarch_debug >= 1)
1905 fprintf_unfiltered (gdb_stdlog,
1906 "regular frame id: code %s, stack %s, "
1907 "special %s, this_frame %s\n",
1908 paddress (gdbarch, this_id->code_addr),
1909 paddress (gdbarch, this_id->stack_addr),
1910 paddress (gdbarch, cache->bsp),
1911 host_address_to_string (this_frame));
1912 }
1913
1914 static struct value *
1915 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1916 int regnum)
1917 {
1918 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1919 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1920 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1921 gdb_byte buf[8];
1922
1923 gdb_assert (regnum >= 0);
1924
1925 if (!target_has_registers)
1926 error (_("No registers."));
1927
1928 if (regnum == gdbarch_sp_regnum (gdbarch))
1929 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1930
1931 else if (regnum == IA64_BSP_REGNUM)
1932 {
1933 struct value *val;
1934 CORE_ADDR prev_cfm, bsp, prev_bsp;
1935
1936 /* We want to calculate the previous bsp as the end of the previous
1937 register stack frame. This corresponds to what the hardware bsp
1938 register will be if we pop the frame back which is why we might
1939 have been called. We know the beginning of the current frame is
1940 cache->bsp - cache->sof. This value in the previous frame points
1941 to the start of the output registers. We can calculate the end of
1942 that frame by adding the size of output:
1943 (sof (size of frame) - sol (size of locals)). */
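      /* For example (illustrative): if the previous frame's CFM gives
         sof = 10 and sol = 6, that frame had four output registers, so
         the previous bsp lies four register slots (skipping any
         intervening NaT collection) past the beginning of the current
         frame.  */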
1944 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1945 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1946 8, byte_order);
1947 bsp = rse_address_add (cache->bsp, -(cache->sof));
1948 prev_bsp =
1949 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1950
1951 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1952 }
1953
1954 else if (regnum == IA64_CFM_REGNUM)
1955 {
1956 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1957
1958 if (addr != 0)
1959 return frame_unwind_got_memory (this_frame, regnum, addr);
1960
1961 if (cache->prev_cfm)
1962 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1963
1964 if (cache->frameless)
1965 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1966 IA64_PFS_REGNUM);
1967 return frame_unwind_got_register (this_frame, regnum, 0);
1968 }
1969
1970 else if (regnum == IA64_VFP_REGNUM)
1971 {
1972 /* If the function in question uses an automatic register (r32-r127)
1973 for the frame pointer, it'll be found by ia64_find_saved_register()
1974 above. If the function lacks one of these frame pointers, we can
1975 still provide a value since we know the size of the frame. */
1976 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1977 }
1978
1979 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1980 {
1981 struct value *pr_val;
1982 ULONGEST prN;
1983
1984 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1985 IA64_PR_REGNUM);
1986 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1987 {
1988 /* Fetch predicate register rename base from current frame
1989 marker for this frame. */
1990 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1991
1992 /* Adjust the register number to account for register rotation. */
1993 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1994 }
1995 prN = extract_bit_field (value_contents_all (pr_val),
1996 regnum - VP0_REGNUM, 1);
1997 return frame_unwind_got_constant (this_frame, regnum, prN);
1998 }
1999
2000 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
2001 {
2002 struct value *unat_val;
2003 ULONGEST unatN;
2004 unat_val = ia64_frame_prev_register (this_frame, this_cache,
2005 IA64_UNAT_REGNUM);
2006 unatN = extract_bit_field (value_contents_all (unat_val),
2007 regnum - IA64_NAT0_REGNUM, 1);
2008 return frame_unwind_got_constant (this_frame, regnum, unatN);
2009 }
2010
2011 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2012 {
2013 int natval = 0;
2014 /* Find address of general register corresponding to nat bit we're
2015 interested in. */
2016 CORE_ADDR gr_addr;
2017
2018 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2019
2020 if (gr_addr != 0)
2021 {
2022 /* Compute address of nat collection bits. */
2023 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2024 CORE_ADDR bsp;
2025 CORE_ADDR nat_collection;
2026 int nat_bit;
2027
2028 /* If our nat collection address is bigger than bsp, we have to get
2029 the nat collection from rnat. Otherwise, we fetch the nat
2030 collection from the computed address. */
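	  /* Worked example (illustrative): if gr_addr ends in 0x0a8, its
	     slot number is (0x0a8 >> 3) & 0x3f == 21, so the NaT bit is
	     bit 21 of the collection word at gr_addr | 0x1f8, the last
	     slot of the enclosing 64-slot group.  */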
2031 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2032 bsp = extract_unsigned_integer (buf, 8, byte_order);
2033 if (nat_addr >= bsp)
2034 {
2035 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2036 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2037 }
2038 else
2039 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2040 nat_bit = (gr_addr >> 3) & 0x3f;
2041 natval = (nat_collection >> nat_bit) & 1;
2042 }
2043
2044 return frame_unwind_got_constant (this_frame, regnum, natval);
2045 }
2046
2047 else if (regnum == IA64_IP_REGNUM)
2048 {
2049 CORE_ADDR pc = 0;
2050 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2051
2052 if (addr != 0)
2053 {
2054 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2055 pc = extract_unsigned_integer (buf, 8, byte_order);
2056 }
2057 else if (cache->frameless)
2058 {
2059 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2060 pc = extract_unsigned_integer (buf, 8, byte_order);
2061 }
2062 pc &= ~0xf;
2063 return frame_unwind_got_constant (this_frame, regnum, pc);
2064 }
2065
2066 else if (regnum == IA64_PSR_REGNUM)
2067 {
2068 /* We don't know how to get the complete previous PSR, but we need it
2069 for the slot information when we unwind the pc (pc is formed of IP
2070 register plus slot information from PSR). To get the previous
2071 	 slot information, we take it from the low bits of the return address.  */
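      /* Illustrative example: PSR.ri occupies bits 41:42, so if the saved
         return address's low bits encode slot 2, the code below clears
         bits 41:42 of the unwound PSR and ors in 2 << 41.  */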
2072 ULONGEST slot_num = 0;
2073 CORE_ADDR pc = 0;
2074 CORE_ADDR psr = 0;
2075 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2076
2077 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2078 psr = extract_unsigned_integer (buf, 8, byte_order);
2079
2080 if (addr != 0)
2081 {
2082 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2083 pc = extract_unsigned_integer (buf, 8, byte_order);
2084 }
2085 else if (cache->frameless)
2086 {
2087 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2088 pc = extract_unsigned_integer (buf, 8, byte_order);
2089 }
2090 psr &= ~(3LL << 41);
2091 slot_num = pc & 0x3LL;
2092 psr |= (CORE_ADDR)slot_num << 41;
2093 return frame_unwind_got_constant (this_frame, regnum, psr);
2094 }
2095
2096 else if (regnum == IA64_BR0_REGNUM)
2097 {
2098 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2099
2100 if (addr != 0)
2101 return frame_unwind_got_memory (this_frame, regnum, addr);
2102
2103 return frame_unwind_got_constant (this_frame, regnum, 0);
2104 }
2105
2106 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2107 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2108 {
2109 CORE_ADDR addr = 0;
2110
2111 if (regnum >= V32_REGNUM)
2112 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2113 addr = cache->saved_regs[regnum];
2114 if (addr != 0)
2115 return frame_unwind_got_memory (this_frame, regnum, addr);
2116
2117 if (cache->frameless)
2118 {
2119 struct value *reg_val;
2120 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2121
2122 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2123 with the same code above? */
2124 if (regnum >= V32_REGNUM)
2125 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2126 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2127 IA64_CFM_REGNUM);
2128 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2129 8, byte_order);
2130 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2131 IA64_BSP_REGNUM);
2132 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2133 8, byte_order);
2134 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2135
2136 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2137 return frame_unwind_got_memory (this_frame, regnum, addr);
2138 }
2139
2140 return frame_unwind_got_constant (this_frame, regnum, 0);
2141 }
2142
2143 else /* All other registers. */
2144 {
2145 CORE_ADDR addr = 0;
2146
2147 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2148 {
2149 /* Fetch floating point register rename base from current
2150 frame marker for this frame. */
2151 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2152
2153 /* Adjust the floating point register number to account for
2154 register rotation. */
2155 regnum = IA64_FR32_REGNUM
2156 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2157 }
2158
2159 /* If we have stored a memory address, access the register. */
2160 addr = cache->saved_regs[regnum];
2161 if (addr != 0)
2162 return frame_unwind_got_memory (this_frame, regnum, addr);
2163 /* Otherwise, punt and get the current value of the register. */
2164 else
2165 return frame_unwind_got_register (this_frame, regnum, regnum);
2166 }
2167 }
2168
2169 static const struct frame_unwind ia64_frame_unwind =
2170 {
2171 NORMAL_FRAME,
2172 default_frame_unwind_stop_reason,
2173 &ia64_frame_this_id,
2174 &ia64_frame_prev_register,
2175 NULL,
2176 default_frame_sniffer
2177 };
2178
2179 /* Signal trampolines. */
2180
2181 static void
2182 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2183 struct ia64_frame_cache *cache)
2184 {
2185 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2186 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2187
2188 if (tdep->sigcontext_register_address)
2189 {
2190 int regno;
2191
2192 cache->saved_regs[IA64_VRAP_REGNUM]
2193 = tdep->sigcontext_register_address (gdbarch, cache->base,
2194 IA64_IP_REGNUM);
2195 cache->saved_regs[IA64_CFM_REGNUM]
2196 = tdep->sigcontext_register_address (gdbarch, cache->base,
2197 IA64_CFM_REGNUM);
2198 cache->saved_regs[IA64_PSR_REGNUM]
2199 = tdep->sigcontext_register_address (gdbarch, cache->base,
2200 IA64_PSR_REGNUM);
2201 cache->saved_regs[IA64_BSP_REGNUM]
2202 = tdep->sigcontext_register_address (gdbarch, cache->base,
2203 IA64_BSP_REGNUM);
2204 cache->saved_regs[IA64_RNAT_REGNUM]
2205 = tdep->sigcontext_register_address (gdbarch, cache->base,
2206 IA64_RNAT_REGNUM);
2207 cache->saved_regs[IA64_CCV_REGNUM]
2208 = tdep->sigcontext_register_address (gdbarch, cache->base,
2209 IA64_CCV_REGNUM);
2210 cache->saved_regs[IA64_UNAT_REGNUM]
2211 = tdep->sigcontext_register_address (gdbarch, cache->base,
2212 IA64_UNAT_REGNUM);
2213 cache->saved_regs[IA64_FPSR_REGNUM]
2214 = tdep->sigcontext_register_address (gdbarch, cache->base,
2215 IA64_FPSR_REGNUM);
2216 cache->saved_regs[IA64_PFS_REGNUM]
2217 = tdep->sigcontext_register_address (gdbarch, cache->base,
2218 IA64_PFS_REGNUM);
2219 cache->saved_regs[IA64_LC_REGNUM]
2220 = tdep->sigcontext_register_address (gdbarch, cache->base,
2221 IA64_LC_REGNUM);
2222
2223 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2224 cache->saved_regs[regno] =
2225 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2226 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2227 cache->saved_regs[regno] =
2228 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2229 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2230 cache->saved_regs[regno] =
2231 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2232 }
2233 }
2234
2235 static struct ia64_frame_cache *
2236 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2237 {
2238 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2239 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2240 struct ia64_frame_cache *cache;
2241 gdb_byte buf[8];
2242
2243 if (*this_cache)
2244 return *this_cache;
2245
2246 cache = ia64_alloc_frame_cache ();
2247
2248 get_frame_register (this_frame, sp_regnum, buf);
2249 /* Note that frame size is hard-coded below. We cannot calculate it
2250 via prologue examination. */
2251 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2252
2253 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2254 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2255
2256 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2257 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2258 cache->sof = cache->cfm & 0x7f;
2259
2260 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2261
2262 *this_cache = cache;
2263 return cache;
2264 }
2265
2266 static void
2267 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2268 void **this_cache, struct frame_id *this_id)
2269 {
2270 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2271 struct ia64_frame_cache *cache =
2272 ia64_sigtramp_frame_cache (this_frame, this_cache);
2273
2274 (*this_id) = frame_id_build_special (cache->base,
2275 get_frame_pc (this_frame),
2276 cache->bsp);
2277 if (gdbarch_debug >= 1)
2278 fprintf_unfiltered (gdb_stdlog,
2279 "sigtramp frame id: code %s, stack %s, "
2280 "special %s, this_frame %s\n",
2281 paddress (gdbarch, this_id->code_addr),
2282 paddress (gdbarch, this_id->stack_addr),
2283 paddress (gdbarch, cache->bsp),
2284 host_address_to_string (this_frame));
2285 }
2286
2287 static struct value *
2288 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2289 void **this_cache, int regnum)
2290 {
2291 gdb_byte buf[MAX_REGISTER_SIZE];
2292
2293 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2294 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2295 struct ia64_frame_cache *cache =
2296 ia64_sigtramp_frame_cache (this_frame, this_cache);
2297
2298 gdb_assert (regnum >= 0);
2299
2300 if (!target_has_registers)
2301 error (_("No registers."));
2302
2303 if (regnum == IA64_IP_REGNUM)
2304 {
2305 CORE_ADDR pc = 0;
2306 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2307
2308 if (addr != 0)
2309 {
2310 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2311 pc = extract_unsigned_integer (buf, 8, byte_order);
2312 }
2313 pc &= ~0xf;
2314 return frame_unwind_got_constant (this_frame, regnum, pc);
2315 }
2316
2317 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2318 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2319 {
2320 CORE_ADDR addr = 0;
2321
2322 if (regnum >= V32_REGNUM)
2323 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2324 addr = cache->saved_regs[regnum];
2325 if (addr != 0)
2326 return frame_unwind_got_memory (this_frame, regnum, addr);
2327
2328 return frame_unwind_got_constant (this_frame, regnum, 0);
2329 }
2330
2331 else /* All other registers not listed above. */
2332 {
2333 CORE_ADDR addr = cache->saved_regs[regnum];
2334
2335 if (addr != 0)
2336 return frame_unwind_got_memory (this_frame, regnum, addr);
2337
2338 return frame_unwind_got_constant (this_frame, regnum, 0);
2339 }
2340 }
2341
2342 static int
2343 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2344 struct frame_info *this_frame,
2345 void **this_cache)
2346 {
2347 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2348 if (tdep->pc_in_sigtramp)
2349 {
2350 CORE_ADDR pc = get_frame_pc (this_frame);
2351
2352 if (tdep->pc_in_sigtramp (pc))
2353 return 1;
2354 }
2355
2356 return 0;
2357 }
2358
2359 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2360 {
2361 SIGTRAMP_FRAME,
2362 default_frame_unwind_stop_reason,
2363 ia64_sigtramp_frame_this_id,
2364 ia64_sigtramp_frame_prev_register,
2365 NULL,
2366 ia64_sigtramp_frame_sniffer
2367 };
2368
2369 \f
2370
2371 static CORE_ADDR
2372 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2373 {
2374 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2375
2376 return cache->base;
2377 }
2378
2379 static const struct frame_base ia64_frame_base =
2380 {
2381 &ia64_frame_unwind,
2382 ia64_frame_base_address,
2383 ia64_frame_base_address,
2384 ia64_frame_base_address
2385 };
2386
2387 #ifdef HAVE_LIBUNWIND_IA64_H
2388
2389 struct ia64_unwind_table_entry
2390 {
2391 unw_word_t start_offset;
2392 unw_word_t end_offset;
2393 unw_word_t info_offset;
2394 };
2395
2396 static __inline__ uint64_t
2397 ia64_rse_slot_num (uint64_t addr)
2398 {
2399 return (addr >> 3) & 0x3f;
2400 }
2401
2402 /* Skip over a designated number of registers in the backing
2403    store, remembering that every 64th slot holds a NaT collection.  */
2404 static __inline__ uint64_t
2405 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2406 {
2407 long delta = ia64_rse_slot_num(addr) + num_regs;
2408
2409 if (num_regs < 0)
2410 delta -= 0x3e;
2411 return addr + ((num_regs + delta/0x3f) << 3);
2412 }
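/* For example (illustrative): starting from an address whose slot number
   is 0, skipping 70 registers advances by 71 slots (568 bytes), because
   slot 63 of that group holds the NaT collection and is stepped over.  */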
2413
2414 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2415 register number to a libunwind register number. */
2416 static int
2417 ia64_gdb2uw_regnum (int regnum)
2418 {
2419 if (regnum == sp_regnum)
2420 return UNW_IA64_SP;
2421 else if (regnum == IA64_BSP_REGNUM)
2422 return UNW_IA64_BSP;
2423 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2424 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2425 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2426 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2427 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2428 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2429 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2430 return -1;
2431 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2432 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2433 else if (regnum == IA64_PR_REGNUM)
2434 return UNW_IA64_PR;
2435 else if (regnum == IA64_IP_REGNUM)
2436 return UNW_REG_IP;
2437 else if (regnum == IA64_CFM_REGNUM)
2438 return UNW_IA64_CFM;
2439 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2440 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2441 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2442 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2443 else
2444 return -1;
2445 }
2446
2447 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2448    register number to an ia64 gdb register number.  */
2449 static int
2450 ia64_uw2gdb_regnum (int uw_regnum)
2451 {
2452 if (uw_regnum == UNW_IA64_SP)
2453 return sp_regnum;
2454 else if (uw_regnum == UNW_IA64_BSP)
2455 return IA64_BSP_REGNUM;
2456 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2457 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2458 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2459 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2460 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2461 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2462 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2463 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2464 else if (uw_regnum == UNW_IA64_PR)
2465 return IA64_PR_REGNUM;
2466 else if (uw_regnum == UNW_REG_IP)
2467 return IA64_IP_REGNUM;
2468 else if (uw_regnum == UNW_IA64_CFM)
2469 return IA64_CFM_REGNUM;
2470 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2471 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2472 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2473 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2474 else
2475 return -1;
2476 }
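/* Illustrative note: the two mappings above are inverses for the
   registers libunwind models directly, e.g. ia64_uw2gdb_regnum
   (ia64_gdb2uw_regnum (IA64_BSP_REGNUM)) == IA64_BSP_REGNUM.  Individual
   predicate registers deliberately map to -1; they are accessed through
   UNW_IA64_PR as a single unit instead.  */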
2477
2478 /* Gdb ia64-libunwind-tdep callback function telling whether a register
2479    is a floating-point register.  */
2480 static int
2481 ia64_is_fpreg (int uw_regnum)
2482 {
2483 return unw_is_fpreg (uw_regnum);
2484 }
2485
2486 /* Libunwind callback accessor function for general registers. */
2487 static int
2488 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2489 int write, void *arg)
2490 {
2491 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2492 unw_word_t bsp, sof, sol, cfm, psr, ip;
2493 struct frame_info *this_frame = arg;
2494 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2495 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2496 long new_sof, old_sof;
2497 gdb_byte buf[MAX_REGISTER_SIZE];
2498
2499 /* We never call any libunwind routines that need to write registers. */
2500 gdb_assert (!write);
2501
2502 switch (uw_regnum)
2503 {
2504 case UNW_REG_IP:
2505 /* Libunwind expects to see the pc value which means the slot number
2506 from the psr must be merged with the ip word address. */
2507 get_frame_register (this_frame, IA64_IP_REGNUM, buf);
2508 ip = extract_unsigned_integer (buf, 8, byte_order);
2509 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2510 psr = extract_unsigned_integer (buf, 8, byte_order);
2511 *val = ip | ((psr >> 41) & 0x3);
2512 break;
2513
2514 case UNW_IA64_AR_BSP:
2515 /* Libunwind expects to see the beginning of the current
2516 register frame so we must account for the fact that
2517 ptrace() will return a value for bsp that points *after*
2518 the current register frame. */
2519 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2520 bsp = extract_unsigned_integer (buf, 8, byte_order);
2521 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2522 cfm = extract_unsigned_integer (buf, 8, byte_order);
2523 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2524 *val = ia64_rse_skip_regs (bsp, -sof);
2525 break;
2526
2527 case UNW_IA64_AR_BSPSTORE:
2528 /* Libunwind wants bspstore to be after the current register frame.
2529 	 This is what ptrace() and gdb treat as the regular bsp value.  */
2530 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2531 *val = extract_unsigned_integer (buf, 8, byte_order);
2532 break;
2533
2534 default:
2535 /* For all other registers, just unwind the value directly. */
2536 get_frame_register (this_frame, regnum, buf);
2537 *val = extract_unsigned_integer (buf, 8, byte_order);
2538 break;
2539 }
2540
2541 if (gdbarch_debug >= 1)
2542 fprintf_unfiltered (gdb_stdlog,
2543 " access_reg: from cache: %4s=%s\n",
2544 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2545 ? ia64_register_names[regnum] : "r??"),
2546 paddress (gdbarch, *val));
2547 return 0;
2548 }
2549
2550 /* Libunwind callback accessor function for floating-point registers. */
2551 static int
2552 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2553 unw_fpreg_t *val, int write, void *arg)
2554 {
2555 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2556 struct frame_info *this_frame = arg;
2557
2558 /* We never call any libunwind routines that need to write registers. */
2559 gdb_assert (!write);
2560
2561 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2562
2563 return 0;
2564 }
2565
2566 /* Libunwind callback accessor function for top-level rse registers. */
2567 static int
2568 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2569 unw_word_t *val, int write, void *arg)
2570 {
2571 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2572 unw_word_t bsp, sof, sol, cfm, psr, ip;
2573 struct regcache *regcache = arg;
2574 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2575 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2576 long new_sof, old_sof;
2577 gdb_byte buf[MAX_REGISTER_SIZE];
2578
2579 /* We never call any libunwind routines that need to write registers. */
2580 gdb_assert (!write);
2581
2582 switch (uw_regnum)
2583 {
2584 case UNW_REG_IP:
2585 /* Libunwind expects to see the pc value which means the slot number
2586 from the psr must be merged with the ip word address. */
2587 regcache_cooked_read (regcache, IA64_IP_REGNUM, buf);
2588 ip = extract_unsigned_integer (buf, 8, byte_order);
2589 regcache_cooked_read (regcache, IA64_PSR_REGNUM, buf);
2590 psr = extract_unsigned_integer (buf, 8, byte_order);
2591 *val = ip | ((psr >> 41) & 0x3);
2592 break;
2593
2594 case UNW_IA64_AR_BSP:
2595 /* Libunwind expects to see the beginning of the current
2596 register frame so we must account for the fact that
2597 ptrace() will return a value for bsp that points *after*
2598 the current register frame. */
2599 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2600 bsp = extract_unsigned_integer (buf, 8, byte_order);
2601 regcache_cooked_read (regcache, IA64_CFM_REGNUM, buf);
2602 cfm = extract_unsigned_integer (buf, 8, byte_order);
2603 sof = (cfm & 0x7f);
2604 *val = ia64_rse_skip_regs (bsp, -sof);
2605 break;
2606
2607 case UNW_IA64_AR_BSPSTORE:
2608 /* Libunwind wants bspstore to be after the current register frame.
2609 	 This is what ptrace() and gdb treat as the regular bsp value.  */
2610 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2611 *val = extract_unsigned_integer (buf, 8, byte_order);
2612 break;
2613
2614 default:
2615 /* For all other registers, just unwind the value directly. */
2616 regcache_cooked_read (regcache, regnum, buf);
2617 *val = extract_unsigned_integer (buf, 8, byte_order);
2618 break;
2619 }
2620
2621 if (gdbarch_debug >= 1)
2622 fprintf_unfiltered (gdb_stdlog,
2623 " access_rse_reg: from cache: %4s=%s\n",
2624 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2625 ? ia64_register_names[regnum] : "r??"),
2626 paddress (gdbarch, *val));
2627
2628 return 0;
2629 }
2630
2631 /* Libunwind callback accessor function for top-level fp registers. */
2632 static int
2633 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2634 unw_fpreg_t *val, int write, void *arg)
2635 {
2636 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2637 struct regcache *regcache = arg;
2638
2639 /* We never call any libunwind routines that need to write registers. */
2640 gdb_assert (!write);
2641
2642 regcache_cooked_read (regcache, regnum, (gdb_byte *) val);
2643
2644 return 0;
2645 }
2646
2647 /* Libunwind callback accessor function for accessing memory. */
2648 static int
2649 ia64_access_mem (unw_addr_space_t as,
2650 unw_word_t addr, unw_word_t *val,
2651 int write, void *arg)
2652 {
2653 if (addr - KERNEL_START < ktab_size)
2654 {
2655 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2656 + (addr - KERNEL_START));
2657
2658 if (write)
2659 *laddr = *val;
2660 else
2661 *val = *laddr;
2662 return 0;
2663 }
2664
2665 /* XXX do we need to normalize byte-order here? */
2666 if (write)
2667 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2668 else
2669 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2670 }
2671
2672 /* Call low-level function to access the kernel unwind table. */
2673 static LONGEST
2674 getunwind_table (gdb_byte **buf_p)
2675 {
2676 LONGEST x;
2677
2678 /* FIXME drow/2005-09-10: This code used to call
2679 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2680 for the currently running ia64-linux kernel. That data should
2681 come from the core file and be accessed via the auxv vector; if
2682      we want to preserve the fallback to the running kernel's table, then
2683 we should find a way to override the corefile layer's
2684 xfer_partial method. */
2685
2686 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2687 NULL, buf_p);
2688
2689 return x;
2690 }
2691
2692 /* Get the kernel unwind table. */
2693 static int
2694 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2695 {
2696 static struct ia64_table_entry *etab;
2697
2698 if (!ktab)
2699 {
2700 gdb_byte *ktab_buf;
2701 LONGEST size;
2702
2703 size = getunwind_table (&ktab_buf);
2704 if (size <= 0)
2705 return -UNW_ENOINFO;
2706
2707 ktab = (struct ia64_table_entry *) ktab_buf;
2708 ktab_size = size;
2709
2710 for (etab = ktab; etab->start_offset; ++etab)
2711 etab->info_offset += KERNEL_START;
2712 }
2713
2714 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2715 return -UNW_ENOINFO;
2716
2717 di->format = UNW_INFO_FORMAT_TABLE;
2718 di->gp = 0;
2719 di->start_ip = ktab[0].start_offset;
2720 di->end_ip = etab[-1].end_offset;
2721 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2722 di->u.ti.segbase = 0;
2723 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2724 di->u.ti.table_data = (unw_word_t *) ktab;
2725
2726 if (gdbarch_debug >= 1)
2727 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2728 "segbase=%s, length=%s, gp=%s\n",
2729 (char *) di->u.ti.name_ptr,
2730 hex_string (di->u.ti.segbase),
2731 pulongest (di->u.ti.table_len),
2732 hex_string (di->gp));
2733 return 0;
2734 }
2735
2736 /* Find the unwind table entry for a specified address. */
2737 static int
2738 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2739 unw_dyn_info_t *dip, void **buf)
2740 {
2741 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2742 Elf_Internal_Ehdr *ehdr;
2743 unw_word_t segbase = 0;
2744 CORE_ADDR load_base;
2745 bfd *bfd;
2746 int i;
2747
2748 bfd = objfile->obfd;
2749
2750 ehdr = elf_tdata (bfd)->elf_header;
2751 phdr = elf_tdata (bfd)->phdr;
2752
2753 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2754
2755 for (i = 0; i < ehdr->e_phnum; ++i)
2756 {
2757 switch (phdr[i].p_type)
2758 {
2759 case PT_LOAD:
2760 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2761 < phdr[i].p_memsz)
2762 p_text = phdr + i;
2763 break;
2764
2765 case PT_IA_64_UNWIND:
2766 p_unwind = phdr + i;
2767 break;
2768
2769 default:
2770 break;
2771 }
2772 }
2773
2774 if (!p_text || !p_unwind)
2775 return -UNW_ENOINFO;
2776
2777 /* Verify that the segment that contains the IP also contains
2778 the static unwind table. If not, we may be in the Linux kernel's
2779      DSO gate page, in which case the unwind table is in another segment.
2780 Otherwise, we are dealing with runtime-generated code, for which we
2781 have no info here. */
2782 segbase = p_text->p_vaddr + load_base;
2783
2784 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2785 {
2786 int ok = 0;
2787 for (i = 0; i < ehdr->e_phnum; ++i)
2788 {
2789 if (phdr[i].p_type == PT_LOAD
2790 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2791 {
2792 ok = 1;
2793 /* Get the segbase from the section containing the
2794 libunwind table. */
2795 segbase = phdr[i].p_vaddr + load_base;
2796 }
2797 }
2798 if (!ok)
2799 return -UNW_ENOINFO;
2800 }
2801
2802 dip->start_ip = p_text->p_vaddr + load_base;
2803 dip->end_ip = dip->start_ip + p_text->p_memsz;
2804 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2805 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2806 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2807 dip->u.rti.segbase = segbase;
2808 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2809 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2810
2811 return 0;
2812 }
2813
2814 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2815 static int
2816 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2817 int need_unwind_info, void *arg)
2818 {
2819 struct obj_section *sec = find_pc_section (ip);
2820 unw_dyn_info_t di;
2821 int ret;
2822 void *buf = NULL;
2823
2824 if (!sec)
2825 {
2826 /* XXX This only works if the host and the target architecture are
2827          both ia64 and if they have (more or less) the same kernel
2828 version. */
2829 if (get_kernel_table (ip, &di) < 0)
2830 return -UNW_ENOINFO;
2831
2832 if (gdbarch_debug >= 1)
2833 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2834 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2835 "length=%s,data=%s)\n",
2836 hex_string (ip), (char *)di.u.ti.name_ptr,
2837 hex_string (di.u.ti.segbase),
2838 hex_string (di.start_ip), hex_string (di.end_ip),
2839 hex_string (di.gp),
2840 pulongest (di.u.ti.table_len),
2841 hex_string ((CORE_ADDR)di.u.ti.table_data));
2842 }
2843 else
2844 {
2845 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2846 if (ret < 0)
2847 return ret;
2848
2849 if (gdbarch_debug >= 1)
2850 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2851 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2852 "length=%s,data=%s)\n",
2853 hex_string (ip), (char *)di.u.rti.name_ptr,
2854 hex_string (di.u.rti.segbase),
2855 hex_string (di.start_ip), hex_string (di.end_ip),
2856 hex_string (di.gp),
2857 pulongest (di.u.rti.table_len),
2858 hex_string (di.u.rti.table_data));
2859 }
2860
2861 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2862 arg);
2863
2864 /* We no longer need the dyn info storage so free it. */
2865 xfree (buf);
2866
2867 return ret;
2868 }
2869
2870 /* Libunwind callback accessor function for cleanup. */
2871 static void
2872 ia64_put_unwind_info (unw_addr_space_t as,
2873 unw_proc_info_t *pip, void *arg)
2874 {
2875 /* Nothing required for now. */
2876 }
2877
2878 /* Libunwind callback accessor function to get head of the dynamic
2879 unwind-info registration list. */
2880 static int
2881 ia64_get_dyn_info_list (unw_addr_space_t as,
2882 unw_word_t *dilap, void *arg)
2883 {
2884 struct obj_section *text_sec;
2885 struct objfile *objfile;
2886 unw_word_t ip, addr;
2887 unw_dyn_info_t di;
2888 int ret;
2889
2890 if (!libunwind_is_initialized ())
2891 return -UNW_ENOINFO;
2892
2893 for (objfile = object_files; objfile; objfile = objfile->next)
2894 {
2895 void *buf = NULL;
2896
2897 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2898 ip = obj_section_addr (text_sec);
2899 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2900 if (ret >= 0)
2901 {
2902 addr = libunwind_find_dyn_list (as, &di, arg);
2903 /* We no longer need the dyn info storage so free it. */
2904 xfree (buf);
2905
2906 if (addr)
2907 {
2908 if (gdbarch_debug >= 1)
2909 fprintf_unfiltered (gdb_stdlog,
2910 "dynamic unwind table in objfile %s "
2911 "at %s (gp=%s)\n",
2912 bfd_get_filename (objfile->obfd),
2913 hex_string (addr), hex_string (di.gp));
2914 *dilap = addr;
2915 return 0;
2916 }
2917 }
2918 }
2919 return -UNW_ENOINFO;
2920 }
2921
2922
2923 /* Frame interface functions for libunwind. */
2924
2925 static void
2926 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2927 struct frame_id *this_id)
2928 {
2929 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2930 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2931 struct frame_id id = outer_frame_id;
2932 gdb_byte buf[8];
2933 CORE_ADDR bsp;
2934
2935 libunwind_frame_this_id (this_frame, this_cache, &id);
2936 if (frame_id_eq (id, outer_frame_id))
2937 {
2938 (*this_id) = outer_frame_id;
2939 return;
2940 }
2941
2942 /* We must add the bsp as the special address for frame comparison
2943 purposes. */
2944 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2945 bsp = extract_unsigned_integer (buf, 8, byte_order);
2946
2947 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2948
2949 if (gdbarch_debug >= 1)
2950 fprintf_unfiltered (gdb_stdlog,
2951 "libunwind frame id: code %s, stack %s, "
2952 "special %s, this_frame %s\n",
2953 paddress (gdbarch, id.code_addr),
2954 paddress (gdbarch, id.stack_addr),
2955 paddress (gdbarch, bsp),
2956 host_address_to_string (this_frame));
2957 }
2958
2959 static struct value *
2960 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2961 void **this_cache, int regnum)
2962 {
2963 int reg = regnum;
2964 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2965 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2966 struct value *val;
2967
2968 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2969 reg = IA64_PR_REGNUM;
2970 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2971 reg = IA64_UNAT_REGNUM;
2972
2973 /* Let libunwind do most of the work. */
2974 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2975
2976 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2977 {
2978 ULONGEST prN_val;
2979
2980 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2981 {
2982 int rrb_pr = 0;
2983 ULONGEST cfm;
2984 gdb_byte buf[MAX_REGISTER_SIZE];
2985
2986 /* Fetch predicate register rename base from current frame
2987 marker for this frame. */
2988 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2989 cfm = extract_unsigned_integer (buf, 8, byte_order);
2990 rrb_pr = (cfm >> 32) & 0x3f;
2991
2992 /* Adjust the register number to account for register rotation. */
2993 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2994 }
2995 prN_val = extract_bit_field (value_contents_all (val),
2996 regnum - VP0_REGNUM, 1);
2997 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2998 }
2999
3000 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
3001 {
3002 ULONGEST unatN_val;
3003
3004 unatN_val = extract_bit_field (value_contents_all (val),
3005 regnum - IA64_NAT0_REGNUM, 1);
3006 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
3007 }
3008
3009 else if (regnum == IA64_BSP_REGNUM)
3010 {
3011 struct value *cfm_val;
3012 CORE_ADDR prev_bsp, prev_cfm;
3013
3014 /* We want to calculate the previous bsp as the end of the previous
3015 register stack frame. This corresponds to what the hardware bsp
3016 register will be if we pop the frame back which is why we might
3017 have been called. We know that libunwind will pass us back the
3018 beginning of the current frame so we should just add sof to it. */
3019 prev_bsp = extract_unsigned_integer (value_contents_all (val),
3020 8, byte_order);
3021 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
3022 IA64_CFM_REGNUM);
3023 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
3024 8, byte_order);
3025 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
3026
3027 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
3028 }
3029 else
3030 return val;
3031 }
3032
3033 static int
3034 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3035 struct frame_info *this_frame,
3036 void **this_cache)
3037 {
3038 if (libunwind_is_initialized ()
3039 && libunwind_frame_sniffer (self, this_frame, this_cache))
3040 return 1;
3041
3042 return 0;
3043 }
3044
3045 static const struct frame_unwind ia64_libunwind_frame_unwind =
3046 {
3047 NORMAL_FRAME,
3048 default_frame_unwind_stop_reason,
3049 ia64_libunwind_frame_this_id,
3050 ia64_libunwind_frame_prev_register,
3051 NULL,
3052 ia64_libunwind_frame_sniffer,
3053 libunwind_frame_dealloc_cache
3054 };
3055
3056 static void
3057 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3058 void **this_cache,
3059 struct frame_id *this_id)
3060 {
3061 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3062 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3063 gdb_byte buf[8];
3064 CORE_ADDR bsp;
3065 struct frame_id id = outer_frame_id;
3066 CORE_ADDR prev_ip;
3067
3068 libunwind_frame_this_id (this_frame, this_cache, &id);
3069 if (frame_id_eq (id, outer_frame_id))
3070 {
3071 (*this_id) = outer_frame_id;
3072 return;
3073 }
3074
3075 /* We must add the bsp as the special address for frame comparison
3076 purposes. */
3077 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3078 bsp = extract_unsigned_integer (buf, 8, byte_order);
3079
3080 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3081 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3082
3083 if (gdbarch_debug >= 1)
3084 fprintf_unfiltered (gdb_stdlog,
3085 "libunwind sigtramp frame id: code %s, "
3086 "stack %s, special %s, this_frame %s\n",
3087 paddress (gdbarch, id.code_addr),
3088 paddress (gdbarch, id.stack_addr),
3089 paddress (gdbarch, bsp),
3090 host_address_to_string (this_frame));
3091 }
3092
3093 static struct value *
3094 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3095 void **this_cache, int regnum)
3096 {
3097 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3098 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3099 struct value *prev_ip_val;
3100 CORE_ADDR prev_ip;
3101
3102 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3103 method of getting previous registers. */
3104 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3105 IA64_IP_REGNUM);
3106 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3107 8, byte_order);
3108
3109 if (prev_ip == 0)
3110 {
3111 void *tmp_cache = NULL;
3112 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3113 regnum);
3114 }
3115 else
3116 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3117 }
3118
3119 static int
3120 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3121 struct frame_info *this_frame,
3122 void **this_cache)
3123 {
3124 if (libunwind_is_initialized ())
3125 {
3126 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3127 return 1;
3128 return 0;
3129 }
3130 else
3131 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3132 }
3133
3134 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3135 {
3136 SIGTRAMP_FRAME,
3137 default_frame_unwind_stop_reason,
3138 ia64_libunwind_sigtramp_frame_this_id,
3139 ia64_libunwind_sigtramp_frame_prev_register,
3140 NULL,
3141 ia64_libunwind_sigtramp_frame_sniffer
3142 };
3143
3144 /* Set of libunwind callback accessor functions.  */
3145 unw_accessors_t ia64_unw_accessors =
3146 {
3147 ia64_find_proc_info_x,
3148 ia64_put_unwind_info,
3149 ia64_get_dyn_info_list,
3150 ia64_access_mem,
3151 ia64_access_reg,
3152 ia64_access_fpreg,
3153 /* resume */
3154 /* get_proc_name */
3155 };
3156
3157 /* Set of special libunwind callback accessor functions for accessing
3158 the rse registers. At the top of the stack, we want libunwind to figure out
3159 how to read r32 - r127. Though usually they are found sequentially in
3160 memory starting from $bof, this is not always true. */
3161 unw_accessors_t ia64_unw_rse_accessors =
3162 {
3163 ia64_find_proc_info_x,
3164 ia64_put_unwind_info,
3165 ia64_get_dyn_info_list,
3166 ia64_access_mem,
3167 ia64_access_rse_reg,
3168 ia64_access_rse_fpreg,
3169 /* resume */
3170 /* get_proc_name */
3171 };
3172
3173 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3174 ia64-libunwind-tdep code to use. */
3175 struct libunwind_descr ia64_libunwind_descr =
3176 {
3177 ia64_gdb2uw_regnum,
3178 ia64_uw2gdb_regnum,
3179 ia64_is_fpreg,
3180 &ia64_unw_accessors,
3181 &ia64_unw_rse_accessors,
3182 };
3183
3184 #endif /* HAVE_LIBUNWIND_IA64_H */
3185
3186 static int
3187 ia64_use_struct_convention (struct type *type)
3188 {
3189 struct type *float_elt_type;
3190
3191 /* Don't use the struct convention for anything but structure,
3192 union, or array types. */
3193 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3194 || TYPE_CODE (type) == TYPE_CODE_UNION
3195 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3196 return 0;
3197
3198 /* HFAs are structures (or arrays) consisting entirely of floating
3199 point values of the same length. Up to 8 of these are returned
3200 in registers. Don't use the struct convention when this is the
3201 case. */
3202 float_elt_type = is_float_or_hfa_type (type);
3203 if (float_elt_type != NULL
3204 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3205 return 0;
3206
3207 /* Other structs of length 32 or less are returned in r8-r11.
3208 Don't use the struct convention for those either. */
3209 return TYPE_LENGTH (type) > 32;
3210 }
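/* For example (illustrative): a struct of four doubles is an HFA of
   length 32 and is returned in f8-f11, so the function above returns 0;
   a struct of five 8-byte integers (40 bytes) is returned via the struct
   convention, so it returns 1.  */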
3211
3212 /* Return non-zero if TYPE is a structure or union type. */
3213
3214 static int
3215 ia64_struct_type_p (const struct type *type)
3216 {
3217 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3218 || TYPE_CODE (type) == TYPE_CODE_UNION);
3219 }
3220
3221 static void
3222 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3223 gdb_byte *valbuf)
3224 {
3225 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3226 struct type *float_elt_type;
3227
3228 float_elt_type = is_float_or_hfa_type (type);
3229 if (float_elt_type != NULL)
3230 {
3231 gdb_byte from[MAX_REGISTER_SIZE];
3232 int offset = 0;
3233 int regnum = IA64_FR8_REGNUM;
3234 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3235
3236 while (n-- > 0)
3237 {
3238 regcache_cooked_read (regcache, regnum, from);
3239 convert_typed_floating (from, ia64_ext_type (gdbarch),
3240 (char *)valbuf + offset, float_elt_type);
3241 offset += TYPE_LENGTH (float_elt_type);
3242 regnum++;
3243 }
3244 }
3245 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3246 {
3247 /* This is an integral value, and its size is less than 8 bytes.
3248 These values are LSB-aligned, so extract the relevant bytes,
3249 and copy them into VALBUF. */
3250 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3251 so I suppose we should also add handling here for integral values
3252 whose size is greater than 8. But I wasn't able to create such
3253 	 a type, either in C or in Ada, so not worrying about these yet.  */
3254 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3255 ULONGEST val;
3256
3257 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3258 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3259 }
3260 else
3261 {
3262 ULONGEST val;
3263 int offset = 0;
3264 int regnum = IA64_GR8_REGNUM;
3265 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3266 int n = TYPE_LENGTH (type) / reglen;
3267 int m = TYPE_LENGTH (type) % reglen;
3268
3269 while (n-- > 0)
3270 {
3271 ULONGEST val;
3272 regcache_cooked_read_unsigned (regcache, regnum, &val);
3273 memcpy ((char *)valbuf + offset, &val, reglen);
3274 offset += reglen;
3275 regnum++;
3276 }
3277
3278 if (m)
3279 {
3280 regcache_cooked_read_unsigned (regcache, regnum, &val);
3281 memcpy ((char *)valbuf + offset, &val, m);
3282 }
3283 }
3284 }
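/* Illustrative example for the final branch above: a 12-byte struct is
   read with reglen == 8, n == 1 and m == 4, so its first eight bytes
   come from r8 and the remaining four bytes from r9.  */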
3285
3286 static void
3287 ia64_store_return_value (struct type *type, struct regcache *regcache,
3288 const gdb_byte *valbuf)
3289 {
3290 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3291 struct type *float_elt_type;
3292
3293 float_elt_type = is_float_or_hfa_type (type);
3294 if (float_elt_type != NULL)
3295 {
3296 gdb_byte to[MAX_REGISTER_SIZE];
3297 int offset = 0;
3298 int regnum = IA64_FR8_REGNUM;
3299 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3300
3301 while (n-- > 0)
3302 {
3303 convert_typed_floating ((char *)valbuf + offset, float_elt_type,
3304 to, ia64_ext_type (gdbarch));
3305 regcache_cooked_write (regcache, regnum, to);
3306 offset += TYPE_LENGTH (float_elt_type);
3307 regnum++;
3308 }
3309 }
3310 else
3311 {
3312 ULONGEST val;
3313 int offset = 0;
3314 int regnum = IA64_GR8_REGNUM;
3315 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3316 int n = TYPE_LENGTH (type) / reglen;
3317 int m = TYPE_LENGTH (type) % reglen;
3318
3319 while (n-- > 0)
3320 {
3321 ULONGEST val;
3322 memcpy (&val, (char *)valbuf + offset, reglen);
3323 regcache_cooked_write_unsigned (regcache, regnum, val);
3324 offset += reglen;
3325 regnum++;
3326 }
3327
3328 if (m)
3329 {
3330 memcpy (&val, (char *)valbuf + offset, m);
3331 regcache_cooked_write_unsigned (regcache, regnum, val);
3332 }
3333 }
3334 }
3335
3336 static enum return_value_convention
3337 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3338 struct type *valtype, struct regcache *regcache,
3339 gdb_byte *readbuf, const gdb_byte *writebuf)
3340 {
3341 int struct_return = ia64_use_struct_convention (valtype);
3342
3343 if (writebuf != NULL)
3344 {
3345 gdb_assert (!struct_return);
3346 ia64_store_return_value (valtype, regcache, writebuf);
3347 }
3348
3349 if (readbuf != NULL)
3350 {
3351 gdb_assert (!struct_return);
3352 ia64_extract_return_value (valtype, regcache, readbuf);
3353 }
3354
3355 if (struct_return)
3356 return RETURN_VALUE_STRUCT_CONVENTION;
3357 else
3358 return RETURN_VALUE_REGISTER_CONVENTION;
3359 }
3360
3361 static int
3362 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3363 {
3364 switch (TYPE_CODE (t))
3365 {
3366 case TYPE_CODE_FLT:
3367 if (*etp)
3368 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3369 else
3370 {
3371 *etp = t;
3372 return 1;
3373 }
3374 break;
3375 case TYPE_CODE_ARRAY:
3376 return
3377 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3378 etp);
3379 break;
3380 case TYPE_CODE_STRUCT:
3381 {
3382 int i;
3383
3384 for (i = 0; i < TYPE_NFIELDS (t); i++)
3385 if (!is_float_or_hfa_type_recurse
3386 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3387 return 0;
3388 return 1;
3389 }
3390 break;
3391 default:
3392 return 0;
3393 break;
3394 }
3395 }
3396
3397 /* Determine if the given type is one of the floating point types or
3398    an HFA (which is a struct, array, or combination thereof whose
3399 bottom-most elements are all of the same floating point type). */
3400
3401 static struct type *
3402 is_float_or_hfa_type (struct type *t)
3403 {
3404 struct type *et = 0;
3405
3406 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3407 }
3408
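/* Illustrative (hypothetical) types for the HFA test above: an HFA is any
   struct/array nesting whose leaf fields are all floating point and all of
   the same length.  These declarations are examples only, not GDB code.  */

struct hfa_pair  { double re, im; };        /* HFA: two doubles            */
struct hfa_array { float v[4]; };           /* HFA: four floats            */
struct not_hfa_1 { float a; double b; };    /* not an HFA: mixed lengths   */
struct not_hfa_2 { double a; long b; };     /* not an HFA: non-float leaf  */
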
3409
3410 /* Return 1 if the alignment of T is such that the next even slot
3411    should be used.  Return 0 if the next available slot should
3412 be used. (See section 8.5.1 of the IA-64 Software Conventions
3413 and Runtime manual). */
3414
3415 static int
3416 slot_alignment_is_next_even (struct type *t)
3417 {
3418 switch (TYPE_CODE (t))
3419 {
3420 case TYPE_CODE_INT:
3421 case TYPE_CODE_FLT:
3422 if (TYPE_LENGTH (t) > 8)
3423 return 1;
3424 else
3425 return 0;
3426 case TYPE_CODE_ARRAY:
3427 return
3428 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3429 case TYPE_CODE_STRUCT:
3430 {
3431 int i;
3432
3433 for (i = 0; i < TYPE_NFIELDS (t); i++)
3434 if (slot_alignment_is_next_even
3435 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3436 return 1;
3437 return 0;
3438 }
3439 default:
3440 return 0;
3441 }
3442 }
3443
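/* Hypothetical parameter types and the slot alignment the routine above
   would pick for them, assuming the type sizes configured for ia64 further
   down in ia64_gdbarch_init (128-bit long double, 64-bit long).  Examples
   only, not GDB code.  */

void takes_ld   (long double x);  /* 16-byte FLT: starts on the next even slot */
void takes_i128 (__int128 x);     /* 16-byte INT (GCC extension): next even    */
void takes_ll   (long long x);    /* 8-byte INT: uses the next free slot       */

struct with_ld { long long a; long double b; };
void takes_agg  (struct with_ld x);  /* contains a 16-byte member: next even   */
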
3444 /* Attempt to find (and return) the global pointer for the given
3445 function.
3446
3447    This is a rather nasty bit of code that searches for the .dynamic section
3448 in the objfile corresponding to the pc of the function we're trying
3449 to call. Once it finds the addresses at which the .dynamic section
3450 lives in the child process, it scans the Elf64_Dyn entries for a
3451 DT_PLTGOT tag. If it finds one of these, the corresponding
3452 d_un.d_ptr value is the global pointer. */
3453
3454 static CORE_ADDR
3455 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3456 CORE_ADDR faddr)
3457 {
3458 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3459 struct obj_section *faddr_sect;
3460
3461 faddr_sect = find_pc_section (faddr);
3462 if (faddr_sect != NULL)
3463 {
3464 struct obj_section *osect;
3465
3466 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3467 {
3468 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3469 break;
3470 }
3471
3472 if (osect < faddr_sect->objfile->sections_end)
3473 {
3474 CORE_ADDR addr, endaddr;
3475
3476 addr = obj_section_addr (osect);
3477 endaddr = obj_section_endaddr (osect);
3478
3479 while (addr < endaddr)
3480 {
3481 int status;
3482 LONGEST tag;
3483 gdb_byte buf[8];
3484
3485 status = target_read_memory (addr, buf, sizeof (buf));
3486 if (status != 0)
3487 break;
3488 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3489
3490 if (tag == DT_PLTGOT)
3491 {
3492 CORE_ADDR global_pointer;
3493
3494 status = target_read_memory (addr + 8, buf, sizeof (buf));
3495 if (status != 0)
3496 break;
3497 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3498 byte_order);
3499
3500 /* The payoff... */
3501 return global_pointer;
3502 }
3503
3504 if (tag == DT_NULL)
3505 break;
3506
3507 addr += 16;
3508 }
3509 }
3510 }
3511 return 0;
3512 }
3513
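/* A self-contained sketch of the DT_PLTGOT scan described above, operating
   on an in-memory copy of a .dynamic section instead of target memory.  The
   function name is ours; the real code above reads the tag and value out of
   the inferior with target_read_memory and steps through the table 16 bytes
   at a time.  */

#include <elf.h>
#include <stdint.h>

static uint64_t
scan_dynamic_for_pltgot (const Elf64_Dyn *dyn)
{
  /* Each entry is an 8-byte tag followed by an 8-byte value; the list
     is terminated by a DT_NULL tag.  */
  for (; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_PLTGOT)
      return dyn->d_un.d_ptr;   /* the global pointer */

  return 0;
}
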
3514 /* Attempt to find (and return) the global pointer for the given
3515 function. We first try the find_global_pointer_from_solib routine
3516 from the gdbarch tdep vector, if provided. And if that does not
3517 work, then we try ia64_find_global_pointer_from_dynamic_section. */
3518
3519 static CORE_ADDR
3520 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3521 {
3522 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3523 CORE_ADDR addr = 0;
3524
3525 if (tdep->find_global_pointer_from_solib)
3526 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3527 if (addr == 0)
3528 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3529 return addr;
3530 }
3531
3532 /* Given a function's address, attempt to find (and return) the
3533 corresponding (canonical) function descriptor. Return 0 if
3534 not found. */
3535 static CORE_ADDR
3536 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3537 {
3538 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3539 struct obj_section *faddr_sect;
3540
3541   /* Return early if FADDR is already a function descriptor.  */
3542 faddr_sect = find_pc_section (faddr);
3543 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3544 return faddr;
3545
3546 if (faddr_sect != NULL)
3547 {
3548 struct obj_section *osect;
3549 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3550 {
3551 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3552 break;
3553 }
3554
3555 if (osect < faddr_sect->objfile->sections_end)
3556 {
3557 CORE_ADDR addr, endaddr;
3558
3559 addr = obj_section_addr (osect);
3560 endaddr = obj_section_endaddr (osect);
3561
3562 while (addr < endaddr)
3563 {
3564 int status;
3565 LONGEST faddr2;
3566 gdb_byte buf[8];
3567
3568 status = target_read_memory (addr, buf, sizeof (buf));
3569 if (status != 0)
3570 break;
3571 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3572
3573 if (faddr == faddr2)
3574 return addr;
3575
3576 addr += 16;
3577 }
3578 }
3579 }
3580 return 0;
3581 }
3582
3583 /* Attempt to find a function descriptor corresponding to the
3584 given address. If none is found, construct one on the
3585 stack using the address at fdaptr. */
3586
3587 static CORE_ADDR
3588 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3589 {
3590 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3591 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3592 CORE_ADDR fdesc;
3593
3594 fdesc = find_extant_func_descr (gdbarch, faddr);
3595
3596 if (fdesc == 0)
3597 {
3598 ULONGEST global_pointer;
3599 gdb_byte buf[16];
3600
3601 fdesc = *fdaptr;
3602 *fdaptr += 16;
3603
3604 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3605
3606 if (global_pointer == 0)
3607 regcache_cooked_read_unsigned (regcache,
3608 IA64_GR1_REGNUM, &global_pointer);
3609
3610 store_unsigned_integer (buf, 8, byte_order, faddr);
3611 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3612
3613 write_memory (fdesc, buf, 16);
3614 }
3615
3616 return fdesc;
3617 }
3618
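/* An illustrative layout of the 16-byte function descriptor that
   find_func_descr constructs above; the struct and field names are ours,
   not GDB's or the runtime's.  */

#include <stdint.h>

struct ia64_fdesc_example
{
  uint64_t entry;  /* address of the function's first bundle (FADDR)  */
  uint64_t gp;     /* global pointer for the function's load module   */
};
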
3619 /* Use the following routine when printing out function pointers
3620 so the user can see the function address rather than just the
3621 function descriptor. */
3622 static CORE_ADDR
3623 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3624 struct target_ops *targ)
3625 {
3626 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3627 struct obj_section *s;
3628 gdb_byte buf[8];
3629
3630 s = find_pc_section (addr);
3631
3632   /* Check if ADDR points to a function descriptor.  */
3633 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3634 return read_memory_unsigned_integer (addr, 8, byte_order);
3635
3636 /* Normally, functions live inside a section that is executable.
3637 So, if ADDR points to a non-executable section, then treat it
3638 as a function descriptor and return the target address iff
3639 the target address itself points to a section that is executable.
3640      Check first that the whole 8 bytes of memory are readable.  */
3641 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3642 && target_read_memory (addr, buf, 8) == 0)
3643 {
3644 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3645 struct obj_section *pc_section = find_pc_section (pc);
3646
3647 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3648 return pc;
3649 }
3650
3651 /* There are also descriptors embedded in vtables. */
3652 if (s)
3653 {
3654 struct bound_minimal_symbol minsym;
3655
3656 minsym = lookup_minimal_symbol_by_pc (addr);
3657
3658 if (minsym.minsym
3659 && is_vtable_name (MSYMBOL_LINKAGE_NAME (minsym.minsym)))
3660 return read_memory_unsigned_integer (addr, 8, byte_order);
3661 }
3662
3663 return addr;
3664 }
3665
3666 static CORE_ADDR
3667 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3668 {
3669 return sp & ~0xfLL;
3670 }
3671
3672 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3673
3674 static void
3675 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3676 {
3677 ULONGEST cfm, pfs, new_bsp;
3678
3679 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3680
3681 new_bsp = rse_address_add (bsp, sof);
3682 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3683
3684 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3685 pfs &= 0xc000000000000000LL;
3686 pfs |= (cfm & 0xffffffffffffLL);
3687 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3688
3689 cfm &= 0xc000000000000000LL;
3690 cfm |= sof;
3691 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3692 }
3693
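/* The constants used by ia64_allocate_new_rse_frame above, restated as
   named masks for readability.  The names are ours; "sof" (size of frame)
   occupying the low 7 bits of CFM matches ia64_size_of_register_frame
   below, while the exact meaning of the preserved top two bits is left
   to the architecture manual.  A sketch, not GDB code.  */

#include <stdint.h>

#define EXAMPLE_SOF_MASK       UINT64_C (0x000000000000007f)  /* low 7 bits: sof        */
#define EXAMPLE_FRAME_MASK     UINT64_C (0x0000ffffffffffff)  /* CFM bits copied to PFS */
#define EXAMPLE_TOP_BITS_MASK  UINT64_C (0xc000000000000000)  /* bits 62-63 preserved   */
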
3694 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3695 ia64. */
3696
3697 static void
3698 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3699 int slotnum, gdb_byte *buf)
3700 {
3701 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3702 }
3703
3704 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3705
3706 static void
3707 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3708 {
3709 /* Nothing needed. */
3710 }
3711
3712 static CORE_ADDR
3713 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3714 struct regcache *regcache, CORE_ADDR bp_addr,
3715 int nargs, struct value **args, CORE_ADDR sp,
3716 int struct_return, CORE_ADDR struct_addr)
3717 {
3718 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3719 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3720 int argno;
3721 struct value *arg;
3722 struct type *type;
3723 int len, argoffset;
3724 int nslots, rseslots, memslots, slotnum, nfuncargs;
3725 int floatreg;
3726 ULONGEST bsp;
3727 CORE_ADDR funcdescaddr, pc, global_pointer;
3728 CORE_ADDR func_addr = find_function_addr (function, NULL);
3729
3730 nslots = 0;
3731 nfuncargs = 0;
3732 /* Count the number of slots needed for the arguments. */
3733 for (argno = 0; argno < nargs; argno++)
3734 {
3735 arg = args[argno];
3736 type = check_typedef (value_type (arg));
3737 len = TYPE_LENGTH (type);
3738
3739 if ((nslots & 1) && slot_alignment_is_next_even (type))
3740 nslots++;
3741
3742 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3743 nfuncargs++;
3744
3745 nslots += (len + 7) / 8;
3746 }
3747
3748 /* Divvy up the slots between the RSE and the memory stack. */
3749 rseslots = (nslots > 8) ? 8 : nslots;
3750 memslots = nslots - rseslots;
3751
3752 /* Allocate a new RSE frame. */
3753 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3754 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3755
3756 /* We will attempt to find function descriptors in the .opd segment,
3757 but if we can't we'll construct them ourselves. That being the
3758 case, we'll need to reserve space on the stack for them. */
3759 funcdescaddr = sp - nfuncargs * 16;
3760 funcdescaddr &= ~0xfLL;
3761
3762   /* Adjust the stack pointer to its new value.  The calling conventions
3763 require us to have 16 bytes of scratch, plus whatever space is
3764 necessary for the memory slots and our function descriptors. */
3765 sp = sp - 16 - (memslots + nfuncargs) * 8;
3766   sp &= ~0xfLL;	/* Maintain 16-byte alignment.  */
3767
3768 /* Place the arguments where they belong. The arguments will be
3769 either placed in the RSE backing store or on the memory stack.
3770 In addition, floating point arguments or HFAs are placed in
3771 floating point registers. */
3772 slotnum = 0;
3773 floatreg = IA64_FR8_REGNUM;
3774 for (argno = 0; argno < nargs; argno++)
3775 {
3776 struct type *float_elt_type;
3777
3778 arg = args[argno];
3779 type = check_typedef (value_type (arg));
3780 len = TYPE_LENGTH (type);
3781
3782 /* Special handling for function parameters. */
3783 if (len == 8
3784 && TYPE_CODE (type) == TYPE_CODE_PTR
3785 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3786 {
3787 gdb_byte val_buf[8];
3788 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3789 8, byte_order);
3790 store_unsigned_integer (val_buf, 8, byte_order,
3791 find_func_descr (regcache, faddr,
3792 &funcdescaddr));
3793 if (slotnum < rseslots)
3794 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3795 slotnum, val_buf);
3796 else
3797 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3798 slotnum++;
3799 continue;
3800 }
3801
3802 /* Normal slots. */
3803
3804 /* Skip odd slot if necessary... */
3805 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3806 slotnum++;
3807
3808 argoffset = 0;
3809 while (len > 0)
3810 {
3811 gdb_byte val_buf[8];
3812
3813 memset (val_buf, 0, 8);
3814 if (!ia64_struct_type_p (type) && len < 8)
3815 {
3816 /* Integral types are LSB-aligned, so we have to be careful
3817 to insert the argument on the correct side of the buffer.
3818 This is why we use store_unsigned_integer. */
3819 store_unsigned_integer
3820 (val_buf, 8, byte_order,
3821 extract_unsigned_integer (value_contents (arg), len,
3822 byte_order));
3823 }
3824 else
3825 {
3826 	      /* This is either an 8-byte integral type, or an aggregate.
3827 		 For an 8-byte integral type, there is no problem: we just
3828 		 copy the value over.
3829 
3830 		 For aggregates, the only potentially tricky portion
3831 		 is to write the last block if it is less than 8 bytes.
3832 		 In this case, the data is Byte0-aligned.  Happily, this
3833 		 means that we don't need to differentiate the handling
3834 		 of 8-byte blocks and less-than-8-byte blocks.  */
3835 memcpy (val_buf, value_contents (arg) + argoffset,
3836 (len > 8) ? 8 : len);
3837 }
3838
3839 if (slotnum < rseslots)
3840 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3841 slotnum, val_buf);
3842 else
3843 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3844
3845 argoffset += 8;
3846 len -= 8;
3847 slotnum++;
3848 }
3849
3850 /* Handle floating point types (including HFAs). */
3851 float_elt_type = is_float_or_hfa_type (type);
3852 if (float_elt_type != NULL)
3853 {
3854 argoffset = 0;
3855 len = TYPE_LENGTH (type);
3856 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3857 {
3858 char to[MAX_REGISTER_SIZE];
3859 convert_typed_floating (value_contents (arg) + argoffset,
3860 float_elt_type, to,
3861 ia64_ext_type (gdbarch));
3862 regcache_cooked_write (regcache, floatreg, (void *)to);
3863 floatreg++;
3864 argoffset += TYPE_LENGTH (float_elt_type);
3865 len -= TYPE_LENGTH (float_elt_type);
3866 }
3867 }
3868 }
3869
3870 /* Store the struct return value in r8 if necessary. */
3871 if (struct_return)
3872 {
3873 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3874 (ULONGEST) struct_addr);
3875 }
3876
3877 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3878
3879 if (global_pointer != 0)
3880 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3881
3882 /* The following is not necessary on HP-UX, because we're using
3883 a dummy code sequence pushed on the stack to make the call, and
3884 this sequence doesn't need b0 to be set in order for our dummy
3885 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3886    it's needed for other OSes, so we do this unconditionally.  */
3887 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3888
3889 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3890
3891 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3892
3893 return sp;
3894 }
3895
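/* A tiny worked example of the argument-slot split performed in
   ia64_push_dummy_call above: with ten 8-byte scalar arguments, the first
   eight slots go to the RSE backing store and the remaining two to the
   memory stack.  Plain C, illustration only.  */

#include <assert.h>

int
main (void)
{
  int nslots = 10;                          /* ten 8-byte arguments */
  int rseslots = (nslots > 8) ? 8 : nslots;
  int memslots = nslots - rseslots;

  assert (rseslots == 8);
  assert (memslots == 2);
  return 0;
}
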
3896 static const struct ia64_infcall_ops ia64_infcall_ops =
3897 {
3898 ia64_allocate_new_rse_frame,
3899 ia64_store_argument_in_slot,
3900 ia64_set_function_addr
3901 };
3902
3903 static struct frame_id
3904 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3905 {
3906 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3907 gdb_byte buf[8];
3908 CORE_ADDR sp, bsp;
3909
3910 get_frame_register (this_frame, sp_regnum, buf);
3911 sp = extract_unsigned_integer (buf, 8, byte_order);
3912
3913 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3914 bsp = extract_unsigned_integer (buf, 8, byte_order);
3915
3916 if (gdbarch_debug >= 1)
3917 fprintf_unfiltered (gdb_stdlog,
3918 "dummy frame id: code %s, stack %s, special %s\n",
3919 paddress (gdbarch, get_frame_pc (this_frame)),
3920 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3921
3922 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3923 }
3924
3925 static CORE_ADDR
3926 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3927 {
3928 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3929 gdb_byte buf[8];
3930 CORE_ADDR ip, psr, pc;
3931
3932 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3933 ip = extract_unsigned_integer (buf, 8, byte_order);
3934 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3935 psr = extract_unsigned_integer (buf, 8, byte_order);
3936
3937 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3938 return pc;
3939 }
3940
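/* Illustration of the pc encoding computed by ia64_unwind_pc above: the
   bundle address with its low nibble cleared, or'd with the slot number
   taken from bits 41-42 of the PSR (the restart-instruction field, on our
   reading of the architecture).  The addresses below are made up.  */

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t ip  = UINT64_C (0x4000000000001230);  /* hypothetical bundle address */
  uint64_t psr = UINT64_C (2) << 41;             /* restart slot 2 */
  uint64_t pc  = (ip & ~UINT64_C (0xf)) | ((psr >> 41) & 3);

  assert (pc == UINT64_C (0x4000000000001232));
  return 0;
}
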
3941 static int
3942 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3943 {
3944 info->bytes_per_line = SLOT_MULTIPLIER;
3945 return print_insn_ia64 (memaddr, info);
3946 }
3947
3948 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3949
3950 static int
3951 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3952 {
3953 return (cfm & 0x7f);
3954 }
3955
3956 static struct gdbarch *
3957 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3958 {
3959 struct gdbarch *gdbarch;
3960 struct gdbarch_tdep *tdep;
3961
3962 /* If there is already a candidate, use it. */
3963 arches = gdbarch_list_lookup_by_info (arches, &info);
3964 if (arches != NULL)
3965 return arches->gdbarch;
3966
3967 tdep = xzalloc (sizeof (struct gdbarch_tdep));
3968 gdbarch = gdbarch_alloc (&info, tdep);
3969
3970 tdep->size_of_register_frame = ia64_size_of_register_frame;
3971
3972 /* According to the ia64 specs, instructions that store long double
3973      floats in memory use a long-double format different from that
3974      used in the floating registers.  The memory format matches the
3975      x86 extended float format, which is 80 bits.  An OS may choose to
3976      use this format (e.g. GNU/Linux) or choose to use a different
3977      format for storing long doubles (e.g. HP-UX).  In the latter case,
3978 the setting of the format may be moved/overridden in an
3979 OS-specific tdep file. */
3980 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3981
3982 set_gdbarch_short_bit (gdbarch, 16);
3983 set_gdbarch_int_bit (gdbarch, 32);
3984 set_gdbarch_long_bit (gdbarch, 64);
3985 set_gdbarch_long_long_bit (gdbarch, 64);
3986 set_gdbarch_float_bit (gdbarch, 32);
3987 set_gdbarch_double_bit (gdbarch, 64);
3988 set_gdbarch_long_double_bit (gdbarch, 128);
3989 set_gdbarch_ptr_bit (gdbarch, 64);
3990
3991 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3992 set_gdbarch_num_pseudo_regs (gdbarch,
3993 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3994 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3995 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3996
3997 set_gdbarch_register_name (gdbarch, ia64_register_name);
3998 set_gdbarch_register_type (gdbarch, ia64_register_type);
3999
4000 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
4001 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
4002 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
4003 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
4004 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
4005 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
4006 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
4007
4008 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
4009
4010 set_gdbarch_return_value (gdbarch, ia64_return_value);
4011
4012 set_gdbarch_memory_insert_breakpoint (gdbarch,
4013 ia64_memory_insert_breakpoint);
4014 set_gdbarch_memory_remove_breakpoint (gdbarch,
4015 ia64_memory_remove_breakpoint);
4016 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
4017 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
4018 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
4019
4020 /* Settings for calling functions in the inferior. */
4021 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
4022 tdep->infcall_ops = ia64_infcall_ops;
4023 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
4024 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
4025
4026 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
4027 #ifdef HAVE_LIBUNWIND_IA64_H
4028 frame_unwind_append_unwinder (gdbarch,
4029 &ia64_libunwind_sigtramp_frame_unwind);
4030 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4031 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4032 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4033 #else
4034 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4035 #endif
4036 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4037 frame_base_set_default (gdbarch, &ia64_frame_base);
4038
4039 /* Settings that should be unnecessary. */
4040 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4041
4042 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4043 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4044 ia64_convert_from_func_ptr_addr);
4045
4046 /* The virtual table contains 16-byte descriptors, not pointers to
4047 descriptors. */
4048 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4049
4050 /* Hook in ABI-specific overrides, if they have been registered. */
4051 gdbarch_init_osabi (info, gdbarch);
4052
4053 return gdbarch;
4054 }
4055
4056 extern initialize_file_ftype _initialize_ia64_tdep; /* -Wmissing-prototypes */
4057
4058 void
4059 _initialize_ia64_tdep (void)
4060 {
4061 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4062 }