gdb/ia64-tdep.c
1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
4 2009, 2010, 2011 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "gdbcore.h"
24 #include "arch-utils.h"
25 #include "floatformat.h"
26 #include "gdbtypes.h"
27 #include "regcache.h"
28 #include "reggroups.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "gdb_assert.h"
35 #include "objfiles.h"
36 #include "elf/common.h" /* for DT_PLTGOT value */
37 #include "elf-bfd.h"
38 #include "dis-asm.h"
39 #include "infcall.h"
40 #include "osabi.h"
41 #include "ia64-tdep.h"
42 #include "cp-abi.h"
43
44 #ifdef HAVE_LIBUNWIND_IA64_H
45 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
46 #include "libunwind-frame.h"
47 #include "libunwind-ia64.h"
48
49 /* Note: KERNEL_START is supposed to be an address which is not going
50 to ever contain any valid unwind info. For ia64 linux, the choice
51 of 0xc000000000000000 is fairly safe since that's uncached space.
52
53 We use KERNEL_START as follows: after obtaining the kernel's
54 unwind table via getunwind(), we project its unwind data into
55 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
56 when ia64_access_mem() sees a memory access to this
57 address-range, we redirect it to ktab instead.
58
59 None of this hackery is needed with a modern kernel/libc
60 which uses the kernel virtual DSO to provide access to the
61 kernel's unwind info. In that case, ktab_size remains 0 and
62 hence the value of KERNEL_START doesn't matter. */
63
64 #define KERNEL_START 0xc000000000000000ULL
65
66 static size_t ktab_size = 0;
67 struct ia64_table_entry
68 {
69 uint64_t start_offset;
70 uint64_t end_offset;
71 uint64_t info_offset;
72 };
73
74 static struct ia64_table_entry *ktab = NULL;
75
76 #endif
77
78 /* An enumeration of the different IA-64 instruction types. */
79
80 typedef enum instruction_type
81 {
82 A, /* Integer ALU ; I-unit or M-unit */
83 I, /* Non-ALU integer; I-unit */
84 M, /* Memory ; M-unit */
85 F, /* Floating-point ; F-unit */
86 B, /* Branch ; B-unit */
87 L, /* Extended (L+X) ; I-unit */
88 X, /* Extended (L+X) ; I-unit */
89 undefined /* undefined or reserved */
90 } instruction_type;
91
92 /* We represent IA-64 PC addresses as the value of the instruction
93 pointer or'd with some bit combination in the low nibble which
94 represents the slot number in the bundle addressed by the
95 instruction pointer. The problem is that the Linux kernel
96 multiplies its slot numbers (for exceptions) by one while the
97 disassembler multiplies its slot numbers by 6. In addition, I've
98 heard it said that the simulator uses 1 as the multiplier.
99
100 I've fixed the disassembler so that the bytes_per_line field will
101 be the slot multiplier. If bytes_per_line comes in as zero, it
102 is set to six (which is how it was set up initially). -- objdump
103 displays pretty disassembly dumps with this value. For our purposes,
104 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
105 never want to also display the raw bytes the way objdump does. */
106
107 #define SLOT_MULTIPLIER 1
108
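/* Illustration only -- a minimal sketch, not part of the original file,
   restating how a slot-encoded PC splits into its bundle address and slot
   number (the `addr & ~0x0f' and `(addr & 0x0f) / SLOT_MULTIPLIER'
   arithmetic used by fetch_instruction and the breakpoint routines below).
   The helper name is hypothetical and unused elsewhere.  */

static void
ia64_split_pc_example (CORE_ADDR pc, CORE_ADDR *bundle_addr, int *slotnum)
{
  /* Bundles are 16 bytes; the low nibble encodes the slot.  */
  *bundle_addr = pc & ~(CORE_ADDR) 0x0f;
  *slotnum = (int) (pc & 0x0f) / SLOT_MULTIPLIER;	/* Expect 0, 1 or 2.  */
}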
109 /* Length in bytes of an instruction bundle. */
110
111 #define BUNDLE_LEN 16
112
113 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
114
115 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
116 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
117 #endif
118
119 static gdbarch_init_ftype ia64_gdbarch_init;
120
121 static gdbarch_register_name_ftype ia64_register_name;
122 static gdbarch_register_type_ftype ia64_register_type;
123 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
124 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
125 static struct type *is_float_or_hfa_type (struct type *t);
126 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
127 CORE_ADDR faddr);
128
129 #define NUM_IA64_RAW_REGS 462
130
131 static int sp_regnum = IA64_GR12_REGNUM;
132 static int fp_regnum = IA64_VFP_REGNUM;
133 static int lr_regnum = IA64_VRAP_REGNUM;
134
135 /* NOTE: we treat the register stack registers r32-r127 as
136 pseudo-registers because they may not be accessible via the ptrace
137 register get/set interfaces. */
138
139 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
140 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
141 V127_REGNUM = V32_REGNUM + 95,
142 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
143 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
144
145 /* Array of register names; There should be ia64_num_regs strings in
146 the initializer. */
147
148 static char *ia64_register_names[] =
149 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
150 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
151 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
152 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164 "", "", "", "", "", "", "", "",
165
166 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
167 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
168 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
169 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
170 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
171 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
172 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
173 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
174 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
175 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
176 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
177 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
178 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
179 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
180 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
181 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
182
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190 "", "", "", "", "", "", "", "",
191
192 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
193
194 "vfp", "vrap",
195
196 "pr", "ip", "psr", "cfm",
197
198 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
199 "", "", "", "", "", "", "", "",
200 "rsc", "bsp", "bspstore", "rnat",
201 "", "fcr", "", "",
202 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
203 "ccv", "", "", "", "unat", "", "", "",
204 "fpsr", "", "", "", "itc",
205 "", "", "", "", "", "", "", "", "", "",
206 "", "", "", "", "", "", "", "", "",
207 "pfs", "lc", "ec",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "", "", "", "", "", "", "", "", "", "",
214 "",
215 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
216 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
217 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
218 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
219 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
220 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
221 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
222 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
223 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
224 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
225 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
226 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
227 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
228 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
229 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
230 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
231
232 "bof",
233
234 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
235 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
236 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
237 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
238 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
239 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
240 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
241 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
242 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
243 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
244 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
245 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
246
247 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
248 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
249 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
250 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
251 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
252 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
253 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
254 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
255 };
256
257 struct ia64_frame_cache
258 {
259 CORE_ADDR base; /* frame pointer base for frame */
260 CORE_ADDR pc; /* function start pc for frame */
261 CORE_ADDR saved_sp; /* stack pointer for frame */
262 CORE_ADDR bsp; /* points at r32 for the current frame */
263 CORE_ADDR cfm; /* cfm value for current frame */
264 CORE_ADDR prev_cfm; /* cfm value for previous frame */
265 int frameless;
266 int sof; /* Size of frame (decoded from cfm value). */
267 int sol; /* Size of locals (decoded from cfm value). */
268 int sor; /* Number of rotating registers (decoded from
269 cfm value). */
270 CORE_ADDR after_prologue;
271 /* Address of first instruction after the last
272 prologue instruction; Note that there may
273 be instructions from the function's body
274 intermingled with the prologue. */
275 int mem_stack_frame_size;
276 /* Size of the memory stack frame (may be zero),
277 or -1 if it has not been determined yet. */
278 int fp_reg; /* Register number (if any) used as a frame pointer
279 for this frame. 0 if no register is being used
280 as the frame pointer. */
281
282 /* Saved registers. */
283 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
284
285 };
286
287 static int
288 floatformat_valid (const struct floatformat *fmt, const void *from)
289 {
290 return 1;
291 }
292
293 static const struct floatformat floatformat_ia64_ext_little =
294 {
295 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
296 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
297 };
298
299 static const struct floatformat floatformat_ia64_ext_big =
300 {
301 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
302 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
303 };
304
305 static const struct floatformat *floatformats_ia64_ext[2] =
306 {
307 &floatformat_ia64_ext_big,
308 &floatformat_ia64_ext_little
309 };
310
311 static struct type *
312 ia64_ext_type (struct gdbarch *gdbarch)
313 {
314 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
315
316 if (!tdep->ia64_ext_type)
317 tdep->ia64_ext_type
318 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
319 floatformats_ia64_ext);
320
321 return tdep->ia64_ext_type;
322 }
323
324 static int
325 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
326 struct reggroup *group)
327 {
328 int vector_p;
329 int float_p;
330 int raw_p;
331 if (group == all_reggroup)
332 return 1;
333 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
334 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
335 raw_p = regnum < NUM_IA64_RAW_REGS;
336 if (group == float_reggroup)
337 return float_p;
338 if (group == vector_reggroup)
339 return vector_p;
340 if (group == general_reggroup)
341 return (!vector_p && !float_p);
342 if (group == save_reggroup || group == restore_reggroup)
343 return raw_p;
344 return 0;
345 }
346
347 static const char *
348 ia64_register_name (struct gdbarch *gdbarch, int reg)
349 {
350 return ia64_register_names[reg];
351 }
352
353 struct type *
354 ia64_register_type (struct gdbarch *arch, int reg)
355 {
356 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
357 return ia64_ext_type (arch);
358 else
359 return builtin_type (arch)->builtin_long;
360 }
361
362 static int
363 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
364 {
365 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
366 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
367 return reg;
368 }
369
370
371 /* Extract ``len'' bits from an instruction bundle starting at
372 bit ``from''. */
373
374 static long long
375 extract_bit_field (const char *bundle, int from, int len)
376 {
377 long long result = 0LL;
378 int to = from + len;
379 int from_byte = from / 8;
380 int to_byte = to / 8;
381 unsigned char *b = (unsigned char *) bundle;
382 unsigned char c;
383 int lshift;
384 int i;
385
386 c = b[from_byte];
387 if (from_byte == to_byte)
388 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
389 result = c >> (from % 8);
390 lshift = 8 - (from % 8);
391
392 for (i = from_byte+1; i < to_byte; i++)
393 {
394 result |= ((long long) b[i]) << lshift;
395 lshift += 8;
396 }
397
398 if (from_byte < to_byte && (to % 8 != 0))
399 {
400 c = b[to_byte];
401 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
402 result |= ((long long) c) << lshift;
403 }
404
405 return result;
406 }
407
408 /* Replace the specified bits in an instruction bundle. */
409
410 static void
411 replace_bit_field (char *bundle, long long val, int from, int len)
412 {
413 int to = from + len;
414 int from_byte = from / 8;
415 int to_byte = to / 8;
416 unsigned char *b = (unsigned char *) bundle;
417 unsigned char c;
418
419 if (from_byte == to_byte)
420 {
421 unsigned char left, right;
422 c = b[from_byte];
423 left = (c >> (to % 8)) << (to % 8);
424 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
425 c = (unsigned char) (val & 0xff);
426 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
427 c |= right | left;
428 b[from_byte] = c;
429 }
430 else
431 {
432 int i;
433 c = b[from_byte];
434 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
435 c = c | (val << (from % 8));
436 b[from_byte] = c;
437 val >>= 8 - from % 8;
438
439 for (i = from_byte+1; i < to_byte; i++)
440 {
441 c = val & 0xff;
442 val >>= 8;
443 b[i] = c;
444 }
445
446 if (to % 8 != 0)
447 {
448 unsigned char cv = (unsigned char) val;
449 c = b[to_byte];
450 c = c >> (to % 8) << (to % 8);
451 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
452 b[to_byte] = c;
453 }
454 }
455 }
456
457 /* Return the contents of slot N (for N = 0, 1, or 2) in
458 an instruction bundle. */
459
460 static long long
461 slotN_contents (char *bundle, int slotnum)
462 {
463 return extract_bit_field (bundle, 5+41*slotnum, 41);
464 }
465
466 /* Store an instruction in an instruction bundle. */
467
468 static void
469 replace_slotN_contents (char *bundle, long long instr, int slotnum)
470 {
471 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
472 }
473
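/* Illustration only -- a hedged sketch, not part of the original file:
   the helpers above treat a bundle as a 5-bit template in bits 0..4
   followed by three 41-bit slots at bit offsets 5, 46 and 87.  The
   function below merely restates that mapping; its name is hypothetical.  */

static void
ia64_decode_bundle_example (const char *bundle, long long *templ,
			    long long slot[3])
{
  int n;

  /* The template selects the unit types; see template_encoding_table
     below.  */
  *templ = extract_bit_field (bundle, 0, 5);
  for (n = 0; n < 3; n++)
    slot[n] = slotN_contents ((char *) bundle, n);	/* Bits 5 + 41 * n.  */
}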
474 static const enum instruction_type template_encoding_table[32][3] =
475 {
476 { M, I, I }, /* 00 */
477 { M, I, I }, /* 01 */
478 { M, I, I }, /* 02 */
479 { M, I, I }, /* 03 */
480 { M, L, X }, /* 04 */
481 { M, L, X }, /* 05 */
482 { undefined, undefined, undefined }, /* 06 */
483 { undefined, undefined, undefined }, /* 07 */
484 { M, M, I }, /* 08 */
485 { M, M, I }, /* 09 */
486 { M, M, I }, /* 0A */
487 { M, M, I }, /* 0B */
488 { M, F, I }, /* 0C */
489 { M, F, I }, /* 0D */
490 { M, M, F }, /* 0E */
491 { M, M, F }, /* 0F */
492 { M, I, B }, /* 10 */
493 { M, I, B }, /* 11 */
494 { M, B, B }, /* 12 */
495 { M, B, B }, /* 13 */
496 { undefined, undefined, undefined }, /* 14 */
497 { undefined, undefined, undefined }, /* 15 */
498 { B, B, B }, /* 16 */
499 { B, B, B }, /* 17 */
500 { M, M, B }, /* 18 */
501 { M, M, B }, /* 19 */
502 { undefined, undefined, undefined }, /* 1A */
503 { undefined, undefined, undefined }, /* 1B */
504 { M, F, B }, /* 1C */
505 { M, F, B }, /* 1D */
506 { undefined, undefined, undefined }, /* 1E */
507 { undefined, undefined, undefined }, /* 1F */
508 };
509
510 /* Fetch and (partially) decode an instruction at ADDR and return the
511 address of the next instruction to fetch. */
512
513 static CORE_ADDR
514 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
515 {
516 char bundle[BUNDLE_LEN];
517 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
518 long long template;
519 int val;
520
521 /* Warn about slot numbers greater than 2. We used to generate
522 an error here on the assumption that the user entered an invalid
523 address. But, sometimes GDB itself requests an invalid address.
524 This can (easily) happen when execution stops in a function for
525 which there are no symbols. The prologue scanner will attempt to
526 find the beginning of the function - if the nearest symbol
527 happens to not be aligned on a bundle boundary (16 bytes), the
528 resulting starting address will cause GDB to think that the slot
529 number is too large.
530
531 So we warn about it and set the slot number to zero. It is
532 not necessarily a fatal condition, particularly if debugging
533 at the assembly language level. */
534 if (slotnum > 2)
535 {
536 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
537 "Using slot 0 instead"));
538 slotnum = 0;
539 }
540
541 addr &= ~0x0f;
542
543 val = target_read_memory (addr, bundle, BUNDLE_LEN);
544
545 if (val != 0)
546 return 0;
547
548 *instr = slotN_contents (bundle, slotnum);
549 template = extract_bit_field (bundle, 0, 5);
550 *it = template_encoding_table[(int)template][slotnum];
551
552 if (slotnum == 2 || (slotnum == 1 && *it == L))
553 addr += 16;
554 else
555 addr += (slotnum + 1) * SLOT_MULTIPLIER;
556
557 return addr;
558 }
559
560 /* There are 5 different break instructions (break.i, break.b,
561 break.m, break.f, and break.x), but they all have the same
562 encoding. (The five bit template in the low five bits of the
563 instruction bundle distinguishes one from another.)
564
565 The runtime architecture manual specifies that break instructions
566 used for debugging purposes must have the upper two bits of the 21
567 bit immediate set to a 0 and a 1 respectively. A breakpoint
568 instruction encodes the most significant bit of its 21 bit
569 immediate at bit 36 of the 41 bit instruction. The penultimate msb
570 is at bit 25 which leads to the pattern below.
571
572 Originally, I had this set up to do, e.g, a "break.i 0x80000" But
573 it turns out that 0x80000 was used as the syscall break in the early
574 simulators. So I changed the pattern slightly to do "break.i 0x080001"
575 instead. But that didn't work either (I later found out that this
576 pattern was used by the simulator that I was using.) So I ended up
577 using the pattern seen below.
578
579 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
580 while we need bit-based addressing, as an instruction is 41 bits long and
581 we must not modify/corrupt the adjacent slots in the same bundle.
582 Fortunately we may store a larger memory region, including the adjacent
583 bits, with the original memory content (not the breakpoints possibly
584 already stored there). We need to be careful in
585 ia64_memory_remove_breakpoint to always restore only the specific bits of
586 this instruction, ignoring any adjacent stored bits.
587
588 We use the original addressing with the low nibble in the range <0..2> which
589 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
590 as the direct byte offset of SHADOW_CONTENTS. We store the whole BUNDLE_LEN
591 bytes, minus the (at most two) possibly skipped leading bytes, so as not
592 to run into the next bundle.
593
594 If we wanted to store the whole bundle to SHADOW_CONTENTS, we would have
595 to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
596 In that case there would be no other place left to store
597 SLOTNUM (`address & 0x0f', a value in the range <0..2>). We need to know
598 SLOTNUM in ia64_memory_remove_breakpoint.
599
600 There is one special case where we need to be extra careful:
601 L-X instructions, which are instructions that occupy 2 slots
602 (The L part is always in slot 1, and the X part is always in
603 slot 2). We must refuse to insert breakpoints for an address
604 that points at slot 2 of a bundle where an L-X instruction is
605 present, since there is logically no instruction at that address.
606 However, to make things more interesting, the opcode of L-X
607 instructions is located in slot 2. This means that, to insert
608 a breakpoint at an address that points to slot 1, we actually
609 need to write the breakpoint in slot 2! Slot 1 is actually
610 the extended operand, so writing the breakpoint there would not
611 have the desired effect. Another side-effect of this issue
612 is that we need to make sure that the shadow contents buffer
613 does save byte 15 of our instruction bundle (this is the tail
614 end of slot 2, which wouldn't be saved if we were to insert
615 the breakpoint in slot 1).
616
617 ia64 16-byte bundle layout:
618 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
619
620 The current addressing used by the code below:
621 original PC placed_address placed_size required covered
622 == bp_tgt->shadow_len reqd \subset covered
623 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
624 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
625 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
626
627 L-X instructions are treated a little specially, as explained above:
628 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
629
630 `objdump -d' and some other tools show somewhat unjustified offsets:
631 original PC byte where the instruction starts objdump offset
632 0xABCDE0 0xABCDE0 0xABCDE0
633 0xABCDE1 0xABCDE5 0xABCDE6
634 0xABCDE2 0xABCDEA 0xABCDEC
635 */
636
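/* Illustration only -- an assumed restatement of the address table above,
   not part of the original file: for a PC whose low nibble is SLOTNUM
   (0..2), the shadow covers bytes <SLOTNUM..0xF> of the 16-byte bundle,
   i.e. BUNDLE_LEN - SLOTNUM bytes starting at PLACED_ADDRESS.  The helper
   name is hypothetical.  */

static int
ia64_shadow_len_example (CORE_ADDR pc)
{
  int slotnum = (int) (pc & 0x0f) / SLOT_MULTIPLIER;	/* 0, 1 or 2.  */

  /* Covered bytes are <slotnum .. 0xF> of the bundle, as tabulated above.  */
  return BUNDLE_LEN - slotnum;
}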
637 #define IA64_BREAKPOINT 0x00003333300LL
638
639 static int
640 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
641 struct bp_target_info *bp_tgt)
642 {
643 CORE_ADDR addr = bp_tgt->placed_address;
644 gdb_byte bundle[BUNDLE_LEN];
645 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
646 long long instr_breakpoint;
647 int val;
648 int template;
649 struct cleanup *cleanup;
650
651 if (slotnum > 2)
652 error (_("Can't insert breakpoint for slot numbers greater than 2."));
653
654 addr &= ~0x0f;
655
656 /* Enable the automatic memory restoration from breakpoints while
657 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
658 Otherwise, we could possibly store into the shadow parts of adjacently
659 placed breakpoints, because our SHADOW_CONTENTS overlaps the real
660 breakpoint instruction bits region. */
661 cleanup = make_show_memory_breakpoints_cleanup (0);
662 val = target_read_memory (addr, bundle, BUNDLE_LEN);
663 if (val != 0)
664 {
665 do_cleanups (cleanup);
666 return val;
667 }
668
669 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
670 for addressing the SHADOW_CONTENTS placement. */
671 shadow_slotnum = slotnum;
672
673 /* Always cover the last byte of the bundle in case we are inserting
674 a breakpoint on an L-X instruction. */
675 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
676
677 template = extract_bit_field (bundle, 0, 5);
678 if (template_encoding_table[template][slotnum] == X)
679 {
680 /* X unit types can only be used in slot 2, and are actually
681 part of a 2-slot L-X instruction. We cannot break at this
682 address, as this is the second half of an instruction that
683 lives in slot 1 of that bundle. */
684 gdb_assert (slotnum == 2);
685 error (_("Can't insert breakpoint for non-existing slot X"));
686 }
687 if (template_encoding_table[template][slotnum] == L)
688 {
689 /* L unit types can only be used in slot 1. But the associated
690 opcode for that instruction is in slot 2, so bump the slot number
691 accordingly. */
692 gdb_assert (slotnum == 1);
693 slotnum = 2;
694 }
695
696 /* Store the whole bundle, except for the initial skipped bytes by the slot
697 number interpreted as bytes offset in PLACED_ADDRESS. */
698 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
699 bp_tgt->shadow_len);
700
701 /* Re-read the same bundle as above except that, this time, read it in order
702 to compute the new bundle inside which we will be inserting the
703 breakpoint. Therefore, disable the automatic memory restoration from
704 breakpoints while we read our instruction bundle. Otherwise, the general
705 restoration mechanism kicks in and we would possibly remove parts of
706 adjacently placed breakpoints, because our SHADOW_CONTENTS overlaps
707 the real breakpoint instruction bits region. */
708 make_show_memory_breakpoints_cleanup (1);
709 val = target_read_memory (addr, bundle, BUNDLE_LEN);
710 if (val != 0)
711 {
712 do_cleanups (cleanup);
713 return val;
714 }
715
716 /* Breakpoints already present in the code will get detected and not get
717 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
718 location cannot induce the internal error as they are optimized into
719 a single instance by update_global_location_list. */
720 instr_breakpoint = slotN_contents (bundle, slotnum);
721 if (instr_breakpoint == IA64_BREAKPOINT)
722 internal_error (__FILE__, __LINE__,
723 _("Address %s already contains a breakpoint."),
724 paddress (gdbarch, bp_tgt->placed_address));
725 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
726
727 bp_tgt->placed_size = bp_tgt->shadow_len;
728
729 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
730 bp_tgt->shadow_len);
731
732 do_cleanups (cleanup);
733 return val;
734 }
735
736 static int
737 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
738 struct bp_target_info *bp_tgt)
739 {
740 CORE_ADDR addr = bp_tgt->placed_address;
741 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
742 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
743 long long instr_breakpoint, instr_saved;
744 int val;
745 int template;
746 struct cleanup *cleanup;
747
748 addr &= ~0x0f;
749
750 /* Disable the automatic memory restoration from breakpoints while
751 we read our instruction bundle. Otherwise, the general restoration
752 mechanism kicks in and we would possibly remove parts of adjacently
753 placed breakpoints, because our SHADOW_CONTENTS overlaps the real
754 breakpoint instruction bits region. */
755 cleanup = make_show_memory_breakpoints_cleanup (1);
756 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
757 if (val != 0)
758 {
759 do_cleanups (cleanup);
760 return val;
761 }
762
763 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
764 for addressing the SHADOW_CONTENTS placement. */
765 shadow_slotnum = slotnum;
766
767 template = extract_bit_field (bundle_mem, 0, 5);
768 if (template_encoding_table[template][slotnum] == X)
769 {
770 /* X unit types can only be used in slot 2, and are actually
771 part of a 2-slot L-X instruction. We refuse to insert
772 breakpoints at this address, so there should be no reason
773 for us attempting to remove one there, except if the program's
774 code somehow got modified in memory. */
775 gdb_assert (slotnum == 2);
776 warning (_("Cannot remove breakpoint at address %s from non-existing "
777 "X-type slot, memory has changed underneath"),
778 paddress (gdbarch, bp_tgt->placed_address));
779 do_cleanups (cleanup);
780 return -1;
781 }
782 if (template_encoding_table[template][slotnum] == L)
783 {
784 /* L unit types can only be used in slot 1. But the breakpoint
785 was actually saved using slot 2, so update the slot number
786 accordingly. */
787 gdb_assert (slotnum == 1);
788 slotnum = 2;
789 }
790
791 gdb_assert (bp_tgt->placed_size == BUNDLE_LEN - shadow_slotnum);
792 gdb_assert (bp_tgt->placed_size == bp_tgt->shadow_len);
793
794 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
795 if (instr_breakpoint != IA64_BREAKPOINT)
796 {
797 warning (_("Cannot remove breakpoint at address %s, "
798 "no break instruction at such address."),
799 paddress (gdbarch, bp_tgt->placed_address));
800 do_cleanups (cleanup);
801 return -1;
802 }
803
804 /* Extract the original saved instruction from SLOTNUM normalizing its
805 bit-shift for INSTR_SAVED. */
806 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
807 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
808 bp_tgt->shadow_len);
809 instr_saved = slotN_contents (bundle_saved, slotnum);
810
811 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
812 and not any of the other ones that are stored in SHADOW_CONTENTS. */
813 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
814 val = target_write_memory (addr, bundle_mem, BUNDLE_LEN);
815
816 do_cleanups (cleanup);
817 return val;
818 }
819
820 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
821 instruction slot ranges are bit-granular (41 bits), we have to provide an
822 extended range as described for ia64_memory_insert_breakpoint. We also take
823 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
824 make a match for permanent breakpoints. */
825
826 static const gdb_byte *
827 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
828 CORE_ADDR *pcptr, int *lenptr)
829 {
830 CORE_ADDR addr = *pcptr;
831 static gdb_byte bundle[BUNDLE_LEN];
832 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
833 long long instr_fetched;
834 int val;
835 int template;
836 struct cleanup *cleanup;
837
838 if (slotnum > 2)
839 error (_("Can't insert breakpoint for slot numbers greater than 2."));
840
841 addr &= ~0x0f;
842
843 /* Enable the automatic memory restoration from breakpoints while
844 we read our instruction bundle to match bp_loc_is_permanent. */
845 cleanup = make_show_memory_breakpoints_cleanup (0);
846 val = target_read_memory (addr, bundle, BUNDLE_LEN);
847 do_cleanups (cleanup);
848
849 /* The memory might be unreachable. This can happen, for instance,
850 when the user inserts a breakpoint at an invalid address. */
851 if (val != 0)
852 return NULL;
853
854 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
855 for addressing the SHADOW_CONTENTS placement. */
856 shadow_slotnum = slotnum;
857
858 /* Always cover the last byte of the bundle for the L-X slot case. */
859 *lenptr = BUNDLE_LEN - shadow_slotnum;
860
861 /* Check for an L-type instruction in slot 1; if present, bump the slot
862 number up to slot 2. */
863 template = extract_bit_field (bundle, 0, 5);
864 if (template_encoding_table[template][slotnum] == X)
865 {
866 gdb_assert (slotnum == 2);
867 error (_("Can't insert breakpoint for non-existing slot X"));
868 }
869 if (template_encoding_table[template][slotnum] == L)
870 {
871 gdb_assert (slotnum == 1);
872 slotnum = 2;
873 }
874
875 /* A break instruction has all its opcode bits cleared except for
876 the parameter value. For an L+X slot pair we are at the X slot (slot 2), so
877 we should not touch the L slot - the upper 41 bits of the parameter. */
878 instr_fetched = slotN_contents (bundle, slotnum);
879 instr_fetched &= 0x1003ffffc0LL;
880 replace_slotN_contents (bundle, instr_fetched, slotnum);
881
882 return bundle + shadow_slotnum;
883 }
884
885 static CORE_ADDR
886 ia64_read_pc (struct regcache *regcache)
887 {
888 ULONGEST psr_value, pc_value;
889 int slot_num;
890
891 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
892 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &pc_value);
893 slot_num = (psr_value >> 41) & 3;
894
895 return pc_value | (slot_num * SLOT_MULTIPLIER);
896 }
897
898 void
899 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
900 {
901 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
902 ULONGEST psr_value;
903
904 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
905 psr_value &= ~(3LL << 41);
906 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
907
908 new_pc &= ~0xfLL;
909
910 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
911 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
912 }
913
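/* Illustration only -- a sketch restating ia64_read_pc/ia64_write_pc above,
   not part of the original file: GDB's slot-encoded PC combines the bundle
   address from IP with psr.ri (bits 41..42 of PSR) folded into the low
   nibble, scaled by SLOT_MULTIPLIER.  The helper name is hypothetical.  */

static CORE_ADDR
ia64_compose_pc_example (ULONGEST ip, ULONGEST psr)
{
  int slot_num = (int) ((psr >> 41) & 3);	/* psr.ri, the slot index.  */

  return (ip & ~(ULONGEST) 0xf) | (slot_num * SLOT_MULTIPLIER);
}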
914 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
915
916 /* Returns the address of the slot that's NSLOTS slots away from
917 the address ADDR. NSLOTS may be positive or negative. */
918 static CORE_ADDR
919 rse_address_add(CORE_ADDR addr, int nslots)
920 {
921 CORE_ADDR new_addr;
922 int mandatory_nat_slots = nslots / 63;
923 int direction = nslots < 0 ? -1 : 1;
924
925 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
926
927 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
928 new_addr += 8 * direction;
929
930 if (IS_NaT_COLLECTION_ADDR(new_addr))
931 new_addr += 8 * direction;
932
933 return new_addr;
934 }
935
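/* Illustration only -- a sketch of how the pseudo-register code below uses
   rse_address_add; not part of the original file, and the name is
   hypothetical.  The backing-store address of stacked register r32+N is
   found by stepping back over the current frame (sof = cfm & 0x7f) and then
   forward N slots, with rse_address_add skipping the NaT collection slots
   in both directions.  */

static CORE_ADDR
ia64_stacked_reg_addr_example (CORE_ADDR bsp, ULONGEST cfm, int n)
{
  /* BSP points past the end of the current register frame.  */
  CORE_ADDR frame_start = rse_address_add (bsp, -(int) (cfm & 0x7f));

  return rse_address_add (frame_start, n);
}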
936 static void
937 ia64_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
938 int regnum, gdb_byte *buf)
939 {
940 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
941
942 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
943 {
944 #ifdef HAVE_LIBUNWIND_IA64_H
945 /* First try and use the libunwind special reg accessor,
946 otherwise fallback to standard logic. */
947 if (!libunwind_is_initialized ()
948 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
949 #endif
950 {
951 /* The fallback position is to assume that r32-r127 are
952 found sequentially in memory starting at $bof. This
953 isn't always true, but without libunwind, this is the
954 best we can do. */
955 ULONGEST cfm;
956 ULONGEST bsp;
957 CORE_ADDR reg;
958 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
959 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
960
961 /* The bsp points at the end of the register frame so we
962 subtract the size of the frame from it to get the start of
963 the register frame. */
964 bsp = rse_address_add (bsp, -(cfm & 0x7f));
965
966 if ((cfm & 0x7f) > regnum - V32_REGNUM)
967 {
968 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
969 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
970 store_unsigned_integer (buf, register_size (gdbarch, regnum),
971 byte_order, reg);
972 }
973 else
974 store_unsigned_integer (buf, register_size (gdbarch, regnum),
975 byte_order, 0);
976 }
977 }
978 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
979 {
980 ULONGEST unatN_val;
981 ULONGEST unat;
982 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
983 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
984 store_unsigned_integer (buf, register_size (gdbarch, regnum),
985 byte_order, unatN_val);
986 }
987 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
988 {
989 ULONGEST natN_val = 0;
990 ULONGEST bsp;
991 ULONGEST cfm;
992 CORE_ADDR gr_addr = 0;
993 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
994 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
995
996 /* The bsp points at the end of the register frame so we
997 subtract the size of the frame from it to get the start of the register frame. */
998 bsp = rse_address_add (bsp, -(cfm & 0x7f));
999
1000 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1001 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1002
1003 if (gr_addr != 0)
1004 {
1005 /* Compute address of nat collection bits. */
1006 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1007 CORE_ADDR nat_collection;
1008 int nat_bit;
1009 /* If our nat collection address is bigger than bsp, we have to get
1010 the nat collection from rnat. Otherwise, we fetch the nat
1011 collection from the computed address. */
1012 if (nat_addr >= bsp)
1013 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1014 &nat_collection);
1015 else
1016 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1017 nat_bit = (gr_addr >> 3) & 0x3f;
1018 natN_val = (nat_collection >> nat_bit) & 1;
1019 }
1020
1021 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1022 byte_order, natN_val);
1023 }
1024 else if (regnum == VBOF_REGNUM)
1025 {
1026 /* A virtual register frame start is provided for user convenience.
1027 It can be calculated as bsp - sof (the size of the frame). */
1028 ULONGEST bsp, vbsp;
1029 ULONGEST cfm;
1030 CORE_ADDR reg;
1031 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1032 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1033
1034 /* The bsp points at the end of the register frame so we
1035 subtract the size of the frame from it to get the beginning of the frame. */
1036 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1037 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1038 byte_order, vbsp);
1039 }
1040 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1041 {
1042 ULONGEST pr;
1043 ULONGEST cfm;
1044 ULONGEST prN_val;
1045 CORE_ADDR reg;
1046 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1047 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1048
1049 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1050 {
1051 /* Fetch predicate register rename base from current frame
1052 marker for this frame. */
1053 int rrb_pr = (cfm >> 32) & 0x3f;
1054
1055 /* Adjust the register number to account for register rotation. */
1056 regnum = VP16_REGNUM
1057 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1058 }
1059 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1060 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1061 byte_order, prN_val);
1062 }
1063 else
1064 memset (buf, 0, register_size (gdbarch, regnum));
1065 }
1066
1067 static void
1068 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1069 int regnum, const gdb_byte *buf)
1070 {
1071 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1072
1073 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1074 {
1075 ULONGEST bsp;
1076 ULONGEST cfm;
1077 CORE_ADDR reg;
1078 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1079 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1080
1081 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1082
1083 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1084 {
1085 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1086 write_memory (reg_addr, (void *) buf, 8);
1087 }
1088 }
1089 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1090 {
1091 ULONGEST unatN_val, unat, unatN_mask;
1092 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1093 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1094 regnum),
1095 byte_order);
1096 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1097 if (unatN_val == 0)
1098 unat &= ~unatN_mask;
1099 else if (unatN_val == 1)
1100 unat |= unatN_mask;
1101 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1102 }
1103 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1104 {
1105 ULONGEST natN_val;
1106 ULONGEST bsp;
1107 ULONGEST cfm;
1108 CORE_ADDR gr_addr = 0;
1109 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1110 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1111
1112 /* The bsp points at the end of the register frame so we
1113 subtract the size of the frame from it to get the start of the register frame. */
1114 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1115
1116 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1117 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1118
1119 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1120 regnum),
1121 byte_order);
1122
1123 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1124 {
1125 /* Compute address of nat collection bits. */
1126 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1127 CORE_ADDR nat_collection;
1128 int natN_bit = (gr_addr >> 3) & 0x3f;
1129 ULONGEST natN_mask = (1LL << natN_bit);
1130 /* If our nat collection address is bigger than bsp, we have to get
1131 the nat collection from rnat. Otherwise, we fetch the nat
1132 collection from the computed address. */
1133 if (nat_addr >= bsp)
1134 {
1135 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1136 &nat_collection);
1137 if (natN_val)
1138 nat_collection |= natN_mask;
1139 else
1140 nat_collection &= ~natN_mask;
1141 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1142 nat_collection);
1143 }
1144 else
1145 {
1146 char nat_buf[8];
1147 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1148 if (natN_val)
1149 nat_collection |= natN_mask;
1150 else
1151 nat_collection &= ~natN_mask;
1152 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1153 byte_order, nat_collection);
1154 write_memory (nat_addr, nat_buf, 8);
1155 }
1156 }
1157 }
1158 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1159 {
1160 ULONGEST pr;
1161 ULONGEST cfm;
1162 ULONGEST prN_val;
1163 ULONGEST prN_mask;
1164
1165 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1166 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1167
1168 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1169 {
1170 /* Fetch predicate register rename base from current frame
1171 marker for this frame. */
1172 int rrb_pr = (cfm >> 32) & 0x3f;
1173
1174 /* Adjust the register number to account for register rotation. */
1175 regnum = VP16_REGNUM
1176 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1177 }
1178 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1179 byte_order);
1180 prN_mask = (1LL << (regnum - VP0_REGNUM));
1181 if (prN_val == 0)
1182 pr &= ~prN_mask;
1183 else if (prN_val == 1)
1184 pr |= prN_mask;
1185 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1186 }
1187 }
1188
1189 /* The ia64 needs to convert between various ieee floating-point formats
1190 and the special ia64 floating point register format. */
1191
1192 static int
1193 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1194 {
1195 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1196 && type != ia64_ext_type (gdbarch));
1197 }
1198
1199 static void
1200 ia64_register_to_value (struct frame_info *frame, int regnum,
1201 struct type *valtype, gdb_byte *out)
1202 {
1203 struct gdbarch *gdbarch = get_frame_arch (frame);
1204 char in[MAX_REGISTER_SIZE];
1205 frame_register_read (frame, regnum, in);
1206 convert_typed_floating (in, ia64_ext_type (gdbarch), out, valtype);
1207 }
1208
1209 static void
1210 ia64_value_to_register (struct frame_info *frame, int regnum,
1211 struct type *valtype, const gdb_byte *in)
1212 {
1213 struct gdbarch *gdbarch = get_frame_arch (frame);
1214 char out[MAX_REGISTER_SIZE];
1215 convert_typed_floating (in, valtype, out, ia64_ext_type (gdbarch));
1216 put_frame_register (frame, regnum, out);
1217 }
1218
1219
1220 /* Limit the number of skipped non-prologue instructions, since examining
1221 the prologue is expensive. */
1222 static int max_skip_non_prologue_insns = 40;
1223
1224 /* Given PC representing the starting address of a function, and
1225 LIM_PC which is the (sloppy) limit to which to scan when looking
1226 for a prologue, attempt to further refine this limit by using
1227 the line data in the symbol table. If successful, a better guess
1228 on where the prologue ends is returned, otherwise the previous
1229 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1230 which will be set to indicate whether the returned limit may be
1231 used with no further scanning in the event that the function is
1232 frameless. */
1233
1234 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1235 superseded by skip_prologue_using_sal. */
1236
1237 static CORE_ADDR
1238 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1239 {
1240 struct symtab_and_line prologue_sal;
1241 CORE_ADDR start_pc = pc;
1242 CORE_ADDR end_pc;
1243
1244 /* The prologue can not possibly go past the function end itself,
1245 so we can already adjust LIM_PC accordingly. */
1246 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1247 lim_pc = end_pc;
1248
1249 /* Start off not trusting the limit. */
1250 *trust_limit = 0;
1251
1252 prologue_sal = find_pc_line (pc, 0);
1253 if (prologue_sal.line != 0)
1254 {
1255 int i;
1256 CORE_ADDR addr = prologue_sal.end;
1257
1258 /* Handle the case in which the compiler's optimizer/scheduler
1259 has moved instructions into the prologue. We scan ahead
1260 in the function looking for address ranges whose corresponding
1261 line number is less than or equal to the first one that we
1262 found for the function. (It can be less than when the
1263 scheduler puts a body instruction before the first prologue
1264 instruction.) */
1265 for (i = 2 * max_skip_non_prologue_insns;
1266 i > 0 && (lim_pc == 0 || addr < lim_pc);
1267 i--)
1268 {
1269 struct symtab_and_line sal;
1270
1271 sal = find_pc_line (addr, 0);
1272 if (sal.line == 0)
1273 break;
1274 if (sal.line <= prologue_sal.line
1275 && sal.symtab == prologue_sal.symtab)
1276 {
1277 prologue_sal = sal;
1278 }
1279 addr = sal.end;
1280 }
1281
1282 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1283 {
1284 lim_pc = prologue_sal.end;
1285 if (start_pc == get_pc_function_start (lim_pc))
1286 *trust_limit = 1;
1287 }
1288 }
1289 return lim_pc;
1290 }
1291
1292 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1293 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1294 || (14 <= (_regnum_) && (_regnum_) <= 31))
1295 #define imm9(_instr_) \
1296 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1297 | (((_instr_) & 0x00008000000LL) >> 20) \
1298 | (((_instr_) & 0x00000001fc0LL) >> 6))
1299
1300 /* Allocate and initialize a frame cache. */
1301
1302 static struct ia64_frame_cache *
1303 ia64_alloc_frame_cache (void)
1304 {
1305 struct ia64_frame_cache *cache;
1306 int i;
1307
1308 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1309
1310 /* Base address. */
1311 cache->base = 0;
1312 cache->pc = 0;
1313 cache->cfm = 0;
1314 cache->prev_cfm = 0;
1315 cache->sof = 0;
1316 cache->sol = 0;
1317 cache->sor = 0;
1318 cache->bsp = 0;
1319 cache->fp_reg = 0;
1320 cache->frameless = 1;
1321
1322 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1323 cache->saved_regs[i] = 0;
1324
1325 return cache;
1326 }
1327
1328 static CORE_ADDR
1329 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1330 struct frame_info *this_frame,
1331 struct ia64_frame_cache *cache)
1332 {
1333 CORE_ADDR next_pc;
1334 CORE_ADDR last_prologue_pc = pc;
1335 instruction_type it;
1336 long long instr;
1337 int cfm_reg = 0;
1338 int ret_reg = 0;
1339 int fp_reg = 0;
1340 int unat_save_reg = 0;
1341 int pr_save_reg = 0;
1342 int mem_stack_frame_size = 0;
1343 int spill_reg = 0;
1344 CORE_ADDR spill_addr = 0;
1345 char instores[8];
1346 char infpstores[8];
1347 char reg_contents[256];
1348 int trust_limit;
1349 int frameless = 1;
1350 int i;
1351 CORE_ADDR addr;
1352 char buf[8];
1353 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1354
1355 memset (instores, 0, sizeof instores);
1356 memset (infpstores, 0, sizeof infpstores);
1357 memset (reg_contents, 0, sizeof reg_contents);
1358
1359 if (cache->after_prologue != 0
1360 && cache->after_prologue <= lim_pc)
1361 return cache->after_prologue;
1362
1363 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1364 next_pc = fetch_instruction (pc, &it, &instr);
1365
1366 /* We want to check if we have a recognizable function start before we
1367 look ahead for a prologue. */
1368 if (pc < lim_pc && next_pc
1369 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1370 {
1371 /* alloc - start of a regular function. */
1372 int sor = (int) ((instr & 0x00078000000LL) >> 27);
1373 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1374 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1375 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1376
1377 /* Verify that the current cfm matches what we think is the
1378 function start. If we have somehow jumped within a function,
1379 we do not want to interpret the prologue and calculate the
1380 addresses of various registers such as the return address.
1381 We will instead treat the frame as frameless. */
1382 if (!this_frame ||
1383 (sof == (cache->cfm & 0x7f) &&
1384 sol == ((cache->cfm >> 7) & 0x7f)))
1385 frameless = 0;
1386
1387 cfm_reg = rN;
1388 last_prologue_pc = next_pc;
1389 pc = next_pc;
1390 }
1391 else
1392 {
1393 /* Look for a leaf routine. */
1394 if (pc < lim_pc && next_pc
1395 && (it == I || it == M)
1396 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1397 {
1398 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1399 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1400 | ((instr & 0x001f8000000LL) >> 20)
1401 | ((instr & 0x000000fe000LL) >> 13));
1402 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1403 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1404 int qp = (int) (instr & 0x0000000003fLL);
1405 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1406 {
1407 /* mov r2, r12 - beginning of leaf routine. */
1408 fp_reg = rN;
1409 last_prologue_pc = next_pc;
1410 }
1411 }
1412
1413 /* If we don't recognize a regular function or leaf routine, we are
1414 done. */
1415 if (!fp_reg)
1416 {
1417 pc = lim_pc;
1418 if (trust_limit)
1419 last_prologue_pc = lim_pc;
1420 }
1421 }
1422
1423 /* Loop, looking for prologue instructions, keeping track of
1424 where preserved registers were spilled. */
1425 while (pc < lim_pc)
1426 {
1427 next_pc = fetch_instruction (pc, &it, &instr);
1428 if (next_pc == 0)
1429 break;
1430
1431 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1432 {
1433 /* Exit loop upon hitting a non-nop branch instruction. */
1434 if (trust_limit)
1435 lim_pc = pc;
1436 break;
1437 }
1438 else if (((instr & 0x3fLL) != 0LL) &&
1439 (frameless || ret_reg != 0))
1440 {
1441 /* Exit loop upon hitting a predicated instruction if
1442 we already have the return register or if we are frameless. */
1443 if (trust_limit)
1444 lim_pc = pc;
1445 break;
1446 }
1447 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1448 {
1449 /* Move from BR */
1450 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1451 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1452 int qp = (int) (instr & 0x0000000003f);
1453
1454 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1455 {
1456 ret_reg = rN;
1457 last_prologue_pc = next_pc;
1458 }
1459 }
1460 else if ((it == I || it == M)
1461 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1462 {
1463 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1464 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1465 | ((instr & 0x001f8000000LL) >> 20)
1466 | ((instr & 0x000000fe000LL) >> 13));
1467 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1468 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1469 int qp = (int) (instr & 0x0000000003fLL);
1470
1471 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1472 {
1473 /* mov rN, r12 */
1474 fp_reg = rN;
1475 last_prologue_pc = next_pc;
1476 }
1477 else if (qp == 0 && rN == 12 && rM == 12)
1478 {
1479 /* adds r12, -mem_stack_frame_size, r12 */
1480 mem_stack_frame_size -= imm;
1481 last_prologue_pc = next_pc;
1482 }
1483 else if (qp == 0 && rN == 2
1484 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1485 {
1486 char buf[MAX_REGISTER_SIZE];
1487 CORE_ADDR saved_sp = 0;
1488 /* adds r2, spilloffset, rFramePointer
1489 or
1490 adds r2, spilloffset, r12
1491
1492 Get ready for stf.spill or st8.spill instructions.
1493 The address to start spilling at is loaded into r2.
1494 FIXME: Why r2? That's what gcc currently uses; it
1495 could well be different for other compilers. */
1496
1497 /* Hmm... whether or not this will work will depend on
1498 where the pc is. If it's still early in the prologue
1499 this'll be wrong. FIXME */
1500 if (this_frame)
1501 {
1502 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1503 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1504 get_frame_register (this_frame, sp_regnum, buf);
1505 saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1506 }
1507 spill_addr = saved_sp
1508 + (rM == 12 ? 0 : mem_stack_frame_size)
1509 + imm;
1510 spill_reg = rN;
1511 last_prologue_pc = next_pc;
1512 }
1513 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1514 rN < 256 && imm == 0)
1515 {
1516 /* mov rN, rM where rM is an input register. */
1517 reg_contents[rN] = rM;
1518 last_prologue_pc = next_pc;
1519 }
1520 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1521 rM == 2)
1522 {
1523 /* mov r12, r2 */
1524 last_prologue_pc = next_pc;
1525 break;
1526 }
1527 }
1528 else if (it == M
1529 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1530 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1531 {
1532 /* stf.spill [rN] = fM, imm9
1533 or
1534 stf.spill [rN] = fM */
1535
1536 int imm = imm9(instr);
1537 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1538 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1539 int qp = (int) (instr & 0x0000000003fLL);
1540 if (qp == 0 && rN == spill_reg && spill_addr != 0
1541 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1542 {
1543 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1544
1545 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1546 spill_addr += imm;
1547 else
1548 spill_addr = 0; /* last one; must be done. */
1549 last_prologue_pc = next_pc;
1550 }
1551 }
1552 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1553 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1554 {
1555 /* mov.m rN = arM
1556 or
1557 mov.i rN = arM */
1558
1559 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1560 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1561 int qp = (int) (instr & 0x0000000003fLL);
1562 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1563 {
1564 /* We have something like "mov.m r3 = ar.unat". Remember the
1565 r3 (or whatever) and watch for a store of this register... */
1566 unat_save_reg = rN;
1567 last_prologue_pc = next_pc;
1568 }
1569 }
1570 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1571 {
1572 /* mov rN = pr */
1573 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1574 int qp = (int) (instr & 0x0000000003fLL);
1575 if (qp == 0 && isScratch (rN))
1576 {
1577 pr_save_reg = rN;
1578 last_prologue_pc = next_pc;
1579 }
1580 }
1581 else if (it == M
1582 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1583 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1584 {
1585 /* st8 [rN] = rM
1586 or
1587 st8 [rN] = rM, imm9 */
1588 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1589 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1590 int qp = (int) (instr & 0x0000000003fLL);
1591 int indirect = rM < 256 ? reg_contents[rM] : 0;
1592 if (qp == 0 && rN == spill_reg && spill_addr != 0
1593 && (rM == unat_save_reg || rM == pr_save_reg))
1594 {
1595 /* We've found a spill of either the UNAT register or the PR
1596 register. (Well, not exactly; what we've actually found is
1597 a spill of the register that UNAT or PR was moved to).
1598 Record that fact and move on... */
1599 if (rM == unat_save_reg)
1600 {
1601 /* Track UNAT register. */
1602 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1603 unat_save_reg = 0;
1604 }
1605 else
1606 {
1607 /* Track PR register. */
1608 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1609 pr_save_reg = 0;
1610 }
1611 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1612 /* st8 [rN] = rM, imm9 */
1613 spill_addr += imm9(instr);
1614 else
1615 spill_addr = 0; /* Must be done spilling. */
1616 last_prologue_pc = next_pc;
1617 }
1618 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1619 {
1620 /* Allow up to one store of each input register. */
1621 instores[rM-32] = 1;
1622 last_prologue_pc = next_pc;
1623 }
1624 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1625 !instores[indirect-32])
1626 {
1627 /* Allow an indirect store of an input register. */
1628 instores[indirect-32] = 1;
1629 last_prologue_pc = next_pc;
1630 }
1631 }
1632 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1633 {
1634 /* One of
1635 st1 [rN] = rM
1636 st2 [rN] = rM
1637 st4 [rN] = rM
1638 st8 [rN] = rM
1639 Note that the st8 case is handled in the clause above.
1640
1641 Advance over stores of input registers. One store per input
1642 register is permitted. */
1643 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1644 int qp = (int) (instr & 0x0000000003fLL);
1645 int indirect = rM < 256 ? reg_contents[rM] : 0;
1646 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1647 {
1648 instores[rM-32] = 1;
1649 last_prologue_pc = next_pc;
1650 }
1651 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1652 !instores[indirect-32])
1653 {
1654 /* Allow an indirect store of an input register. */
1655 instores[indirect-32] = 1;
1656 last_prologue_pc = next_pc;
1657 }
1658 }
1659 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1660 {
1661 /* Either
1662 stfs [rN] = fM
1663 or
1664 stfd [rN] = fM
1665
1666 Advance over stores of floating point input registers. Again
1667 one store per register is permitted. */
1668 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1669 int qp = (int) (instr & 0x0000000003fLL);
1670 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1671 {
1672 infpstores[fM-8] = 1;
1673 last_prologue_pc = next_pc;
1674 }
1675 }
1676 else if (it == M
1677 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1678 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1679 {
1680 /* st8.spill [rN] = rM
1681 or
1682 st8.spill [rN] = rM, imm9 */
1683 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1684 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1685 int qp = (int) (instr & 0x0000000003fLL);
1686 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1687 {
1688 /* We've found a spill of one of the preserved general purpose
1689 regs. Record the spill address and advance the spill
1690 register if appropriate. */
1691 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1692 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1693 /* st8.spill [rN] = rM, imm9 */
1694 spill_addr += imm9(instr);
1695 else
1696 spill_addr = 0; /* Done spilling. */
1697 last_prologue_pc = next_pc;
1698 }
1699 }
1700
1701 pc = next_pc;
1702 }
1703
1704 /* If not frameless and we aren't called by skip_prologue, then we need
1705 to calculate registers for the previous frame which will be needed
1706 later. */
1707
1708 if (!frameless && this_frame)
1709 {
1710 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1711 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1712
1713 /* Extract the size of the rotating portion of the stack
1714 frame and the register rename base from the current
1715 frame marker. */
1716 cfm = cache->cfm;
1717 sor = cache->sor;
1718 sof = cache->sof;
1719 sol = cache->sol;
1720 rrb_gr = (cfm >> 18) & 0x7f;
1721
1722 /* Find the bof (beginning of frame). */
1723 bof = rse_address_add (cache->bsp, -sof);
1724
1725 for (i = 0, addr = bof;
1726 i < sof;
1727 i++, addr += 8)
1728 {
1729 if (IS_NaT_COLLECTION_ADDR (addr))
1730 {
1731 addr += 8;
1732 }
1733 if (i+32 == cfm_reg)
1734 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1735 if (i+32 == ret_reg)
1736 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1737 if (i+32 == fp_reg)
1738 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1739 }
1740
1741 /* For the previous argument registers we require the previous bof.
1742 If we can't find the previous cfm, then we can do nothing. */
1743 cfm = 0;
1744 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1745 {
1746 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1747 8, byte_order);
1748 }
1749 else if (cfm_reg != 0)
1750 {
1751 get_frame_register (this_frame, cfm_reg, buf);
1752 cfm = extract_unsigned_integer (buf, 8, byte_order);
1753 }
1754 cache->prev_cfm = cfm;
1755
1756 if (cfm != 0)
1757 {
1758 sor = ((cfm >> 14) & 0xf) * 8;
1759 sof = (cfm & 0x7f);
1760 sol = (cfm >> 7) & 0x7f;
1761 rrb_gr = (cfm >> 18) & 0x7f;
1762
1763 /* The previous bof only requires subtraction of the sol (size of
1764 locals) due to the overlap between output and input of
1765 subsequent frames. */
1766 bof = rse_address_add (bof, -sol);
1767
1768 for (i = 0, addr = bof;
1769 i < sof;
1770 i++, addr += 8)
1771 {
1772 if (IS_NaT_COLLECTION_ADDR (addr))
1773 {
1774 addr += 8;
1775 }
1776 if (i < sor)
1777 cache->saved_regs[IA64_GR32_REGNUM
1778 + ((i + (sor - rrb_gr)) % sor)]
1779 = addr;
1780 else
1781 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1782 }
1783
1784 }
1785 }
1786
1787 /* Try and trust the lim_pc value whenever possible. */
1788 if (trust_limit && lim_pc >= last_prologue_pc)
1789 last_prologue_pc = lim_pc;
1790
1791 cache->frameless = frameless;
1792 cache->after_prologue = last_prologue_pc;
1793 cache->mem_stack_frame_size = mem_stack_frame_size;
1794 cache->fp_reg = fp_reg;
1795
1796 return last_prologue_pc;
1797 }
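
/* The mask-and-shift expressions used throughout examine_prologue all
   follow the same layout: an IA-64 instruction slot is 41 bits wide,
   with the qualifying predicate in bits 0-5, one register operand in
   bits 6-12 or 20-26 depending on the format, and the other in bits
   13-19.  The sketch below is illustrative only (the helper name is
   made up) and is not used by the prologue scanner.  */
#if 0
static void
example_decode_store_fields (long long instr,
			     int *qp, int *rN, int *rM, int *imm)
{
  *qp = (int) (instr & 0x0000000003fLL);	   /* bits 0..5 */
  *rM = (int) ((instr & 0x000000fe000LL) >> 13);   /* bits 13..19 */
  *rN = (int) ((instr & 0x00007f00000LL) >> 20);   /* bits 20..26 */
  *imm = imm9 (instr);				   /* 9-bit store immediate */
}
#endif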
1798
1799 CORE_ADDR
1800 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1801 {
1802 struct ia64_frame_cache cache;
1803 cache.base = 0;
1804 cache.after_prologue = 0;
1805 cache.cfm = 0;
1806 cache.bsp = 0;
1807
1808 /* Call examine_prologue with 0 as the third argument since we
1809 don't have a frame to pass. */
1810 return examine_prologue (pc, pc+1024, 0, &cache);
1811 }
1812
1813
1814 /* Normal frames. */
1815
1816 static struct ia64_frame_cache *
1817 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1818 {
1819 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1820 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1821 struct ia64_frame_cache *cache;
1822 char buf[8];
1823 CORE_ADDR cfm, sof, sol, bsp, psr;
1824 int i;
1825
1826 if (*this_cache)
1827 return *this_cache;
1828
1829 cache = ia64_alloc_frame_cache ();
1830 *this_cache = cache;
1831
1832 get_frame_register (this_frame, sp_regnum, buf);
1833 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1834
1835 /* We always want the bsp to point to the end of frame.
1836 This way, we can always get the beginning of frame (bof)
1837 by subtracting frame size. */
1838 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1839 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1840
1841 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1842 psr = extract_unsigned_integer (buf, 8, byte_order);
1843
1844 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1845 cfm = extract_unsigned_integer (buf, 8, byte_order);
1846
1847 cache->sof = (cfm & 0x7f);
1848 cache->sol = (cfm >> 7) & 0x7f;
1849 cache->sor = ((cfm >> 14) & 0xf) * 8;
1850
1851 cache->cfm = cfm;
1852
1853 cache->pc = get_frame_func (this_frame);
1854
1855 if (cache->pc != 0)
1856 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1857
1858 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1859
1860 return cache;
1861 }
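
/* A small worked example of the CFM decoding above (values made up):
   for cfm == 0x815, sof = 0x815 & 0x7f = 21 (the frame holds r32-r52),
   sol = (0x815 >> 7) & 0x7f = 16 locals, so the output area is
   sof - sol = 5 registers, and sor = ((0x815 >> 14) & 0xf) * 8 = 0,
   i.e. no rotating region.  The helper below is illustrative only and
   is not part of the unwinder.  */
#if 0
static void
example_decode_cfm (ULONGEST cfm)
{
  int sof = cfm & 0x7f;		       /* size of frame */
  int sol = (cfm >> 7) & 0x7f;	       /* size of locals */
  int sor = ((cfm >> 14) & 0xf) * 8;   /* size of rotating region */
  int rrb_gr = (cfm >> 18) & 0x7f;     /* GR register rename base */
  int rrb_fr = (cfm >> 25) & 0x7f;     /* FR register rename base */
  int rrb_pr = (cfm >> 32) & 0x3f;     /* PR register rename base */
}
#endif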
1862
1863 static void
1864 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1865 struct frame_id *this_id)
1866 {
1867 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1868 struct ia64_frame_cache *cache =
1869 ia64_frame_cache (this_frame, this_cache);
1870
1871 /* If outermost frame, mark with null frame id. */
1872 if (cache->base != 0)
1873 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1874 if (gdbarch_debug >= 1)
1875 fprintf_unfiltered (gdb_stdlog,
1876 "regular frame id: code %s, stack %s, "
1877 "special %s, this_frame %s\n",
1878 paddress (gdbarch, this_id->code_addr),
1879 paddress (gdbarch, this_id->stack_addr),
1880 paddress (gdbarch, cache->bsp),
1881 host_address_to_string (this_frame));
1882 }
1883
1884 static struct value *
1885 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1886 int regnum)
1887 {
1888 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1889 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1890 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1891 char buf[8];
1892
1893 gdb_assert (regnum >= 0);
1894
1895 if (!target_has_registers)
1896 error (_("No registers."));
1897
1898 if (regnum == gdbarch_sp_regnum (gdbarch))
1899 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1900
1901 else if (regnum == IA64_BSP_REGNUM)
1902 {
1903 struct value *val;
1904 CORE_ADDR prev_cfm, bsp, prev_bsp;
1905
1906 /* We want to calculate the previous bsp as the end of the previous
1907 register stack frame. This corresponds to what the hardware bsp
1908 register will be if we pop the frame back, which is why we might
1909 have been called. We know the beginning of the current frame is
1910 cache->bsp - cache->sof; in the previous frame, that address is
1911 the start of its output registers. We can calculate the end of
1912 that frame by adding the size of its output area:
1913 (sof (size of frame) - sol (size of locals)). */
1914 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1915 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1916 8, byte_order);
1917 bsp = rse_address_add (cache->bsp, -(cache->sof));
1918 prev_bsp =
1919 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1920
1921 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1922 }
1923
1924 else if (regnum == IA64_CFM_REGNUM)
1925 {
1926 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1927
1928 if (addr != 0)
1929 return frame_unwind_got_memory (this_frame, regnum, addr);
1930
1931 if (cache->prev_cfm)
1932 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1933
1934 if (cache->frameless)
1935 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1936 IA64_PFS_REGNUM);
1937 return frame_unwind_got_register (this_frame, regnum, 0);
1938 }
1939
1940 else if (regnum == IA64_VFP_REGNUM)
1941 {
1942 /* If the function in question uses an automatic register (r32-r127)
1943 for the frame pointer, it'll be found by ia64_find_saved_register()
1944 above. If the function lacks one of these frame pointers, we can
1945 still provide a value since we know the size of the frame. */
1946 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1947 }
1948
1949 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1950 {
1951 struct value *pr_val;
1952 ULONGEST prN;
1953
1954 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1955 IA64_PR_REGNUM);
1956 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1957 {
1958 /* Fetch predicate register rename base from current frame
1959 marker for this frame. */
1960 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1961
1962 /* Adjust the register number to account for register rotation. */
1963 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1964 }
1965 prN = extract_bit_field (value_contents_all (pr_val),
1966 regnum - VP0_REGNUM, 1);
1967 return frame_unwind_got_constant (this_frame, regnum, prN);
1968 }
1969
1970 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1971 {
1972 struct value *unat_val;
1973 ULONGEST unatN;
1974 unat_val = ia64_frame_prev_register (this_frame, this_cache,
1975 IA64_UNAT_REGNUM);
1976 unatN = extract_bit_field (value_contents_all (unat_val),
1977 regnum - IA64_NAT0_REGNUM, 1);
1978 return frame_unwind_got_constant (this_frame, regnum, unatN);
1979 }
1980
1981 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1982 {
1983 int natval = 0;
1984 /* Find address of general register corresponding to nat bit we're
1985 interested in. */
1986 CORE_ADDR gr_addr;
1987
1988 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
1989
1990 if (gr_addr != 0)
1991 {
1992 /* Compute address of nat collection bits. */
1993 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1994 CORE_ADDR bsp;
1995 CORE_ADDR nat_collection;
1996 int nat_bit;
1997
1998 /* If our nat collection address is bigger than bsp, we have to get
1999 the nat collection from rnat. Otherwise, we fetch the nat
2000 collection from the computed address. */
2001 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2002 bsp = extract_unsigned_integer (buf, 8, byte_order);
2003 if (nat_addr >= bsp)
2004 {
2005 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2006 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2007 }
2008 else
2009 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2010 nat_bit = (gr_addr >> 3) & 0x3f;
2011 natval = (nat_collection >> nat_bit) & 1;
2012 }
2013
2014 return frame_unwind_got_constant (this_frame, regnum, natval);
2015 }
2016
2017 else if (regnum == IA64_IP_REGNUM)
2018 {
2019 CORE_ADDR pc = 0;
2020 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2021
2022 if (addr != 0)
2023 {
2024 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2025 pc = extract_unsigned_integer (buf, 8, byte_order);
2026 }
2027 else if (cache->frameless)
2028 {
2029 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2030 pc = extract_unsigned_integer (buf, 8, byte_order);
2031 }
2032 pc &= ~0xf;
2033 return frame_unwind_got_constant (this_frame, regnum, pc);
2034 }
2035
2036 else if (regnum == IA64_PSR_REGNUM)
2037 {
2038 /* We don't know how to get the complete previous PSR, but we need it
2039 for the slot information when we unwind the pc (pc is formed of the
2040 IP register plus slot information from PSR). To get the previous
2041 slot information, we extract it from the return address. */
2042 ULONGEST slot_num = 0;
2043 CORE_ADDR pc = 0;
2044 CORE_ADDR psr = 0;
2045 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2046
2047 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2048 psr = extract_unsigned_integer (buf, 8, byte_order);
2049
2050 if (addr != 0)
2051 {
2052 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2053 pc = extract_unsigned_integer (buf, 8, byte_order);
2054 }
2055 else if (cache->frameless)
2056 {
2057 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2058 pc = extract_unsigned_integer (buf, 8, byte_order);
2059 }
2060 psr &= ~(3LL << 41);
2061 slot_num = pc & 0x3LL;
2062 psr |= (CORE_ADDR)slot_num << 41;
2063 return frame_unwind_got_constant (this_frame, regnum, psr);
2064 }
2065
2066 else if (regnum == IA64_BR0_REGNUM)
2067 {
2068 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2069
2070 if (addr != 0)
2071 return frame_unwind_got_memory (this_frame, regnum, addr);
2072
2073 return frame_unwind_got_constant (this_frame, regnum, 0);
2074 }
2075
2076 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2077 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2078 {
2079 CORE_ADDR addr = 0;
2080
2081 if (regnum >= V32_REGNUM)
2082 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2083 addr = cache->saved_regs[regnum];
2084 if (addr != 0)
2085 return frame_unwind_got_memory (this_frame, regnum, addr);
2086
2087 if (cache->frameless)
2088 {
2089 struct value *reg_val;
2090 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2091
2092 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2093 with the same code above? */
2094 if (regnum >= V32_REGNUM)
2095 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2096 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2097 IA64_CFM_REGNUM);
2098 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2099 8, byte_order);
2100 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2101 IA64_BSP_REGNUM);
2102 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2103 8, byte_order);
2104 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2105
2106 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2107 return frame_unwind_got_memory (this_frame, regnum, addr);
2108 }
2109
2110 return frame_unwind_got_constant (this_frame, regnum, 0);
2111 }
2112
2113 else /* All other registers. */
2114 {
2115 CORE_ADDR addr = 0;
2116
2117 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2118 {
2119 /* Fetch floating point register rename base from current
2120 frame marker for this frame. */
2121 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2122
2123 /* Adjust the floating point register number to account for
2124 register rotation. */
2125 regnum = IA64_FR32_REGNUM
2126 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2127 }
2128
2129 /* If we have stored a memory address, access the register. */
2130 addr = cache->saved_regs[regnum];
2131 if (addr != 0)
2132 return frame_unwind_got_memory (this_frame, regnum, addr);
2133 /* Otherwise, punt and get the current value of the register. */
2134 else
2135 return frame_unwind_got_register (this_frame, regnum, regnum);
2136 }
2137 }
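
/* Worked example of the previous-bsp computation above (numbers made
   up and chosen so that no NAT collection slot is crossed): with
   cache->bsp == 0x9ffffffffff00100 and sof == 8, bof = bsp - 8*8 =
   0x9ffffffffff000c0.  If the previous CFM has sof == 10 and sol == 7,
   its output area is 3 registers, so prev_bsp = bof + 3*8 =
   0x9ffffffffff000d8.  rse_address_add is used instead of plain
   arithmetic so that NAT collection slots in the backing store are
   skipped correctly.  The helper below is illustrative only.  */
#if 0
static CORE_ADDR
example_prev_bsp (CORE_ADDR bsp, ULONGEST cfm, ULONGEST prev_cfm)
{
  int sof = cfm & 0x7f;
  int prev_sof = prev_cfm & 0x7f;
  int prev_sol = (prev_cfm >> 7) & 0x7f;
  CORE_ADDR bof = rse_address_add (bsp, -sof);

  /* The previous frame ends one output area beyond the current bof.  */
  return rse_address_add (bof, prev_sof - prev_sol);
}
#endif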
2138
2139 static const struct frame_unwind ia64_frame_unwind =
2140 {
2141 NORMAL_FRAME,
2142 &ia64_frame_this_id,
2143 &ia64_frame_prev_register,
2144 NULL,
2145 default_frame_sniffer
2146 };
2147
2148 /* Signal trampolines. */
2149
2150 static void
2151 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2152 struct ia64_frame_cache *cache)
2153 {
2154 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2155 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2156
2157 if (tdep->sigcontext_register_address)
2158 {
2159 int regno;
2160
2161 cache->saved_regs[IA64_VRAP_REGNUM]
2162 = tdep->sigcontext_register_address (gdbarch, cache->base,
2163 IA64_IP_REGNUM);
2164 cache->saved_regs[IA64_CFM_REGNUM]
2165 = tdep->sigcontext_register_address (gdbarch, cache->base,
2166 IA64_CFM_REGNUM);
2167 cache->saved_regs[IA64_PSR_REGNUM]
2168 = tdep->sigcontext_register_address (gdbarch, cache->base,
2169 IA64_PSR_REGNUM);
2170 cache->saved_regs[IA64_BSP_REGNUM]
2171 = tdep->sigcontext_register_address (gdbarch, cache->base,
2172 IA64_BSP_REGNUM);
2173 cache->saved_regs[IA64_RNAT_REGNUM]
2174 = tdep->sigcontext_register_address (gdbarch, cache->base,
2175 IA64_RNAT_REGNUM);
2176 cache->saved_regs[IA64_CCV_REGNUM]
2177 = tdep->sigcontext_register_address (gdbarch, cache->base,
2178 IA64_CCV_REGNUM);
2179 cache->saved_regs[IA64_UNAT_REGNUM]
2180 = tdep->sigcontext_register_address (gdbarch, cache->base,
2181 IA64_UNAT_REGNUM);
2182 cache->saved_regs[IA64_FPSR_REGNUM]
2183 = tdep->sigcontext_register_address (gdbarch, cache->base,
2184 IA64_FPSR_REGNUM);
2185 cache->saved_regs[IA64_PFS_REGNUM]
2186 = tdep->sigcontext_register_address (gdbarch, cache->base,
2187 IA64_PFS_REGNUM);
2188 cache->saved_regs[IA64_LC_REGNUM]
2189 = tdep->sigcontext_register_address (gdbarch, cache->base,
2190 IA64_LC_REGNUM);
2191
2192 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2193 cache->saved_regs[regno] =
2194 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2195 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2196 cache->saved_regs[regno] =
2197 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2198 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2199 cache->saved_regs[regno] =
2200 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2201 }
2202 }
2203
2204 static struct ia64_frame_cache *
2205 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2206 {
2207 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2208 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2209 struct ia64_frame_cache *cache;
2210 CORE_ADDR addr;
2211 char buf[8];
2212 int i;
2213
2214 if (*this_cache)
2215 return *this_cache;
2216
2217 cache = ia64_alloc_frame_cache ();
2218
2219 get_frame_register (this_frame, sp_regnum, buf);
2220 /* Note that frame size is hard-coded below. We cannot calculate it
2221 via prologue examination. */
2222 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2223
2224 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2225 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2226
2227 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2228 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2229 cache->sof = cache->cfm & 0x7f;
2230
2231 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2232
2233 *this_cache = cache;
2234 return cache;
2235 }
2236
2237 static void
2238 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2239 void **this_cache, struct frame_id *this_id)
2240 {
2241 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2242 struct ia64_frame_cache *cache =
2243 ia64_sigtramp_frame_cache (this_frame, this_cache);
2244
2245 (*this_id) = frame_id_build_special (cache->base,
2246 get_frame_pc (this_frame),
2247 cache->bsp);
2248 if (gdbarch_debug >= 1)
2249 fprintf_unfiltered (gdb_stdlog,
2250 "sigtramp frame id: code %s, stack %s, "
2251 "special %s, this_frame %s\n",
2252 paddress (gdbarch, this_id->code_addr),
2253 paddress (gdbarch, this_id->stack_addr),
2254 paddress (gdbarch, cache->bsp),
2255 host_address_to_string (this_frame));
2256 }
2257
2258 static struct value *
2259 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2260 void **this_cache, int regnum)
2261 {
2262 char buf[MAX_REGISTER_SIZE];
2263
2264 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2265 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2266 struct ia64_frame_cache *cache =
2267 ia64_sigtramp_frame_cache (this_frame, this_cache);
2268
2269 gdb_assert (regnum >= 0);
2270
2271 if (!target_has_registers)
2272 error (_("No registers."));
2273
2274 if (regnum == IA64_IP_REGNUM)
2275 {
2276 CORE_ADDR pc = 0;
2277 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2278
2279 if (addr != 0)
2280 {
2281 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2282 pc = extract_unsigned_integer (buf, 8, byte_order);
2283 }
2284 pc &= ~0xf;
2285 return frame_unwind_got_constant (this_frame, regnum, pc);
2286 }
2287
2288 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2289 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2290 {
2291 CORE_ADDR addr = 0;
2292
2293 if (regnum >= V32_REGNUM)
2294 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2295 addr = cache->saved_regs[regnum];
2296 if (addr != 0)
2297 return frame_unwind_got_memory (this_frame, regnum, addr);
2298
2299 return frame_unwind_got_constant (this_frame, regnum, 0);
2300 }
2301
2302 else /* All other registers not listed above. */
2303 {
2304 CORE_ADDR addr = cache->saved_regs[regnum];
2305
2306 if (addr != 0)
2307 return frame_unwind_got_memory (this_frame, regnum, addr);
2308
2309 return frame_unwind_got_constant (this_frame, regnum, 0);
2310 }
2311 }
2312
2313 static int
2314 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2315 struct frame_info *this_frame,
2316 void **this_cache)
2317 {
2318 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2319 if (tdep->pc_in_sigtramp)
2320 {
2321 CORE_ADDR pc = get_frame_pc (this_frame);
2322
2323 if (tdep->pc_in_sigtramp (pc))
2324 return 1;
2325 }
2326
2327 return 0;
2328 }
2329
2330 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2331 {
2332 SIGTRAMP_FRAME,
2333 ia64_sigtramp_frame_this_id,
2334 ia64_sigtramp_frame_prev_register,
2335 NULL,
2336 ia64_sigtramp_frame_sniffer
2337 };
2338
2339 \f
2340
2341 static CORE_ADDR
2342 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2343 {
2344 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2345
2346 return cache->base;
2347 }
2348
2349 static const struct frame_base ia64_frame_base =
2350 {
2351 &ia64_frame_unwind,
2352 ia64_frame_base_address,
2353 ia64_frame_base_address,
2354 ia64_frame_base_address
2355 };
2356
2357 #ifdef HAVE_LIBUNWIND_IA64_H
2358
2359 struct ia64_unwind_table_entry
2360 {
2361 unw_word_t start_offset;
2362 unw_word_t end_offset;
2363 unw_word_t info_offset;
2364 };
2365
2366 static __inline__ uint64_t
2367 ia64_rse_slot_num (uint64_t addr)
2368 {
2369 return (addr >> 3) & 0x3f;
2370 }
2371
2372 /* Skip over a designated number of registers in the backing
2373 store, remembering that every 64th slot holds a NAT collection. */
2374 static __inline__ uint64_t
2375 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2376 {
2377 long delta = ia64_rse_slot_num(addr) + num_regs;
2378
2379 if (num_regs < 0)
2380 delta -= 0x3e;
2381 return addr + ((num_regs + delta/0x3f) << 3);
2382 }
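
/* Worked example for ia64_rse_skip_regs (values made up): starting at
   an address whose slot number is 60 and skipping 5 registers, delta
   becomes 65, so one NAT collection (slot 63) lies in the way and the
   result is addr + (5 + 65/63) * 8 == addr + 48, i.e. six 8-byte slots
   rather than five.  The check below is illustrative only.  */
#if 0
static void
example_rse_skip_regs (void)
{
  uint64_t addr = 0x6000000000000000ULL + (60 << 3);   /* slot number 60 */

  /* Skipping 5 registers also hops over the NAT collection at slot 63,
     so the backing store pointer advances by 6 slots.  */
  gdb_assert (ia64_rse_skip_regs (addr, 5) == addr + 6 * 8);
}
#endif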
2383
2384 /* Gdb libunwind-frame callback function to convert from an ia64 gdb register
2385 number to a libunwind register number. */
2386 static int
2387 ia64_gdb2uw_regnum (int regnum)
2388 {
2389 if (regnum == sp_regnum)
2390 return UNW_IA64_SP;
2391 else if (regnum == IA64_BSP_REGNUM)
2392 return UNW_IA64_BSP;
2393 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2394 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2395 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2396 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2397 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2398 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2399 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2400 return -1;
2401 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2402 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2403 else if (regnum == IA64_PR_REGNUM)
2404 return UNW_IA64_PR;
2405 else if (regnum == IA64_IP_REGNUM)
2406 return UNW_REG_IP;
2407 else if (regnum == IA64_CFM_REGNUM)
2408 return UNW_IA64_CFM;
2409 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2410 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2411 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2412 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2413 else
2414 return -1;
2415 }
2416
2417 /* Gdb libunwind-frame callback function to convert from a libunwind register
2418 number to an ia64 gdb register number. */
2419 static int
2420 ia64_uw2gdb_regnum (int uw_regnum)
2421 {
2422 if (uw_regnum == UNW_IA64_SP)
2423 return sp_regnum;
2424 else if (uw_regnum == UNW_IA64_BSP)
2425 return IA64_BSP_REGNUM;
2426 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2427 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2428 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2429 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2430 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2431 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2432 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2433 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2434 else if (uw_regnum == UNW_IA64_PR)
2435 return IA64_PR_REGNUM;
2436 else if (uw_regnum == UNW_REG_IP)
2437 return IA64_IP_REGNUM;
2438 else if (uw_regnum == UNW_IA64_CFM)
2439 return IA64_CFM_REGNUM;
2440 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2441 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2442 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2443 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2444 else
2445 return -1;
2446 }
2447
2448 /* Gdb libunwind-frame callback function to report whether a register
2449 is a floating-point register or not. */
2450 static int
2451 ia64_is_fpreg (int uw_regnum)
2452 {
2453 return unw_is_fpreg (uw_regnum);
2454 }
2455
2456 /* Libunwind callback accessor function for general registers. */
2457 static int
2458 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2459 int write, void *arg)
2460 {
2461 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2462 unw_word_t bsp, sof, sol, cfm, psr, ip;
2463 struct frame_info *this_frame = arg;
2464 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2465 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2466 long new_sof, old_sof;
2467 char buf[MAX_REGISTER_SIZE];
2468
2469 /* We never call any libunwind routines that need to write registers. */
2470 gdb_assert (!write);
2471
2472 switch (uw_regnum)
2473 {
2474 case UNW_REG_IP:
2475 /* Libunwind expects to see the pc value which means the slot number
2476 from the psr must be merged with the ip word address. */
2477 get_frame_register (this_frame, IA64_IP_REGNUM, buf);
2478 ip = extract_unsigned_integer (buf, 8, byte_order);
2479 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2480 psr = extract_unsigned_integer (buf, 8, byte_order);
2481 *val = ip | ((psr >> 41) & 0x3);
2482 break;
2483
2484 case UNW_IA64_AR_BSP:
2485 /* Libunwind expects to see the beginning of the current
2486 register frame so we must account for the fact that
2487 ptrace() will return a value for bsp that points *after*
2488 the current register frame. */
2489 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2490 bsp = extract_unsigned_integer (buf, 8, byte_order);
2491 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2492 cfm = extract_unsigned_integer (buf, 8, byte_order);
2493 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2494 *val = ia64_rse_skip_regs (bsp, -sof);
2495 break;
2496
2497 case UNW_IA64_AR_BSPSTORE:
2498 /* Libunwind wants bspstore to be after the current register frame.
2499 This is what ptrace() and gdb treat as the regular bsp value. */
2500 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2501 *val = extract_unsigned_integer (buf, 8, byte_order);
2502 break;
2503
2504 default:
2505 /* For all other registers, just unwind the value directly. */
2506 get_frame_register (this_frame, regnum, buf);
2507 *val = extract_unsigned_integer (buf, 8, byte_order);
2508 break;
2509 }
2510
2511 if (gdbarch_debug >= 1)
2512 fprintf_unfiltered (gdb_stdlog,
2513 " access_reg: from cache: %4s=%s\n",
2514 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2515 ? ia64_register_names[regnum] : "r??"),
2516 paddress (gdbarch, *val));
2517 return 0;
2518 }
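
/* The IP/PSR merge above can be pictured with concrete (made-up)
   values: bundle addresses are 16-byte aligned, so the low bits of the
   ip are free, and PSR.ri (bits 41-42) carries the slot number.  With
   ip == 0x40000000000007a0 and PSR.ri == 2, libunwind is handed the pc
   value 0x40000000000007a2.  A minimal sketch, not used by the
   unwinder:  */
#if 0
static unw_word_t
example_merge_ip_and_slot (unw_word_t ip, unw_word_t psr)
{
  return ip | ((psr >> 41) & 0x3);
}
#endif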
2519
2520 /* Libunwind callback accessor function for floating-point registers. */
2521 static int
2522 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2523 unw_fpreg_t *val, int write, void *arg)
2524 {
2525 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2526 struct frame_info *this_frame = arg;
2527
2528 /* We never call any libunwind routines that need to write registers. */
2529 gdb_assert (!write);
2530
2531 get_frame_register (this_frame, regnum, (char *) val);
2532
2533 return 0;
2534 }
2535
2536 /* Libunwind callback accessor function for top-level rse registers. */
2537 static int
2538 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2539 unw_word_t *val, int write, void *arg)
2540 {
2541 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2542 unw_word_t bsp, sof, sol, cfm, psr, ip;
2543 struct regcache *regcache = arg;
2544 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2545 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2546 long new_sof, old_sof;
2547 char buf[MAX_REGISTER_SIZE];
2548
2549 /* We never call any libunwind routines that need to write registers. */
2550 gdb_assert (!write);
2551
2552 switch (uw_regnum)
2553 {
2554 case UNW_REG_IP:
2555 /* Libunwind expects to see the pc value which means the slot number
2556 from the psr must be merged with the ip word address. */
2557 regcache_cooked_read (regcache, IA64_IP_REGNUM, buf);
2558 ip = extract_unsigned_integer (buf, 8, byte_order);
2559 regcache_cooked_read (regcache, IA64_PSR_REGNUM, buf);
2560 psr = extract_unsigned_integer (buf, 8, byte_order);
2561 *val = ip | ((psr >> 41) & 0x3);
2562 break;
2563
2564 case UNW_IA64_AR_BSP:
2565 /* Libunwind expects to see the beginning of the current
2566 register frame so we must account for the fact that
2567 ptrace() will return a value for bsp that points *after*
2568 the current register frame. */
2569 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2570 bsp = extract_unsigned_integer (buf, 8, byte_order);
2571 regcache_cooked_read (regcache, IA64_CFM_REGNUM, buf);
2572 cfm = extract_unsigned_integer (buf, 8, byte_order);
2573 sof = (cfm & 0x7f);
2574 *val = ia64_rse_skip_regs (bsp, -sof);
2575 break;
2576
2577 case UNW_IA64_AR_BSPSTORE:
2578 /* Libunwind wants bspstore to be after the current register frame.
2579 This is what ptrace() and gdb treat as the regular bsp value. */
2580 regcache_cooked_read (regcache, IA64_BSP_REGNUM, buf);
2581 *val = extract_unsigned_integer (buf, 8, byte_order);
2582 break;
2583
2584 default:
2585 /* For all other registers, just unwind the value directly. */
2586 regcache_cooked_read (regcache, regnum, buf);
2587 *val = extract_unsigned_integer (buf, 8, byte_order);
2588 break;
2589 }
2590
2591 if (gdbarch_debug >= 1)
2592 fprintf_unfiltered (gdb_stdlog,
2593 " access_rse_reg: from cache: %4s=%s\n",
2594 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2595 ? ia64_register_names[regnum] : "r??"),
2596 paddress (gdbarch, *val));
2597
2598 return 0;
2599 }
2600
2601 /* Libunwind callback accessor function for top-level fp registers. */
2602 static int
2603 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2604 unw_fpreg_t *val, int write, void *arg)
2605 {
2606 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2607 struct regcache *regcache = arg;
2608
2609 /* We never call any libunwind routines that need to write registers. */
2610 gdb_assert (!write);
2611
2612 regcache_cooked_read (regcache, regnum, (char *) val);
2613
2614 return 0;
2615 }
2616
2617 /* Libunwind callback accessor function for accessing memory. */
2618 static int
2619 ia64_access_mem (unw_addr_space_t as,
2620 unw_word_t addr, unw_word_t *val,
2621 int write, void *arg)
2622 {
2623 if (addr - KERNEL_START < ktab_size)
2624 {
2625 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2626 + (addr - KERNEL_START));
2627
2628 if (write)
2629 *laddr = *val;
2630 else
2631 *val = *laddr;
2632 return 0;
2633 }
2634
2635 /* XXX do we need to normalize byte-order here? */
2636 if (write)
2637 return target_write_memory (addr, (char *) val, sizeof (unw_word_t));
2638 else
2639 return target_read_memory (addr, (char *) val, sizeof (unw_word_t));
2640 }
2641
2642 /* Call low-level function to access the kernel unwind table. */
2643 static LONGEST
2644 getunwind_table (gdb_byte **buf_p)
2645 {
2646 LONGEST x;
2647
2648 /* FIXME drow/2005-09-10: This code used to call
2649 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2650 for the currently running ia64-linux kernel. That data should
2651 come from the core file and be accessed via the auxv vector; if
2652 we want to preserve the fallback to the running kernel's table, then
2653 we should find a way to override the corefile layer's
2654 xfer_partial method. */
2655
2656 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2657 NULL, buf_p);
2658
2659 return x;
2660 }
2661
2662 /* Get the kernel unwind table. */
2663 static int
2664 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2665 {
2666 static struct ia64_table_entry *etab;
2667
2668 if (!ktab)
2669 {
2670 gdb_byte *ktab_buf;
2671 LONGEST size;
2672
2673 size = getunwind_table (&ktab_buf);
2674 if (size <= 0)
2675 return -UNW_ENOINFO;
2676
2677 ktab = (struct ia64_table_entry *) ktab_buf;
2678 ktab_size = size;
2679
2680 for (etab = ktab; etab->start_offset; ++etab)
2681 etab->info_offset += KERNEL_START;
2682 }
2683
2684 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2685 return -UNW_ENOINFO;
2686
2687 di->format = UNW_INFO_FORMAT_TABLE;
2688 di->gp = 0;
2689 di->start_ip = ktab[0].start_offset;
2690 di->end_ip = etab[-1].end_offset;
2691 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2692 di->u.ti.segbase = 0;
2693 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2694 di->u.ti.table_data = (unw_word_t *) ktab;
2695
2696 if (gdbarch_debug >= 1)
2697 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2698 "segbase=%s, length=%s, gp=%s\n",
2699 (char *) di->u.ti.name_ptr,
2700 hex_string (di->u.ti.segbase),
2701 pulongest (di->u.ti.table_len),
2702 hex_string (di->gp));
2703 return 0;
2704 }
2705
2706 /* Find the unwind table entry for a specified address. */
2707 static int
2708 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2709 unw_dyn_info_t *dip, void **buf)
2710 {
2711 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2712 Elf_Internal_Ehdr *ehdr;
2713 unw_word_t segbase = 0;
2714 CORE_ADDR load_base;
2715 bfd *bfd;
2716 int i;
2717
2718 bfd = objfile->obfd;
2719
2720 ehdr = elf_tdata (bfd)->elf_header;
2721 phdr = elf_tdata (bfd)->phdr;
2722
2723 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2724
2725 for (i = 0; i < ehdr->e_phnum; ++i)
2726 {
2727 switch (phdr[i].p_type)
2728 {
2729 case PT_LOAD:
2730 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2731 < phdr[i].p_memsz)
2732 p_text = phdr + i;
2733 break;
2734
2735 case PT_IA_64_UNWIND:
2736 p_unwind = phdr + i;
2737 break;
2738
2739 default:
2740 break;
2741 }
2742 }
2743
2744 if (!p_text || !p_unwind)
2745 return -UNW_ENOINFO;
2746
2747 /* Verify that the segment that contains the IP also contains
2748 the static unwind table. If not, we may be in the Linux kernel's
2749 DSO gate page, in which case the unwind table is in another segment.
2750 Otherwise, we are dealing with runtime-generated code, for which we
2751 have no info here. */
2752 segbase = p_text->p_vaddr + load_base;
2753
2754 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2755 {
2756 int ok = 0;
2757 for (i = 0; i < ehdr->e_phnum; ++i)
2758 {
2759 if (phdr[i].p_type == PT_LOAD
2760 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2761 {
2762 ok = 1;
2763 /* Get the segbase from the section containing the
2764 libunwind table. */
2765 segbase = phdr[i].p_vaddr + load_base;
2766 }
2767 }
2768 if (!ok)
2769 return -UNW_ENOINFO;
2770 }
2771
2772 dip->start_ip = p_text->p_vaddr + load_base;
2773 dip->end_ip = dip->start_ip + p_text->p_memsz;
2774 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2775 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2776 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2777 dip->u.rti.segbase = segbase;
2778 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2779 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2780
2781 return 0;
2782 }
2783
2784 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2785 static int
2786 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2787 int need_unwind_info, void *arg)
2788 {
2789 struct obj_section *sec = find_pc_section (ip);
2790 unw_dyn_info_t di;
2791 int ret;
2792 void *buf = NULL;
2793
2794 if (!sec)
2795 {
2796 /* XXX This only works if the host and the target architecture are
2797 both ia64 and if they have (more or less) the same kernel
2798 version. */
2799 if (get_kernel_table (ip, &di) < 0)
2800 return -UNW_ENOINFO;
2801
2802 if (gdbarch_debug >= 1)
2803 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2804 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2805 "length=%s,data=%s)\n",
2806 hex_string (ip), (char *)di.u.ti.name_ptr,
2807 hex_string (di.u.ti.segbase),
2808 hex_string (di.start_ip), hex_string (di.end_ip),
2809 hex_string (di.gp),
2810 pulongest (di.u.ti.table_len),
2811 hex_string ((CORE_ADDR)di.u.ti.table_data));
2812 }
2813 else
2814 {
2815 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2816 if (ret < 0)
2817 return ret;
2818
2819 if (gdbarch_debug >= 1)
2820 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2821 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2822 "length=%s,data=%s)\n",
2823 hex_string (ip), (char *)di.u.rti.name_ptr,
2824 hex_string (di.u.rti.segbase),
2825 hex_string (di.start_ip), hex_string (di.end_ip),
2826 hex_string (di.gp),
2827 pulongest (di.u.rti.table_len),
2828 hex_string (di.u.rti.table_data));
2829 }
2830
2831 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2832 arg);
2833
2834 /* We no longer need the dyn info storage so free it. */
2835 xfree (buf);
2836
2837 return ret;
2838 }
2839
2840 /* Libunwind callback accessor function for cleanup. */
2841 static void
2842 ia64_put_unwind_info (unw_addr_space_t as,
2843 unw_proc_info_t *pip, void *arg)
2844 {
2845 /* Nothing required for now. */
2846 }
2847
2848 /* Libunwind callback accessor function to get head of the dynamic
2849 unwind-info registration list. */
2850 static int
2851 ia64_get_dyn_info_list (unw_addr_space_t as,
2852 unw_word_t *dilap, void *arg)
2853 {
2854 struct obj_section *text_sec;
2855 struct objfile *objfile;
2856 unw_word_t ip, addr;
2857 unw_dyn_info_t di;
2858 int ret;
2859
2860 if (!libunwind_is_initialized ())
2861 return -UNW_ENOINFO;
2862
2863 for (objfile = object_files; objfile; objfile = objfile->next)
2864 {
2865 void *buf = NULL;
2866
2867 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2868 ip = obj_section_addr (text_sec);
2869 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2870 if (ret >= 0)
2871 {
2872 addr = libunwind_find_dyn_list (as, &di, arg);
2873 /* We no longer need the dyn info storage so free it. */
2874 xfree (buf);
2875
2876 if (addr)
2877 {
2878 if (gdbarch_debug >= 1)
2879 fprintf_unfiltered (gdb_stdlog,
2880 "dynamic unwind table in objfile %s "
2881 "at %s (gp=%s)\n",
2882 bfd_get_filename (objfile->obfd),
2883 hex_string (addr), hex_string (di.gp));
2884 *dilap = addr;
2885 return 0;
2886 }
2887 }
2888 }
2889 return -UNW_ENOINFO;
2890 }
2891
2892
2893 /* Frame interface functions for libunwind. */
2894
2895 static void
2896 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2897 struct frame_id *this_id)
2898 {
2899 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2900 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2901 struct frame_id id = outer_frame_id;
2902 char buf[8];
2903 CORE_ADDR bsp;
2904
2905 libunwind_frame_this_id (this_frame, this_cache, &id);
2906 if (frame_id_eq (id, outer_frame_id))
2907 {
2908 (*this_id) = outer_frame_id;
2909 return;
2910 }
2911
2912 /* We must add the bsp as the special address for frame comparison
2913 purposes. */
2914 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2915 bsp = extract_unsigned_integer (buf, 8, byte_order);
2916
2917 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2918
2919 if (gdbarch_debug >= 1)
2920 fprintf_unfiltered (gdb_stdlog,
2921 "libunwind frame id: code %s, stack %s, "
2922 "special %s, this_frame %s\n",
2923 paddress (gdbarch, id.code_addr),
2924 paddress (gdbarch, id.stack_addr),
2925 paddress (gdbarch, bsp),
2926 host_address_to_string (this_frame));
2927 }
2928
2929 static struct value *
2930 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2931 void **this_cache, int regnum)
2932 {
2933 int reg = regnum;
2934 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2935 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2936 struct value *val;
2937
2938 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2939 reg = IA64_PR_REGNUM;
2940 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2941 reg = IA64_UNAT_REGNUM;
2942
2943 /* Let libunwind do most of the work. */
2944 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2945
2946 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2947 {
2948 ULONGEST prN_val;
2949
2950 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2951 {
2952 int rrb_pr = 0;
2953 ULONGEST cfm;
2954 unsigned char buf[MAX_REGISTER_SIZE];
2955
2956 /* Fetch predicate register rename base from current frame
2957 marker for this frame. */
2958 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2959 cfm = extract_unsigned_integer (buf, 8, byte_order);
2960 rrb_pr = (cfm >> 32) & 0x3f;
2961
2962 /* Adjust the register number to account for register rotation. */
2963 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2964 }
2965 prN_val = extract_bit_field (value_contents_all (val),
2966 regnum - VP0_REGNUM, 1);
2967 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2968 }
2969
2970 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2971 {
2972 ULONGEST unatN_val;
2973
2974 unatN_val = extract_bit_field (value_contents_all (val),
2975 regnum - IA64_NAT0_REGNUM, 1);
2976 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2977 }
2978
2979 else if (regnum == IA64_BSP_REGNUM)
2980 {
2981 struct value *cfm_val;
2982 CORE_ADDR prev_bsp, prev_cfm;
2983
2984 /* We want to calculate the previous bsp as the end of the previous
2985 register stack frame. This corresponds to what the hardware bsp
2986 register will be if we pop the frame back, which is why we might
2987 have been called. We know that libunwind will pass us back the
2988 beginning of the current frame so we should just add sof to it. */
2989 prev_bsp = extract_unsigned_integer (value_contents_all (val),
2990 8, byte_order);
2991 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2992 IA64_CFM_REGNUM);
2993 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
2994 8, byte_order);
2995 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2996
2997 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
2998 }
2999 else
3000 return val;
3001 }
3002
3003 static int
3004 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3005 struct frame_info *this_frame,
3006 void **this_cache)
3007 {
3008 if (libunwind_is_initialized ()
3009 && libunwind_frame_sniffer (self, this_frame, this_cache))
3010 return 1;
3011
3012 return 0;
3013 }
3014
3015 static const struct frame_unwind ia64_libunwind_frame_unwind =
3016 {
3017 NORMAL_FRAME,
3018 ia64_libunwind_frame_this_id,
3019 ia64_libunwind_frame_prev_register,
3020 NULL,
3021 ia64_libunwind_frame_sniffer,
3022 libunwind_frame_dealloc_cache
3023 };
3024
3025 static void
3026 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3027 void **this_cache,
3028 struct frame_id *this_id)
3029 {
3030 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3031 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3032 char buf[8];
3033 CORE_ADDR bsp;
3034 struct frame_id id = outer_frame_id;
3035 CORE_ADDR prev_ip;
3036
3037 libunwind_frame_this_id (this_frame, this_cache, &id);
3038 if (frame_id_eq (id, outer_frame_id))
3039 {
3040 (*this_id) = outer_frame_id;
3041 return;
3042 }
3043
3044 /* We must add the bsp as the special address for frame comparison
3045 purposes. */
3046 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3047 bsp = extract_unsigned_integer (buf, 8, byte_order);
3048
3049 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3050 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3051
3052 if (gdbarch_debug >= 1)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "libunwind sigtramp frame id: code %s, "
3055 "stack %s, special %s, this_frame %s\n",
3056 paddress (gdbarch, id.code_addr),
3057 paddress (gdbarch, id.stack_addr),
3058 paddress (gdbarch, bsp),
3059 host_address_to_string (this_frame));
3060 }
3061
3062 static struct value *
3063 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3064 void **this_cache, int regnum)
3065 {
3066 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3067 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3068 struct value *prev_ip_val;
3069 CORE_ADDR prev_ip;
3070
3071 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3072 method of getting previous registers. */
3073 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3074 IA64_IP_REGNUM);
3075 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3076 8, byte_order);
3077
3078 if (prev_ip == 0)
3079 {
3080 void *tmp_cache = NULL;
3081 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3082 regnum);
3083 }
3084 else
3085 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3086 }
3087
3088 static int
3089 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3090 struct frame_info *this_frame,
3091 void **this_cache)
3092 {
3093 if (libunwind_is_initialized ())
3094 {
3095 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3096 return 1;
3097 return 0;
3098 }
3099 else
3100 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3101 }
3102
3103 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3104 {
3105 SIGTRAMP_FRAME,
3106 ia64_libunwind_sigtramp_frame_this_id,
3107 ia64_libunwind_sigtramp_frame_prev_register,
3108 NULL,
3109 ia64_libunwind_sigtramp_frame_sniffer
3110 };
3111
3112 /* Set of libunwind callback accessor functions. */
3113 static unw_accessors_t ia64_unw_accessors =
3114 {
3115 ia64_find_proc_info_x,
3116 ia64_put_unwind_info,
3117 ia64_get_dyn_info_list,
3118 ia64_access_mem,
3119 ia64_access_reg,
3120 ia64_access_fpreg,
3121 /* resume */
3122 /* get_proc_name */
3123 };
3124
3125 /* Set of special libunwind callback accessor functions for accessing
3126 the rse registers. At the top of the stack, we want libunwind to figure out
3127 how to read r32 - r127. Though usually they are found sequentially in
3128 memory starting from $bof, this is not always true. */
3129 static unw_accessors_t ia64_unw_rse_accessors =
3130 {
3131 ia64_find_proc_info_x,
3132 ia64_put_unwind_info,
3133 ia64_get_dyn_info_list,
3134 ia64_access_mem,
3135 ia64_access_rse_reg,
3136 ia64_access_rse_fpreg,
3137 /* resume */
3138 /* get_proc_name */
3139 };
3140
3141 /* Set of ia64 gdb libunwind-frame callbacks and data for generic
3142 libunwind-frame code to use. */
3143 static struct libunwind_descr ia64_libunwind_descr =
3144 {
3145 ia64_gdb2uw_regnum,
3146 ia64_uw2gdb_regnum,
3147 ia64_is_fpreg,
3148 &ia64_unw_accessors,
3149 &ia64_unw_rse_accessors,
3150 };
3151
3152 #endif /* HAVE_LIBUNWIND_IA64_H */
3153
3154 static int
3155 ia64_use_struct_convention (struct type *type)
3156 {
3157 struct type *float_elt_type;
3158
3159 /* Don't use the struct convention for anything but structure,
3160 union, or array types. */
3161 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3162 || TYPE_CODE (type) == TYPE_CODE_UNION
3163 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3164 return 0;
3165
3166 /* HFAs are structures (or arrays) consisting entirely of floating
3167 point values of the same length. Up to 8 of these are returned
3168 in registers. Don't use the struct convention when this is the
3169 case. */
3170 float_elt_type = is_float_or_hfa_type (type);
3171 if (float_elt_type != NULL
3172 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3173 return 0;
3174
3175 /* Other structs of length 32 or less are returned in r8-r11.
3176 Don't use the struct convention for those either. */
3177 return TYPE_LENGTH (type) > 32;
3178 }
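
/* A few concrete cases for the rules above (illustrative types only,
   not used anywhere in GDB):  */
#if 0
struct example_hfa   { double x, y, z; };  /* HFA of 3 doubles: returned in
					      f8-f10, no struct convention.  */
struct example_small { long a, b; };	   /* 16 bytes <= 32: returned in
					      r8-r9, no struct convention.  */
struct example_big   { char buf[40]; };	   /* 40 bytes > 32: returned using
					      the struct convention.  */
#endif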
3179
3180 /* Return non-zero if TYPE is a structure or union type. */
3181
3182 static int
3183 ia64_struct_type_p (const struct type *type)
3184 {
3185 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3186 || TYPE_CODE (type) == TYPE_CODE_UNION);
3187 }
3188
3189 static void
3190 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3191 gdb_byte *valbuf)
3192 {
3193 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3194 struct type *float_elt_type;
3195
3196 float_elt_type = is_float_or_hfa_type (type);
3197 if (float_elt_type != NULL)
3198 {
3199 char from[MAX_REGISTER_SIZE];
3200 int offset = 0;
3201 int regnum = IA64_FR8_REGNUM;
3202 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3203
3204 while (n-- > 0)
3205 {
3206 regcache_cooked_read (regcache, regnum, from);
3207 convert_typed_floating (from, ia64_ext_type (gdbarch),
3208 (char *)valbuf + offset, float_elt_type);
3209 offset += TYPE_LENGTH (float_elt_type);
3210 regnum++;
3211 }
3212 }
3213 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3214 {
3215 /* This is an integral value, and its size is less than 8 bytes.
3216 These values are LSB-aligned, so extract the relevant bytes,
3217 and copy them into VALBUF. */
3218 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3219 so I suppose we should also add handling here for integral values
3220 whose size is greater than 8. But I wasn't able to create such
3221 a type, neither in C nor in Ada, so not worrying about these yet. */
3222 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3223 ULONGEST val;
3224
3225 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3226 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3227 }
3228 else
3229 {
3230 ULONGEST val;
3231 int offset = 0;
3232 int regnum = IA64_GR8_REGNUM;
3233 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3234 int n = TYPE_LENGTH (type) / reglen;
3235 int m = TYPE_LENGTH (type) % reglen;
3236
3237 while (n-- > 0)
3238 {
3239 ULONGEST val;
3240 regcache_cooked_read_unsigned (regcache, regnum, &val);
3241 memcpy ((char *)valbuf + offset, &val, reglen);
3242 offset += reglen;
3243 regnum++;
3244 }
3245
3246 if (m)
3247 {
3248 regcache_cooked_read_unsigned (regcache, regnum, &val);
3249 memcpy ((char *)valbuf + offset, &val, m);
3250 }
3251 }
3252 }
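
/* Worked example for the general-register path above (sizes made up):
   for a 20-byte non-HFA struct, reglen == 8, n == 2 and m == 4, so the
   value is reassembled from all of r8 and r9 plus the low 4 bytes of
   r10.  The helper below only restates that arithmetic and is not used
   by GDB.  */
#if 0
static void
example_return_value_split (int type_length)
{
  int reglen = 8;		    /* size of r8 on ia64 */
  int n = type_length / reglen;	    /* registers read in full */
  int m = type_length % reglen;	    /* leftover bytes from the next one */
}
#endif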
3253
3254 static void
3255 ia64_store_return_value (struct type *type, struct regcache *regcache,
3256 const gdb_byte *valbuf)
3257 {
3258 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3259 struct type *float_elt_type;
3260
3261 float_elt_type = is_float_or_hfa_type (type);
3262 if (float_elt_type != NULL)
3263 {
3264 char to[MAX_REGISTER_SIZE];
3265 int offset = 0;
3266 int regnum = IA64_FR8_REGNUM;
3267 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3268
3269 while (n-- > 0)
3270 {
3271 convert_typed_floating ((char *)valbuf + offset, float_elt_type,
3272 to, ia64_ext_type (gdbarch));
3273 regcache_cooked_write (regcache, regnum, to);
3274 offset += TYPE_LENGTH (float_elt_type);
3275 regnum++;
3276 }
3277 }
3278 else
3279 {
3280 ULONGEST val;
3281 int offset = 0;
3282 int regnum = IA64_GR8_REGNUM;
3283 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3284 int n = TYPE_LENGTH (type) / reglen;
3285 int m = TYPE_LENGTH (type) % reglen;
3286
3287 while (n-- > 0)
3288 {
3289 ULONGEST val;
3290 memcpy (&val, (char *)valbuf + offset, reglen);
3291 regcache_cooked_write_unsigned (regcache, regnum, val);
3292 offset += reglen;
3293 regnum++;
3294 }
3295
3296 if (m)
3297 {
3298 memcpy (&val, (char *)valbuf + offset, m);
3299 regcache_cooked_write_unsigned (regcache, regnum, val);
3300 }
3301 }
3302 }
3303
3304 static enum return_value_convention
3305 ia64_return_value (struct gdbarch *gdbarch, struct type *func_type,
3306 struct type *valtype, struct regcache *regcache,
3307 gdb_byte *readbuf, const gdb_byte *writebuf)
3308 {
3309 int struct_return = ia64_use_struct_convention (valtype);
3310
3311 if (writebuf != NULL)
3312 {
3313 gdb_assert (!struct_return);
3314 ia64_store_return_value (valtype, regcache, writebuf);
3315 }
3316
3317 if (readbuf != NULL)
3318 {
3319 gdb_assert (!struct_return);
3320 ia64_extract_return_value (valtype, regcache, readbuf);
3321 }
3322
3323 if (struct_return)
3324 return RETURN_VALUE_STRUCT_CONVENTION;
3325 else
3326 return RETURN_VALUE_REGISTER_CONVENTION;
3327 }
3328
3329 static int
3330 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3331 {
3332 switch (TYPE_CODE (t))
3333 {
3334 case TYPE_CODE_FLT:
3335 if (*etp)
3336 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3337 else
3338 {
3339 *etp = t;
3340 return 1;
3341 }
3342 break;
3343 case TYPE_CODE_ARRAY:
3344 return
3345 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3346 etp);
3347 break;
3348 case TYPE_CODE_STRUCT:
3349 {
3350 int i;
3351
3352 for (i = 0; i < TYPE_NFIELDS (t); i++)
3353 if (!is_float_or_hfa_type_recurse
3354 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3355 return 0;
3356 return 1;
3357 }
3358 break;
3359 default:
3360 return 0;
3361 break;
3362 }
3363 }
3364
3365 /* Determine if the given type is one of the floating point types or
3366 an HFA (which is a struct, array, or combination thereof whose
3367 bottom-most elements are all of the same floating point type). */
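/* For example (a hypothetical type, shown only for illustration):

     struct hfa { double x; double y[2]; };

   is an HFA whose element type is double, so that element type is
   returned; a struct mixing float and double members is rejected
   because the element lengths differ, and zero is returned.  */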
3368
3369 static struct type *
3370 is_float_or_hfa_type (struct type *t)
3371 {
3372 struct type *et = 0;
3373
3374 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3375 }
3376
3377
3378 /* Return 1 if the alignment of T is such that the next even slot
3379 should be used. Return 0 if the next available slot should
3380 be used. (See section 8.5.1 of the IA-64 Software Conventions
3381 and Runtime manual). */
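/* For instance, a scalar longer than 8 bytes (such as a long double),
   or any struct or array that contains one, requires the next even
   slot; an 8-byte double or a struct of small integers does not.  */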
3382
3383 static int
3384 slot_alignment_is_next_even (struct type *t)
3385 {
3386 switch (TYPE_CODE (t))
3387 {
3388 case TYPE_CODE_INT:
3389 case TYPE_CODE_FLT:
3390 if (TYPE_LENGTH (t) > 8)
3391 return 1;
3392 else
3393 return 0;
3394 case TYPE_CODE_ARRAY:
3395 return
3396 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3397 case TYPE_CODE_STRUCT:
3398 {
3399 int i;
3400
3401 for (i = 0; i < TYPE_NFIELDS (t); i++)
3402 if (slot_alignment_is_next_even
3403 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3404 return 1;
3405 return 0;
3406 }
3407 default:
3408 return 0;
3409 }
3410 }
3411
3412 /* Attempt to find (and return) the global pointer for the given
3413 function.
3414
3415 This is a rather nasty bit of code that searches for the .dynamic section
3416 in the objfile corresponding to the pc of the function we're trying
3417 to call. Once it finds the addresses at which the .dynamic section
3418 lives in the child process, it scans the Elf64_Dyn entries for a
3419 DT_PLTGOT tag. If it finds one of these, the corresponding
3420 d_un.d_ptr value is the global pointer. */
3421
3422 static CORE_ADDR
3423 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3424 CORE_ADDR faddr)
3425 {
3426 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3427 struct obj_section *faddr_sect;
3428
3429 faddr_sect = find_pc_section (faddr);
3430 if (faddr_sect != NULL)
3431 {
3432 struct obj_section *osect;
3433
3434 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3435 {
3436 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3437 break;
3438 }
3439
3440 if (osect < faddr_sect->objfile->sections_end)
3441 {
3442 CORE_ADDR addr, endaddr;
3443
3444 addr = obj_section_addr (osect);
3445 endaddr = obj_section_endaddr (osect);
3446
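/* Each Elf64_Dyn entry is 16 bytes: an 8-byte tag followed by an
   8-byte value.  That is why the tag is read at ADDR, its value at
   ADDR + 8, and the scan advances by 16.  */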
3447 while (addr < endaddr)
3448 {
3449 int status;
3450 LONGEST tag;
3451 char buf[8];
3452
3453 status = target_read_memory (addr, buf, sizeof (buf));
3454 if (status != 0)
3455 break;
3456 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3457
3458 if (tag == DT_PLTGOT)
3459 {
3460 CORE_ADDR global_pointer;
3461
3462 status = target_read_memory (addr + 8, buf, sizeof (buf));
3463 if (status != 0)
3464 break;
3465 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3466 byte_order);
3467
3468 /* The payoff... */
3469 return global_pointer;
3470 }
3471
3472 if (tag == DT_NULL)
3473 break;
3474
3475 addr += 16;
3476 }
3477 }
3478 }
3479 return 0;
3480 }
3481
3482 /* Attempt to find (and return) the global pointer for the given
3483 function. We first try the find_global_pointer_from_solib routine
3484 from the gdbarch tdep vector, if provided. And if that does not
3485 work, then we try ia64_find_global_pointer_from_dynamic_section. */
3486
3487 static CORE_ADDR
3488 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3489 {
3490 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3491 CORE_ADDR addr = 0;
3492
3493 if (tdep->find_global_pointer_from_solib)
3494 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3495 if (addr == 0)
3496 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3497 return addr;
3498 }
3499
3500 /* Given a function's address, attempt to find (and return) the
3501 corresponding (canonical) function descriptor. Return 0 if
3502 not found. */
3503 static CORE_ADDR
3504 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3505 {
3506 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3507 struct obj_section *faddr_sect;
3508
3509 /* Return early if faddr is already a function descriptor. */
3510 faddr_sect = find_pc_section (faddr);
3511 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3512 return faddr;
3513
3514 if (faddr_sect != NULL)
3515 {
3516 struct obj_section *osect;
3517 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3518 {
3519 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3520 break;
3521 }
3522
3523 if (osect < faddr_sect->objfile->sections_end)
3524 {
3525 CORE_ADDR addr, endaddr;
3526
3527 addr = obj_section_addr (osect);
3528 endaddr = obj_section_endaddr (osect);
3529
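/* Each .opd entry is a 16-byte function descriptor: the entry point
   in the first doubleword, the global pointer in the second.  Compare
   the entry point of each descriptor against FADDR.  */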
3530 while (addr < endaddr)
3531 {
3532 int status;
3533 LONGEST faddr2;
3534 char buf[8];
3535
3536 status = target_read_memory (addr, buf, sizeof (buf));
3537 if (status != 0)
3538 break;
3539 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3540
3541 if (faddr == faddr2)
3542 return addr;
3543
3544 addr += 16;
3545 }
3546 }
3547 }
3548 return 0;
3549 }
3550
3551 /* Attempt to find a function descriptor corresponding to the
3552 given address. If none is found, construct one on the
3553 stack using the address at fdaptr. */
3554
3555 static CORE_ADDR
3556 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3557 {
3558 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3559 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3560 CORE_ADDR fdesc;
3561
3562 fdesc = find_extant_func_descr (gdbarch, faddr);
3563
3564 if (fdesc == 0)
3565 {
3566 ULONGEST global_pointer;
3567 char buf[16];
3568
3569 fdesc = *fdaptr;
3570 *fdaptr += 16;
3571
3572 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3573
3574 if (global_pointer == 0)
3575 regcache_cooked_read_unsigned (regcache,
3576 IA64_GR1_REGNUM, &global_pointer);
3577
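/* Lay the descriptor out just like an .opd entry: the function's
   entry point in the first doubleword, the global pointer in the
   second.  */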
3578 store_unsigned_integer (buf, 8, byte_order, faddr);
3579 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3580
3581 write_memory (fdesc, buf, 16);
3582 }
3583
3584 return fdesc;
3585 }
3586
3587 /* Use the following routine when printing out function pointers
3588 so the user can see the function address rather than just the
3589 function descriptor. */
3590 static CORE_ADDR
3591 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3592 struct target_ops *targ)
3593 {
3594 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3595 struct obj_section *s;
3596 gdb_byte buf[8];
3597
3598 s = find_pc_section (addr);
3599
3600 /* Check if ADDR points to a function descriptor. */
3601 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3602 return read_memory_unsigned_integer (addr, 8, byte_order);
3603
3604 /* Normally, functions live inside a section that is executable.
3605 So, if ADDR points to a non-executable section, then treat it
3606 as a function descriptor and return the target address iff
3607 the target address itself points to a section that is executable.
3608 First check that the whole 8 bytes of memory are readable. */
3609 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3610 && target_read_memory (addr, buf, 8) == 0)
3611 {
3612 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3613 struct obj_section *pc_section = find_pc_section (pc);
3614
3615 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3616 return pc;
3617 }
3618
3619 /* There are also descriptors embedded in vtables. */
3620 if (s)
3621 {
3622 struct minimal_symbol *minsym;
3623
3624 minsym = lookup_minimal_symbol_by_pc (addr);
3625
3626 if (minsym && is_vtable_name (SYMBOL_LINKAGE_NAME (minsym)))
3627 return read_memory_unsigned_integer (addr, 8, byte_order);
3628 }
3629
3630 return addr;
3631 }
3632
3633 static CORE_ADDR
3634 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3635 {
3636 return sp & ~0xfLL;
3637 }
3638
3639 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3640
3641 static void
3642 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3643 {
3644 ULONGEST cfm, pfs, new_bsp;
3645
3646 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3647
3648 new_bsp = rse_address_add (bsp, sof);
3649 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3650
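/* Mimic the effect of a call on ar.pfs: keep only its two topmost
   bits (the privilege-level field) and copy the caller's CFM into
   its low bits.  CFM itself is then reset below to describe a fresh
   frame of SOF registers with no locals and no rotating registers.  */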
3651 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3652 pfs &= 0xc000000000000000LL;
3653 pfs |= (cfm & 0xffffffffffffLL);
3654 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3655
3656 cfm &= 0xc000000000000000LL;
3657 cfm |= sof;
3658 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3659 }
3660
3661 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3662 ia64. */
3663
3664 static void
3665 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3666 int slotnum, gdb_byte *buf)
3667 {
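/* rse_address_add converts the slot number into a backing-store
   address, skipping the NaT collection doublewords that the RSE
   interleaves into the backing store.  */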
3668 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3669 }
3670
3671 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3672
3673 static void
3674 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3675 {
3676 /* Nothing needed. */
3677 }
3678
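/* Implement the "push_dummy_call" gdbarch method.

   Arguments are laid out following the ia64 software conventions:
   the first eight argument slots go into a new RSE frame allocated
   at the current backing-store pointer, the remaining slots go onto
   the memory stack above a 16-byte scratch area, floating-point
   arguments (and HFAs) are additionally copied into f8 and following
   registers, and the address of a struct-return buffer, if any, is
   passed in r8.  */
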
3679 static CORE_ADDR
3680 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3681 struct regcache *regcache, CORE_ADDR bp_addr,
3682 int nargs, struct value **args, CORE_ADDR sp,
3683 int struct_return, CORE_ADDR struct_addr)
3684 {
3685 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3686 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3687 int argno;
3688 struct value *arg;
3689 struct type *type;
3690 int len, argoffset;
3691 int nslots, rseslots, memslots, slotnum, nfuncargs;
3692 int floatreg;
3693 ULONGEST bsp;
3694 CORE_ADDR funcdescaddr, pc, global_pointer;
3695 CORE_ADDR func_addr = find_function_addr (function, NULL);
3696
3697 nslots = 0;
3698 nfuncargs = 0;
3699 /* Count the number of slots needed for the arguments. */
3700 for (argno = 0; argno < nargs; argno++)
3701 {
3702 arg = args[argno];
3703 type = check_typedef (value_type (arg));
3704 len = TYPE_LENGTH (type);
3705
3706 if ((nslots & 1) && slot_alignment_is_next_even (type))
3707 nslots++;
3708
3709 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3710 nfuncargs++;
3711
3712 nslots += (len + 7) / 8;
3713 }
3714
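/* As an illustration: passing an int, a double and a 24-byte struct
   needs 1 + 1 + 3 = 5 slots, so with eight register slots available
   every argument lands in the RSE backing store and memslots ends up
   zero.  */
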
3715 /* Divvy up the slots between the RSE and the memory stack. */
3716 rseslots = (nslots > 8) ? 8 : nslots;
3717 memslots = nslots - rseslots;
3718
3719 /* Allocate a new RSE frame. */
3720 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3721 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3722
3723 /* We will attempt to find function descriptors in the .opd segment,
3724 but if we can't we'll construct them ourselves. That being the
3725 case, we'll need to reserve space on the stack for them. */
3726 funcdescaddr = sp - nfuncargs * 16;
3727 funcdescaddr &= ~0xfLL;
3728
3729 /* Adjust the stack pointer to its new value. The calling conventions
3730 require us to have 16 bytes of scratch, plus whatever space is
3731 necessary for the memory slots and our function descriptors. */
3732 sp = sp - 16 - (memslots + nfuncargs) * 8;
3733 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3734
3735 /* Place the arguments where they belong. The arguments will be
3736 either placed in the RSE backing store or on the memory stack.
3737 In addition, floating point arguments or HFAs are placed in
3738 floating point registers. */
3739 slotnum = 0;
3740 floatreg = IA64_FR8_REGNUM;
3741 for (argno = 0; argno < nargs; argno++)
3742 {
3743 struct type *float_elt_type;
3744
3745 arg = args[argno];
3746 type = check_typedef (value_type (arg));
3747 len = TYPE_LENGTH (type);
3748
3749 /* Special handling for function parameters. */
3750 if (len == 8
3751 && TYPE_CODE (type) == TYPE_CODE_PTR
3752 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3753 {
3754 char val_buf[8];
3755 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3756 8, byte_order);
3757 store_unsigned_integer (val_buf, 8, byte_order,
3758 find_func_descr (regcache, faddr,
3759 &funcdescaddr));
3760 if (slotnum < rseslots)
3761 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3762 slotnum, val_buf);
3763 else
3764 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3765 slotnum++;
3766 continue;
3767 }
3768
3769 /* Normal slots. */
3770
3771 /* Skip odd slot if necessary... */
3772 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3773 slotnum++;
3774
3775 argoffset = 0;
3776 while (len > 0)
3777 {
3778 char val_buf[8];
3779
3780 memset (val_buf, 0, 8);
3781 if (!ia64_struct_type_p (type) && len < 8)
3782 {
3783 /* Integral types are LSB-aligned, so we have to be careful
3784 to insert the argument on the correct side of the buffer.
3785 This is why we use store_unsigned_integer. */
3786 store_unsigned_integer
3787 (val_buf, 8, byte_order,
3788 extract_unsigned_integer (value_contents (arg), len,
3789 byte_order));
3790 }
3791 else
3792 {
3793 /* This is either an integral type of 8 bytes or more, or an
3794 aggregate.  For the integral case there is no problem; we
3795 just copy the value over.
3796
3797 For aggregates, the only potentially tricky portion
3798 is to write the last block if it is less than 8 bytes.
3799 In this case, the data is Byte0-aligned.  Happily, this
3800 means that we don't need to differentiate between the
3801 handling of 8-byte blocks and less-than-8-byte blocks. */
3802 memcpy (val_buf, value_contents (arg) + argoffset,
3803 (len > 8) ? 8 : len);
3804 }
3805
3806 if (slotnum < rseslots)
3807 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3808 slotnum, val_buf);
3809 else
3810 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3811
3812 argoffset += 8;
3813 len -= 8;
3814 slotnum++;
3815 }
3816
3817 /* Handle floating point types (including HFAs). */
3818 float_elt_type = is_float_or_hfa_type (type);
3819 if (float_elt_type != NULL)
3820 {
3821 argoffset = 0;
3822 len = TYPE_LENGTH (type);
3823 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3824 {
3825 char to[MAX_REGISTER_SIZE];
3826 convert_typed_floating (value_contents (arg) + argoffset,
3827 float_elt_type, to,
3828 ia64_ext_type (gdbarch));
3829 regcache_cooked_write (regcache, floatreg, (void *)to);
3830 floatreg++;
3831 argoffset += TYPE_LENGTH (float_elt_type);
3832 len -= TYPE_LENGTH (float_elt_type);
3833 }
3834 }
3835 }
3836
3837 /* Store the struct return value in r8 if necessary. */
3838 if (struct_return)
3839 {
3840 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3841 (ULONGEST) struct_addr);
3842 }
3843
3844 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3845
3846 if (global_pointer != 0)
3847 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3848
3849 /* The following is not necessary on HP-UX, because we're using
3850 a dummy code sequence pushed on the stack to make the call, and
3851 this sequence doesn't need b0 to be set in order for our dummy
3852 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3853 it's needed for other OSes, so we do this unconditionally. */
3854 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3855
3856 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3857
3858 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3859
3860 return sp;
3861 }
3862
3863 static const struct ia64_infcall_ops ia64_infcall_ops =
3864 {
3865 ia64_allocate_new_rse_frame,
3866 ia64_store_argument_in_slot,
3867 ia64_set_function_addr
3868 };
3869
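/* Implement the "dummy_id" gdbarch method.

   A dummy frame is identified by its stack pointer, its code address
   (the frame's PC) and, as the "special" address, the register
   backing store pointer, since ia64 frames span both a memory stack
   and a register stack.  */
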
3870 static struct frame_id
3871 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3872 {
3873 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3874 char buf[8];
3875 CORE_ADDR sp, bsp;
3876
3877 get_frame_register (this_frame, sp_regnum, buf);
3878 sp = extract_unsigned_integer (buf, 8, byte_order);
3879
3880 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3881 bsp = extract_unsigned_integer (buf, 8, byte_order);
3882
3883 if (gdbarch_debug >= 1)
3884 fprintf_unfiltered (gdb_stdlog,
3885 "dummy frame id: code %s, stack %s, special %s\n",
3886 paddress (gdbarch, get_frame_pc (this_frame)),
3887 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3888
3889 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3890 }
3891
3892 static CORE_ADDR
3893 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3894 {
3895 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3896 char buf[8];
3897 CORE_ADDR ip, psr, pc;
3898
3899 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3900 ip = extract_unsigned_integer (buf, 8, byte_order);
3901 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3902 psr = extract_unsigned_integer (buf, 8, byte_order);
3903
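/* The two-bit ri field of the PSR (bits 41 and 42) holds the slot
   number within the bundle addressed by IP; fold it into the low
   bits of the bundle address to form the slot-encoded PC value that
   GDB uses.  */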
3904 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3905 return pc;
3906 }
3907
3908 static int
3909 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3910 {
3911 info->bytes_per_line = SLOT_MULTIPLIER;
3912 return print_insn_ia64 (memaddr, info);
3913 }
3914
3915 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3916
3917 static int
3918 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3919 {
3920 return (cfm & 0x7f);
3921 }
3922
3923 static struct gdbarch *
3924 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3925 {
3926 struct gdbarch *gdbarch;
3927 struct gdbarch_tdep *tdep;
3928
3929 /* If there is already a candidate, use it. */
3930 arches = gdbarch_list_lookup_by_info (arches, &info);
3931 if (arches != NULL)
3932 return arches->gdbarch;
3933
3934 tdep = xzalloc (sizeof (struct gdbarch_tdep));
3935 gdbarch = gdbarch_alloc (&info, tdep);
3936
3937 tdep->size_of_register_frame = ia64_size_of_register_frame;
3938
3939 /* According to the ia64 specs, instructions that store long double
3940 floats in memory use a long-double format different than that
3941 used in the floating registers. The memory format matches the
3942 x86 extended float format which is 80 bits. An OS may choose to
3943 use this format (e.g. GNU/Linux) or choose to use a different
3944 format for storing long doubles (e.g. HP-UX). In the latter case,
3945 the setting of the format may be moved/overridden in an
3946 OS-specific tdep file. */
3947 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3948
3949 set_gdbarch_short_bit (gdbarch, 16);
3950 set_gdbarch_int_bit (gdbarch, 32);
3951 set_gdbarch_long_bit (gdbarch, 64);
3952 set_gdbarch_long_long_bit (gdbarch, 64);
3953 set_gdbarch_float_bit (gdbarch, 32);
3954 set_gdbarch_double_bit (gdbarch, 64);
3955 set_gdbarch_long_double_bit (gdbarch, 128);
3956 set_gdbarch_ptr_bit (gdbarch, 64);
3957
3958 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3959 set_gdbarch_num_pseudo_regs (gdbarch,
3960 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3961 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3962 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3963
3964 set_gdbarch_register_name (gdbarch, ia64_register_name);
3965 set_gdbarch_register_type (gdbarch, ia64_register_type);
3966
3967 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3968 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3969 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3970 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3971 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3972 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3973 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3974
3975 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3976
3977 set_gdbarch_return_value (gdbarch, ia64_return_value);
3978
3979 set_gdbarch_memory_insert_breakpoint (gdbarch,
3980 ia64_memory_insert_breakpoint);
3981 set_gdbarch_memory_remove_breakpoint (gdbarch,
3982 ia64_memory_remove_breakpoint);
3983 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3984 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3985 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3986
3987 /* Settings for calling functions in the inferior. */
3988 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3989 tdep->infcall_ops = ia64_infcall_ops;
3990 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3991 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3992
3993 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
3994 #ifdef HAVE_LIBUNWIND_IA64_H
3995 frame_unwind_append_unwinder (gdbarch,
3996 &ia64_libunwind_sigtramp_frame_unwind);
3997 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
3998 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3999 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4000 #else
4001 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4002 #endif
4003 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4004 frame_base_set_default (gdbarch, &ia64_frame_base);
4005
4006 /* Settings that should be unnecessary. */
4007 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4008
4009 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4010 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4011 ia64_convert_from_func_ptr_addr);
4012
4013 /* The virtual table contains 16-byte descriptors, not pointers to
4014 descriptors. */
4015 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4016
4017 /* Hook in ABI-specific overrides, if they have been registered. */
4018 gdbarch_init_osabi (info, gdbarch);
4019
4020 return gdbarch;
4021 }
4022
4023 extern initialize_file_ftype _initialize_ia64_tdep; /* -Wmissing-prototypes */
4024
4025 void
4026 _initialize_ia64_tdep (void)
4027 {
4028 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4029 }