gdb/ia64-tdep.c
1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "doublest.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libcs
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72
73 #endif
74
75 /* An enumeration of the different IA-64 instruction types. */
76
77 typedef enum instruction_type
78 {
79 A, /* Integer ALU ; I-unit or M-unit */
80 I, /* Non-ALU integer; I-unit */
81 M, /* Memory ; M-unit */
82 F, /* Floating-point ; F-unit */
83 B, /* Branch ; B-unit */
84 L, /* Extended (L+X) ; I-unit */
85 X, /* Extended (L+X) ; I-unit */
86 undefined /* undefined or reserved */
87 } instruction_type;
88
89 /* We represent IA-64 PC addresses as the value of the instruction
90 pointer or'd with some bit combination in the low nibble which
91 represents the slot number in the bundle addressed by the
92 instruction pointer. The problem is that the Linux kernel
93 multiplies its slot numbers (for exceptions) by one while the
94 disassembler multiplies its slot numbers by 6. In addition, I've
95 heard it said that the simulator uses 1 as the multiplier.
96
97 I've fixed the disassembler so that the bytes_per_line field will
98 be the slot multiplier. If bytes_per_line comes in as zero, it
99 is set to six (which is how it was set up initially). -- objdump
100 displays pretty disassembly dumps with this value. For our purposes,
101 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
102 never want to also display the raw bytes the way objdump does. */
103
104 #define SLOT_MULTIPLIER 1
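/* For example, with a SLOT_MULTIPLIER of 1, the PC value 0x4002 refers to
   slot 2 of the bundle at 0x4000, while 0x4000 and 0x4001 refer to slots 0
   and 1 of the same bundle (bundle addresses are always 16-byte aligned).  */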
105
106 /* Length in bytes of an instruction bundle. */
107
108 #define BUNDLE_LEN 16
109
110 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
111
112 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
113 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
114 #endif
115
116 static gdbarch_init_ftype ia64_gdbarch_init;
117
118 static gdbarch_register_name_ftype ia64_register_name;
119 static gdbarch_register_type_ftype ia64_register_type;
120 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
121 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
122 static struct type *is_float_or_hfa_type (struct type *t);
123 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
124 CORE_ADDR faddr);
125
126 #define NUM_IA64_RAW_REGS 462
127
128 /* Big enough to hold a FP register in bytes. */
129 #define IA64_FP_REGISTER_SIZE 16
130
131 static int sp_regnum = IA64_GR12_REGNUM;
132
133 /* NOTE: we treat the register stack registers r32-r127 as
134 pseudo-registers because they may not be accessible via the ptrace
135 register get/set interfaces. */
136
137 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
138 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
139 V127_REGNUM = V32_REGNUM + 95,
140 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
141 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
142
143 /* Array of register names; There should be ia64_num_regs strings in
144 the initializer. */
145
146 static const char *ia64_register_names[] =
147 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
148 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
149 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
150 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
151 "", "", "", "", "", "", "", "",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163
164 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
165 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
166 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
167 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
168 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
169 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
170 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
171 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
172 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
173 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
174 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
175 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
176 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
177 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
178 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
179 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
180
181 "", "", "", "", "", "", "", "",
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189
190 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
191
192 "vfp", "vrap",
193
194 "pr", "ip", "psr", "cfm",
195
196 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
197 "", "", "", "", "", "", "", "",
198 "rsc", "bsp", "bspstore", "rnat",
199 "", "fcr", "", "",
200 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
201 "ccv", "", "", "", "unat", "", "", "",
202 "fpsr", "", "", "", "itc",
203 "", "", "", "", "", "", "", "", "", "",
204 "", "", "", "", "", "", "", "", "",
205 "pfs", "lc", "ec",
206 "", "", "", "", "", "", "", "", "", "",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "",
213 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
214 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
215 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
216 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
217 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
218 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
219 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
220 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
221 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
222 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
223 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
224 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
225 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
226 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
227 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
228 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
229
230 "bof",
231
232 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
233 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
234 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
235 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
236 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
237 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
238 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
239 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
240 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
241 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
242 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
243 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
244
245 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
246 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
247 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
248 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
249 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
250 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
251 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
252 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
253 };
254
255 struct ia64_frame_cache
256 {
257 CORE_ADDR base; /* frame pointer base for frame */
258 CORE_ADDR pc; /* function start pc for frame */
259 CORE_ADDR saved_sp; /* stack pointer for frame */
260 CORE_ADDR bsp; /* points at r32 for the current frame */
261 CORE_ADDR cfm; /* cfm value for current frame */
262 CORE_ADDR prev_cfm; /* cfm value for previous frame */
263 int frameless;
264 int sof; /* Size of frame (decoded from cfm value). */
265 int sol; /* Size of locals (decoded from cfm value). */
266 int sor; /* Number of rotating registers (decoded from
267 cfm value). */
268 CORE_ADDR after_prologue;
269 /* Address of first instruction after the last
270 prologue instruction; Note that there may
271 be instructions from the function's body
272 intermingled with the prologue. */
273 int mem_stack_frame_size;
274 /* Size of the memory stack frame (may be zero),
275 or -1 if it has not been determined yet. */
276   int fp_reg;		/* Register number (if any) used as a frame pointer
277 for this frame. 0 if no register is being used
278 as the frame pointer. */
279
280 /* Saved registers. */
281 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
282
283 };
284
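/* Validation routine for the ia64 extended float formats defined below;
   every bit pattern is accepted as valid.  */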
285 static int
286 floatformat_valid (const struct floatformat *fmt, const void *from)
287 {
288 return 1;
289 }
290
291 static const struct floatformat floatformat_ia64_ext_little =
292 {
293 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
294 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
295 };
296
297 static const struct floatformat floatformat_ia64_ext_big =
298 {
299 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
300 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
301 };
302
303 static const struct floatformat *floatformats_ia64_ext[2] =
304 {
305 &floatformat_ia64_ext_big,
306 &floatformat_ia64_ext_little
307 };
308
309 static struct type *
310 ia64_ext_type (struct gdbarch *gdbarch)
311 {
312 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
313
314 if (!tdep->ia64_ext_type)
315 tdep->ia64_ext_type
316 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
317 floatformats_ia64_ext);
318
319 return tdep->ia64_ext_type;
320 }
321
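/* Return non-zero if register REGNUM is a member of the register group
   GROUP.  Floating-point and vector registers belong to their respective
   groups and everything else to the general group; only the raw registers
   are members of the save and restore groups.  */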
322 static int
323 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
324 struct reggroup *group)
325 {
326 int vector_p;
327 int float_p;
328 int raw_p;
329 if (group == all_reggroup)
330 return 1;
331 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
332 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
333 raw_p = regnum < NUM_IA64_RAW_REGS;
334 if (group == float_reggroup)
335 return float_p;
336 if (group == vector_reggroup)
337 return vector_p;
338 if (group == general_reggroup)
339 return (!vector_p && !float_p);
340 if (group == save_reggroup || group == restore_reggroup)
341 return raw_p;
342 return 0;
343 }
344
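/* Implement the register_name gdbarch method.  */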
345 static const char *
346 ia64_register_name (struct gdbarch *gdbarch, int reg)
347 {
348 return ia64_register_names[reg];
349 }
350
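/* Implement the register_type gdbarch method.  Floating-point registers use
   the 82-bit ia64 extended format; all other registers are treated as
   64-bit longs.  */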
351 struct type *
352 ia64_register_type (struct gdbarch *arch, int reg)
353 {
354 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
355 return ia64_ext_type (arch);
356 else
357 return builtin_type (arch)->builtin_long;
358 }
359
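/* Map a DWARF register number to a GDB register number.  The stacked
   general registers r32-r127 are mapped onto their pseudo-register
   counterparts.  */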
360 static int
361 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
362 {
363 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
364 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
365 return reg;
366 }
367
368
369 /* Extract ``len'' bits from an instruction bundle starting at
370 bit ``from''. */
371
372 static long long
373 extract_bit_field (const gdb_byte *bundle, int from, int len)
374 {
375 long long result = 0LL;
376 int to = from + len;
377 int from_byte = from / 8;
378 int to_byte = to / 8;
379 unsigned char *b = (unsigned char *) bundle;
380 unsigned char c;
381 int lshift;
382 int i;
383
384 c = b[from_byte];
385 if (from_byte == to_byte)
386 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
387 result = c >> (from % 8);
388 lshift = 8 - (from % 8);
389
390 for (i = from_byte+1; i < to_byte; i++)
391 {
392 result |= ((long long) b[i]) << lshift;
393 lshift += 8;
394 }
395
396 if (from_byte < to_byte && (to % 8 != 0))
397 {
398 c = b[to_byte];
399 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
400 result |= ((long long) c) << lshift;
401 }
402
403 return result;
404 }
405
406 /* Replace the specified bits in an instruction bundle. */
407
408 static void
409 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
410 {
411 int to = from + len;
412 int from_byte = from / 8;
413 int to_byte = to / 8;
414 unsigned char *b = (unsigned char *) bundle;
415 unsigned char c;
416
417 if (from_byte == to_byte)
418 {
419 unsigned char left, right;
420 c = b[from_byte];
421 left = (c >> (to % 8)) << (to % 8);
422 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
423 c = (unsigned char) (val & 0xff);
424 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
425 c |= right | left;
426 b[from_byte] = c;
427 }
428 else
429 {
430 int i;
431 c = b[from_byte];
432 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
433 c = c | (val << (from % 8));
434 b[from_byte] = c;
435 val >>= 8 - from % 8;
436
437 for (i = from_byte+1; i < to_byte; i++)
438 {
439 c = val & 0xff;
440 val >>= 8;
441 b[i] = c;
442 }
443
444 if (to % 8 != 0)
445 {
446 unsigned char cv = (unsigned char) val;
447 c = b[to_byte];
448 c = c >> (to % 8) << (to % 8);
449 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
450 b[to_byte] = c;
451 }
452 }
453 }
454
455 /* Return the contents of slot N (for N = 0, 1, or 2) in
456    an instruction bundle.  */
457
458 static long long
459 slotN_contents (gdb_byte *bundle, int slotnum)
460 {
461 return extract_bit_field (bundle, 5+41*slotnum, 41);
462 }
463
464 /* Store an instruction in an instruction bundle. */
465
466 static void
467 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
468 {
469 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
470 }
471
472 static const enum instruction_type template_encoding_table[32][3] =
473 {
474 { M, I, I }, /* 00 */
475 { M, I, I }, /* 01 */
476 { M, I, I }, /* 02 */
477 { M, I, I }, /* 03 */
478 { M, L, X }, /* 04 */
479 { M, L, X }, /* 05 */
480 { undefined, undefined, undefined }, /* 06 */
481 { undefined, undefined, undefined }, /* 07 */
482 { M, M, I }, /* 08 */
483 { M, M, I }, /* 09 */
484 { M, M, I }, /* 0A */
485 { M, M, I }, /* 0B */
486 { M, F, I }, /* 0C */
487 { M, F, I }, /* 0D */
488 { M, M, F }, /* 0E */
489 { M, M, F }, /* 0F */
490 { M, I, B }, /* 10 */
491 { M, I, B }, /* 11 */
492 { M, B, B }, /* 12 */
493 { M, B, B }, /* 13 */
494 { undefined, undefined, undefined }, /* 14 */
495 { undefined, undefined, undefined }, /* 15 */
496 { B, B, B }, /* 16 */
497 { B, B, B }, /* 17 */
498 { M, M, B }, /* 18 */
499 { M, M, B }, /* 19 */
500 { undefined, undefined, undefined }, /* 1A */
501 { undefined, undefined, undefined }, /* 1B */
502 { M, F, B }, /* 1C */
503 { M, F, B }, /* 1D */
504 { undefined, undefined, undefined }, /* 1E */
505 { undefined, undefined, undefined }, /* 1F */
506 };
507
508 /* Fetch and (partially) decode an instruction at ADDR and return the
509 address of the next instruction to fetch. */
510
511 static CORE_ADDR
512 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
513 {
514 gdb_byte bundle[BUNDLE_LEN];
515 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
516 long long templ;
517 int val;
518
519 /* Warn about slot numbers greater than 2. We used to generate
520 an error here on the assumption that the user entered an invalid
521 address. But, sometimes GDB itself requests an invalid address.
522 This can (easily) happen when execution stops in a function for
523 which there are no symbols. The prologue scanner will attempt to
524 find the beginning of the function - if the nearest symbol
525 happens to not be aligned on a bundle boundary (16 bytes), the
526 resulting starting address will cause GDB to think that the slot
527 number is too large.
528
529 So we warn about it and set the slot number to zero. It is
530 not necessarily a fatal condition, particularly if debugging
531 at the assembly language level. */
532 if (slotnum > 2)
533 {
534 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
535 "Using slot 0 instead"));
536 slotnum = 0;
537 }
538
539 addr &= ~0x0f;
540
541 val = target_read_memory (addr, bundle, BUNDLE_LEN);
542
543 if (val != 0)
544 return 0;
545
546 *instr = slotN_contents (bundle, slotnum);
547 templ = extract_bit_field (bundle, 0, 5);
548 *it = template_encoding_table[(int)templ][slotnum];
549
550 if (slotnum == 2 || (slotnum == 1 && *it == L))
551 addr += 16;
552 else
553 addr += (slotnum + 1) * SLOT_MULTIPLIER;
554
555 return addr;
556 }
557
558 /* There are 5 different break instructions (break.i, break.b,
559 break.m, break.f, and break.x), but they all have the same
560 encoding. (The five bit template in the low five bits of the
561 instruction bundle distinguishes one from another.)
562
563 The runtime architecture manual specifies that break instructions
564 used for debugging purposes must have the upper two bits of the 21
565 bit immediate set to a 0 and a 1 respectively. A breakpoint
566 instruction encodes the most significant bit of its 21 bit
567 immediate at bit 36 of the 41 bit instruction. The penultimate msb
568 is at bit 25 which leads to the pattern below.
569
570    Originally, I had this set up to do, e.g., a "break.i 0x80000".  But
571 it turns out that 0x80000 was used as the syscall break in the early
572 simulators. So I changed the pattern slightly to do "break.i 0x080001"
573 instead. But that didn't work either (I later found out that this
574 pattern was used by the simulator that I was using.) So I ended up
575 using the pattern seen below.
576
577 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
578    while we need bit-based addressing, as the instruction's length is 41 bits and
579 we must not modify/corrupt the adjacent slots in the same bundle.
580    Fortunately we may store a larger memory area, including the adjacent bits,
581    with the original memory content (not the breakpoints possibly already stored there).
582 We need to be careful in ia64_memory_remove_breakpoint to always restore
583 only the specific bits of this instruction ignoring any adjacent stored
584 bits.
585
586 We use the original addressing with the low nibble in the range <0..2> which
587 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
588    as the direct byte offset of SHADOW_CONTENTS.  We store the whole BUNDLE_LEN
589    bytes, minus the (up to two) possibly skipped leading bytes, so as not to
590    extend into the next bundle.
591
592    If we wanted to store the whole bundle to SHADOW_CONTENTS, we would have
593    to store the base address (`address & ~0x0f') into PLACED_ADDRESS.
594    In that case there would be no other place to store
595    SLOTNUM (`address & 0x0f', a value in the range <0..2>).  We need to know
596    SLOTNUM in ia64_memory_remove_breakpoint.
597
598 There is one special case where we need to be extra careful:
599 L-X instructions, which are instructions that occupy 2 slots
600 (The L part is always in slot 1, and the X part is always in
601 slot 2). We must refuse to insert breakpoints for an address
602 that points at slot 2 of a bundle where an L-X instruction is
603 present, since there is logically no instruction at that address.
604 However, to make things more interesting, the opcode of L-X
605 instructions is located in slot 2. This means that, to insert
606 a breakpoint at an address that points to slot 1, we actually
607 need to write the breakpoint in slot 2! Slot 1 is actually
608 the extended operand, so writing the breakpoint there would not
609 have the desired effect. Another side-effect of this issue
610 is that we need to make sure that the shadow contents buffer
611 does save byte 15 of our instruction bundle (this is the tail
612 end of slot 2, which wouldn't be saved if we were to insert
613 the breakpoint in slot 1).
614
615 ia64 16-byte bundle layout:
616 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
617
618 The current addressing used by the code below:
619 original PC placed_address placed_size required covered
620 == bp_tgt->shadow_len reqd \subset covered
621 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
622 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
623 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
624
625 L-X instructions are treated a little specially, as explained above:
626 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
627
628    `objdump -d' and some other tools show somewhat unexpected offsets:
629 original PC byte where starts the instruction objdump offset
630 0xABCDE0 0xABCDE0 0xABCDE0
631 0xABCDE1 0xABCDE5 0xABCDE6
632 0xABCDE2 0xABCDEA 0xABCDEC
633 */
634
635 #define IA64_BREAKPOINT 0x00003333300LL
636
637 static int
638 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
639 struct bp_target_info *bp_tgt)
640 {
641 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
642 gdb_byte bundle[BUNDLE_LEN];
643 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
644 long long instr_breakpoint;
645 int val;
646 int templ;
647 struct cleanup *cleanup;
648
649 if (slotnum > 2)
650 error (_("Can't insert breakpoint for slot numbers greater than 2."));
651
652 addr &= ~0x0f;
653
654 /* Enable the automatic memory restoration from breakpoints while
655 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
656 Otherwise, we could possibly store into the shadow parts of the adjacent
657 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
658 breakpoint instruction bits region. */
659 cleanup = make_show_memory_breakpoints_cleanup (0);
660 val = target_read_memory (addr, bundle, BUNDLE_LEN);
661 if (val != 0)
662 {
663 do_cleanups (cleanup);
664 return val;
665 }
666
667 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
668 for addressing the SHADOW_CONTENTS placement. */
669 shadow_slotnum = slotnum;
670
671 /* Always cover the last byte of the bundle in case we are inserting
672 a breakpoint on an L-X instruction. */
673 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
674
675 templ = extract_bit_field (bundle, 0, 5);
676 if (template_encoding_table[templ][slotnum] == X)
677 {
678 /* X unit types can only be used in slot 2, and are actually
679 part of a 2-slot L-X instruction. We cannot break at this
680 address, as this is the second half of an instruction that
681 lives in slot 1 of that bundle. */
682 gdb_assert (slotnum == 2);
683 error (_("Can't insert breakpoint for non-existing slot X"));
684 }
685 if (template_encoding_table[templ][slotnum] == L)
686 {
687 /* L unit types can only be used in slot 1. But the associated
688 opcode for that instruction is in slot 2, so bump the slot number
689 accordingly. */
690 gdb_assert (slotnum == 1);
691 slotnum = 2;
692 }
693
694 /* Store the whole bundle, except for the initial skipped bytes by the slot
695 number interpreted as bytes offset in PLACED_ADDRESS. */
696 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
697 bp_tgt->shadow_len);
698
699 /* Re-read the same bundle as above except that, this time, read it in order
700 to compute the new bundle inside which we will be inserting the
701 breakpoint. Therefore, disable the automatic memory restoration from
702 breakpoints while we read our instruction bundle. Otherwise, the general
703 restoration mechanism kicks in and we would possibly remove parts of the
704 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
705 the real breakpoint instruction bits region. */
706 make_show_memory_breakpoints_cleanup (1);
707 val = target_read_memory (addr, bundle, BUNDLE_LEN);
708 if (val != 0)
709 {
710 do_cleanups (cleanup);
711 return val;
712 }
713
714   /* Breakpoints already present in the code will get detected and not get
715 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
716 location cannot induce the internal error as they are optimized into
717 a single instance by update_global_location_list. */
718 instr_breakpoint = slotN_contents (bundle, slotnum);
719 if (instr_breakpoint == IA64_BREAKPOINT)
720 internal_error (__FILE__, __LINE__,
721 _("Address %s already contains a breakpoint."),
722 paddress (gdbarch, bp_tgt->placed_address));
723 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
724
725 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
726 bp_tgt->shadow_len);
727
728 do_cleanups (cleanup);
729 return val;
730 }
731
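/* The inverse of ia64_memory_insert_breakpoint: restore only the 41 bits of
   the saved instruction in its slot, leaving the other slots of the bundle
   (which may contain other breakpoints) untouched.  */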
732 static int
733 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
734 struct bp_target_info *bp_tgt)
735 {
736 CORE_ADDR addr = bp_tgt->placed_address;
737 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
738 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
739 long long instr_breakpoint, instr_saved;
740 int val;
741 int templ;
742 struct cleanup *cleanup;
743
744 addr &= ~0x0f;
745
746 /* Disable the automatic memory restoration from breakpoints while
747 we read our instruction bundle. Otherwise, the general restoration
748 mechanism kicks in and we would possibly remove parts of the adjacent
749 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
750 breakpoint instruction bits region. */
751 cleanup = make_show_memory_breakpoints_cleanup (1);
752 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
753 if (val != 0)
754 {
755 do_cleanups (cleanup);
756 return val;
757 }
758
759 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
760 for addressing the SHADOW_CONTENTS placement. */
761 shadow_slotnum = slotnum;
762
763 templ = extract_bit_field (bundle_mem, 0, 5);
764 if (template_encoding_table[templ][slotnum] == X)
765 {
766 /* X unit types can only be used in slot 2, and are actually
767 part of a 2-slot L-X instruction. We refuse to insert
768 breakpoints at this address, so there should be no reason
769 for us attempting to remove one there, except if the program's
770 code somehow got modified in memory. */
771 gdb_assert (slotnum == 2);
772 warning (_("Cannot remove breakpoint at address %s from non-existing "
773 "X-type slot, memory has changed underneath"),
774 paddress (gdbarch, bp_tgt->placed_address));
775 do_cleanups (cleanup);
776 return -1;
777 }
778 if (template_encoding_table[templ][slotnum] == L)
779 {
780 /* L unit types can only be used in slot 1. But the breakpoint
781 was actually saved using slot 2, so update the slot number
782 accordingly. */
783 gdb_assert (slotnum == 1);
784 slotnum = 2;
785 }
786
787 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
788
789 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
790 if (instr_breakpoint != IA64_BREAKPOINT)
791 {
792 warning (_("Cannot remove breakpoint at address %s, "
793 "no break instruction at such address."),
794 paddress (gdbarch, bp_tgt->placed_address));
795 do_cleanups (cleanup);
796 return -1;
797 }
798
799 /* Extract the original saved instruction from SLOTNUM normalizing its
800 bit-shift for INSTR_SAVED. */
801 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
802 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
803 bp_tgt->shadow_len);
804 instr_saved = slotN_contents (bundle_saved, slotnum);
805
806 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
807 and not any of the other ones that are stored in SHADOW_CONTENTS. */
808 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
809 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
810
811 do_cleanups (cleanup);
812 return val;
813 }
814
815 /* Implement the breakpoint_kind_from_pc gdbarch method. */
816
817 static int
818 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
819 {
820   /* A placeholder for the gdbarch method breakpoint_kind_from_pc.  */
821 return 0;
822 }
823
824 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
825    instruction slot ranges are bit-granular (41 bits), we have to provide an
826 extended range as described for ia64_memory_insert_breakpoint. We also take
827 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
828 make a match for permanent breakpoints. */
829
830 static const gdb_byte *
831 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
832 CORE_ADDR *pcptr, int *lenptr)
833 {
834 CORE_ADDR addr = *pcptr;
835 static gdb_byte bundle[BUNDLE_LEN];
836 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
837 long long instr_fetched;
838 int val;
839 int templ;
840 struct cleanup *cleanup;
841
842 if (slotnum > 2)
843 error (_("Can't insert breakpoint for slot numbers greater than 2."));
844
845 addr &= ~0x0f;
846
847 /* Enable the automatic memory restoration from breakpoints while
848 we read our instruction bundle to match bp_loc_is_permanent. */
849 cleanup = make_show_memory_breakpoints_cleanup (0);
850 val = target_read_memory (addr, bundle, BUNDLE_LEN);
851 do_cleanups (cleanup);
852
853 /* The memory might be unreachable. This can happen, for instance,
854 when the user inserts a breakpoint at an invalid address. */
855 if (val != 0)
856 return NULL;
857
858 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
859 for addressing the SHADOW_CONTENTS placement. */
860 shadow_slotnum = slotnum;
861
862   /* Always cover the last byte of the bundle for the L-X slot case.  */
863 *lenptr = BUNDLE_LEN - shadow_slotnum;
864
865   /* Check for an L-type instruction in slot 1; if present, bump the slot
866      number up to slot 2.  */
867 templ = extract_bit_field (bundle, 0, 5);
868 if (template_encoding_table[templ][slotnum] == X)
869 {
870 gdb_assert (slotnum == 2);
871 error (_("Can't insert breakpoint for non-existing slot X"));
872 }
873 if (template_encoding_table[templ][slotnum] == L)
874 {
875 gdb_assert (slotnum == 1);
876 slotnum = 2;
877 }
878
879   /* A break instruction has all of its opcode bits cleared except for
880 the parameter value. For L+X slot pair we are at the X slot (slot 2) so
881 we should not touch the L slot - the upper 41 bits of the parameter. */
882 instr_fetched = slotN_contents (bundle, slotnum);
883 instr_fetched &= 0x1003ffffc0LL;
884 replace_slotN_contents (bundle, instr_fetched, slotnum);
885
886 return bundle + shadow_slotnum;
887 }
888
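/* Read the PC as GDB represents it: the bundle address from the IP register
   combined with the slot number taken from the PSR.ri field.  */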
889 static CORE_ADDR
890 ia64_read_pc (struct regcache *regcache)
891 {
892 ULONGEST psr_value, pc_value;
893 int slot_num;
894
895 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
896 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &pc_value);
897 slot_num = (psr_value >> 41) & 3;
898
899 return pc_value | (slot_num * SLOT_MULTIPLIER);
900 }
901
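/* Write the PC: the 16-byte aligned bundle address goes into the IP register
   and the slot number into the PSR.ri field.  */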
902 void
903 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
904 {
905 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
906 ULONGEST psr_value;
907
908 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
909 psr_value &= ~(3LL << 41);
910 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
911
912 new_pc &= ~0xfLL;
913
914 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
915 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
916 }
917
918 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
919
920 /* Returns the address of the slot that's NSLOTS slots away from
921 the address ADDR. NSLOTS may be positive or negative. */
922 static CORE_ADDR
923 rse_address_add(CORE_ADDR addr, int nslots)
924 {
925 CORE_ADDR new_addr;
926 int mandatory_nat_slots = nslots / 63;
927 int direction = nslots < 0 ? -1 : 1;
928
929 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
930
931 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
932 new_addr += 8 * direction;
933
934 if (IS_NaT_COLLECTION_ADDR(new_addr))
935 new_addr += 8 * direction;
936
937 return new_addr;
938 }
939
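/* Implement the pseudo_register_read gdbarch method.  The stacked registers
   r32-r127 are read from the register backing store (or via libunwind when
   it is available), nat0-nat127 are extracted from the UNAT/RNAT collections,
   bof is computed from bsp and cfm, and p0-p63 are extracted from pr.  */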
940 static enum register_status
941 ia64_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
942 int regnum, gdb_byte *buf)
943 {
944 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
945 enum register_status status;
946
947 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
948 {
949 #ifdef HAVE_LIBUNWIND_IA64_H
950       /* First try to use the libunwind special reg accessor;
951 	 otherwise fall back to the standard logic.  */
952 if (!libunwind_is_initialized ()
953 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
954 #endif
955 {
956 /* The fallback position is to assume that r32-r127 are
957 found sequentially in memory starting at $bof. This
958 isn't always true, but without libunwind, this is the
959 best we can do. */
960 enum register_status status;
961 ULONGEST cfm;
962 ULONGEST bsp;
963 CORE_ADDR reg;
964
965 status = regcache_cooked_read_unsigned (regcache,
966 IA64_BSP_REGNUM, &bsp);
967 if (status != REG_VALID)
968 return status;
969
970 status = regcache_cooked_read_unsigned (regcache,
971 IA64_CFM_REGNUM, &cfm);
972 if (status != REG_VALID)
973 return status;
974
975 /* The bsp points at the end of the register frame so we
976 subtract the size of frame from it to get start of
977 register frame. */
978 bsp = rse_address_add (bsp, -(cfm & 0x7f));
979
980 if ((cfm & 0x7f) > regnum - V32_REGNUM)
981 {
982 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
983 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
984 store_unsigned_integer (buf, register_size (gdbarch, regnum),
985 byte_order, reg);
986 }
987 else
988 store_unsigned_integer (buf, register_size (gdbarch, regnum),
989 byte_order, 0);
990 }
991 }
992 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
993 {
994 ULONGEST unatN_val;
995 ULONGEST unat;
996 status = regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
997 if (status != REG_VALID)
998 return status;
999 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
1000 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1001 byte_order, unatN_val);
1002 }
1003 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1004 {
1005 ULONGEST natN_val = 0;
1006 ULONGEST bsp;
1007 ULONGEST cfm;
1008 CORE_ADDR gr_addr = 0;
1009 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1010 if (status != REG_VALID)
1011 return status;
1012 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1013 if (status != REG_VALID)
1014 return status;
1015
1016 /* The bsp points at the end of the register frame so we
1017 subtract the size of frame from it to get start of register frame. */
1018 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1019
1020 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1021 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1022
1023 if (gr_addr != 0)
1024 {
1025 /* Compute address of nat collection bits. */
1026 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1027 CORE_ADDR nat_collection;
1028 int nat_bit;
1029 /* If our nat collection address is bigger than bsp, we have to get
1030 the nat collection from rnat. Otherwise, we fetch the nat
1031 collection from the computed address. */
1032 if (nat_addr >= bsp)
1033 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1034 &nat_collection);
1035 else
1036 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1037 nat_bit = (gr_addr >> 3) & 0x3f;
1038 natN_val = (nat_collection >> nat_bit) & 1;
1039 }
1040
1041 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1042 byte_order, natN_val);
1043 }
1044 else if (regnum == VBOF_REGNUM)
1045 {
1046 /* A virtual register frame start is provided for user convenience.
1047 	 It can be calculated as bsp - sof (the size of the frame).  */
1048 ULONGEST bsp, vbsp;
1049 ULONGEST cfm;
1050 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1051 if (status != REG_VALID)
1052 return status;
1053 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1054 if (status != REG_VALID)
1055 return status;
1056
1057 /* The bsp points at the end of the register frame so we
1058 subtract the size of frame from it to get beginning of frame. */
1059 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1060 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1061 byte_order, vbsp);
1062 }
1063 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1064 {
1065 ULONGEST pr;
1066 ULONGEST cfm;
1067 ULONGEST prN_val;
1068 status = regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1069 if (status != REG_VALID)
1070 return status;
1071 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1072 if (status != REG_VALID)
1073 return status;
1074
1075 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1076 {
1077 /* Fetch predicate register rename base from current frame
1078 marker for this frame. */
1079 int rrb_pr = (cfm >> 32) & 0x3f;
1080
1081 /* Adjust the register number to account for register rotation. */
1082 regnum = VP16_REGNUM
1083 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1084 }
1085 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1086 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1087 byte_order, prN_val);
1088 }
1089 else
1090 memset (buf, 0, register_size (gdbarch, regnum));
1091
1092 return REG_VALID;
1093 }
1094
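/* Implement the pseudo_register_write gdbarch method; the counterpart of
   ia64_pseudo_register_read above.  */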
1095 static void
1096 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1097 int regnum, const gdb_byte *buf)
1098 {
1099 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1100
1101 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1102 {
1103 ULONGEST bsp;
1104 ULONGEST cfm;
1105 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1106 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1107
1108 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1109
1110 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1111 {
1112 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1113 write_memory (reg_addr, buf, 8);
1114 }
1115 }
1116 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1117 {
1118 ULONGEST unatN_val, unat, unatN_mask;
1119 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1120 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1121 regnum),
1122 byte_order);
1123 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1124 if (unatN_val == 0)
1125 unat &= ~unatN_mask;
1126 else if (unatN_val == 1)
1127 unat |= unatN_mask;
1128 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1129 }
1130 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1131 {
1132 ULONGEST natN_val;
1133 ULONGEST bsp;
1134 ULONGEST cfm;
1135 CORE_ADDR gr_addr = 0;
1136 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1137 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1138
1139 /* The bsp points at the end of the register frame so we
1140 subtract the size of frame from it to get start of register frame. */
1141 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1142
1143 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1144 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1145
1146 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1147 regnum),
1148 byte_order);
1149
1150 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1151 {
1152 /* Compute address of nat collection bits. */
1153 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1154 CORE_ADDR nat_collection;
1155 int natN_bit = (gr_addr >> 3) & 0x3f;
1156 ULONGEST natN_mask = (1LL << natN_bit);
1157 /* If our nat collection address is bigger than bsp, we have to get
1158 the nat collection from rnat. Otherwise, we fetch the nat
1159 collection from the computed address. */
1160 if (nat_addr >= bsp)
1161 {
1162 regcache_cooked_read_unsigned (regcache,
1163 IA64_RNAT_REGNUM,
1164 &nat_collection);
1165 if (natN_val)
1166 nat_collection |= natN_mask;
1167 else
1168 nat_collection &= ~natN_mask;
1169 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1170 nat_collection);
1171 }
1172 else
1173 {
1174 gdb_byte nat_buf[8];
1175 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1176 if (natN_val)
1177 nat_collection |= natN_mask;
1178 else
1179 nat_collection &= ~natN_mask;
1180 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1181 byte_order, nat_collection);
1182 write_memory (nat_addr, nat_buf, 8);
1183 }
1184 }
1185 }
1186 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1187 {
1188 ULONGEST pr;
1189 ULONGEST cfm;
1190 ULONGEST prN_val;
1191 ULONGEST prN_mask;
1192
1193 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1194 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1195
1196 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1197 {
1198 /* Fetch predicate register rename base from current frame
1199 marker for this frame. */
1200 int rrb_pr = (cfm >> 32) & 0x3f;
1201
1202 /* Adjust the register number to account for register rotation. */
1203 regnum = VP16_REGNUM
1204 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1205 }
1206 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1207 byte_order);
1208 prN_mask = (1LL << (regnum - VP0_REGNUM));
1209 if (prN_val == 0)
1210 pr &= ~prN_mask;
1211 else if (prN_val == 1)
1212 pr |= prN_mask;
1213 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1214 }
1215 }
1216
1217 /* The ia64 needs to convert between various ieee floating-point formats
1218 and the special ia64 floating point register format. */
1219
1220 static int
1221 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1222 {
1223 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1224 && TYPE_CODE (type) == TYPE_CODE_FLT
1225 && type != ia64_ext_type (gdbarch));
1226 }
1227
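/* Implement the register_to_value gdbarch method: convert a raw register in
   the 82-bit ia64 extended format into a value of type VALTYPE.  */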
1228 static int
1229 ia64_register_to_value (struct frame_info *frame, int regnum,
1230 struct type *valtype, gdb_byte *out,
1231 int *optimizedp, int *unavailablep)
1232 {
1233 struct gdbarch *gdbarch = get_frame_arch (frame);
1234 gdb_byte in[IA64_FP_REGISTER_SIZE];
1235
1236 /* Convert to TYPE. */
1237 if (!get_frame_register_bytes (frame, regnum, 0,
1238 register_size (gdbarch, regnum),
1239 in, optimizedp, unavailablep))
1240 return 0;
1241
1242 convert_typed_floating (in, ia64_ext_type (gdbarch), out, valtype);
1243 *optimizedp = *unavailablep = 0;
1244 return 1;
1245 }
1246
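/* Implement the value_to_register gdbarch method: convert a value of type
   VALTYPE into the 82-bit ia64 extended format and store it in REGNUM.  */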
1247 static void
1248 ia64_value_to_register (struct frame_info *frame, int regnum,
1249 struct type *valtype, const gdb_byte *in)
1250 {
1251 struct gdbarch *gdbarch = get_frame_arch (frame);
1252 gdb_byte out[IA64_FP_REGISTER_SIZE];
1253 convert_typed_floating (in, valtype, out, ia64_ext_type (gdbarch));
1254 put_frame_register (frame, regnum, out);
1255 }
1256
1257
1258 /* Limit the number of skipped non-prologue instructions, since examining
1259    the prologue is expensive.  */
1260 static int max_skip_non_prologue_insns = 40;
1261
1262 /* Given PC representing the starting address of a function, and
1263 LIM_PC which is the (sloppy) limit to which to scan when looking
1264 for a prologue, attempt to further refine this limit by using
1265 the line data in the symbol table. If successful, a better guess
1266 on where the prologue ends is returned, otherwise the previous
1267 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1268 which will be set to indicate whether the returned limit may be
1269 used with no further scanning in the event that the function is
1270 frameless. */
1271
1272 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1273 superseded by skip_prologue_using_sal. */
1274
1275 static CORE_ADDR
1276 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1277 {
1278 struct symtab_and_line prologue_sal;
1279 CORE_ADDR start_pc = pc;
1280 CORE_ADDR end_pc;
1281
1282   /* The prologue cannot possibly go past the function end itself,
1283 so we can already adjust LIM_PC accordingly. */
1284 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1285 lim_pc = end_pc;
1286
1287 /* Start off not trusting the limit. */
1288 *trust_limit = 0;
1289
1290 prologue_sal = find_pc_line (pc, 0);
1291 if (prologue_sal.line != 0)
1292 {
1293 int i;
1294 CORE_ADDR addr = prologue_sal.end;
1295
1296 /* Handle the case in which compiler's optimizer/scheduler
1297 has moved instructions into the prologue. We scan ahead
1298 in the function looking for address ranges whose corresponding
1299 line number is less than or equal to the first one that we
1300 found for the function. (It can be less than when the
1301 scheduler puts a body instruction before the first prologue
1302 instruction.) */
1303 for (i = 2 * max_skip_non_prologue_insns;
1304 i > 0 && (lim_pc == 0 || addr < lim_pc);
1305 i--)
1306 {
1307 struct symtab_and_line sal;
1308
1309 sal = find_pc_line (addr, 0);
1310 if (sal.line == 0)
1311 break;
1312 if (sal.line <= prologue_sal.line
1313 && sal.symtab == prologue_sal.symtab)
1314 {
1315 prologue_sal = sal;
1316 }
1317 addr = sal.end;
1318 }
1319
1320 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1321 {
1322 lim_pc = prologue_sal.end;
1323 if (start_pc == get_pc_function_start (lim_pc))
1324 *trust_limit = 1;
1325 }
1326 }
1327 return lim_pc;
1328 }
1329
1330 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1331 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1332 || (14 <= (_regnum_) && (_regnum_) <= 31))
1333 #define imm9(_instr_) \
1334 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1335 | (((_instr_) & 0x00008000000LL) >> 20) \
1336 | (((_instr_) & 0x00000001fc0LL) >> 6))
1337
1338 /* Allocate and initialize a frame cache. */
1339
1340 static struct ia64_frame_cache *
1341 ia64_alloc_frame_cache (void)
1342 {
1343 struct ia64_frame_cache *cache;
1344 int i;
1345
1346 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1347
1348 /* Base address. */
1349 cache->base = 0;
1350 cache->pc = 0;
1351 cache->cfm = 0;
1352 cache->prev_cfm = 0;
1353 cache->sof = 0;
1354 cache->sol = 0;
1355 cache->sor = 0;
1356 cache->bsp = 0;
1357 cache->fp_reg = 0;
1358 cache->frameless = 1;
1359
1360 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1361 cache->saved_regs[i] = 0;
1362
1363 return cache;
1364 }
1365
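/* Scan the prologue of the function starting at PC, stopping at LIM_PC at
   the latest, and record in CACHE where preserved registers are spilled and
   how large the memory stack frame is.  Return the address just past the
   last prologue instruction that was recognized.  */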
1366 static CORE_ADDR
1367 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1368 struct frame_info *this_frame,
1369 struct ia64_frame_cache *cache)
1370 {
1371 CORE_ADDR next_pc;
1372 CORE_ADDR last_prologue_pc = pc;
1373 instruction_type it;
1374 long long instr;
1375 int cfm_reg = 0;
1376 int ret_reg = 0;
1377 int fp_reg = 0;
1378 int unat_save_reg = 0;
1379 int pr_save_reg = 0;
1380 int mem_stack_frame_size = 0;
1381 int spill_reg = 0;
1382 CORE_ADDR spill_addr = 0;
1383 char instores[8];
1384 char infpstores[8];
1385 char reg_contents[256];
1386 int trust_limit;
1387 int frameless = 1;
1388 int i;
1389 CORE_ADDR addr;
1390 gdb_byte buf[8];
1391 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1392
1393 memset (instores, 0, sizeof instores);
1394 memset (infpstores, 0, sizeof infpstores);
1395 memset (reg_contents, 0, sizeof reg_contents);
1396
1397 if (cache->after_prologue != 0
1398 && cache->after_prologue <= lim_pc)
1399 return cache->after_prologue;
1400
1401 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1402 next_pc = fetch_instruction (pc, &it, &instr);
1403
1404 /* We want to check if we have a recognizable function start before we
1405 look ahead for a prologue. */
1406 if (pc < lim_pc && next_pc
1407 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1408 {
1409 /* alloc - start of a regular function. */
1410 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1411 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1412 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1413
1414 /* Verify that the current cfm matches what we think is the
1415 function start. If we have somehow jumped within a function,
1416 we do not want to interpret the prologue and calculate the
1417 addresses of various registers such as the return address.
1418 We will instead treat the frame as frameless. */
1419 if (!this_frame ||
1420 (sof == (cache->cfm & 0x7f) &&
1421 sol == ((cache->cfm >> 7) & 0x7f)))
1422 frameless = 0;
1423
1424 cfm_reg = rN;
1425 last_prologue_pc = next_pc;
1426 pc = next_pc;
1427 }
1428 else
1429 {
1430 /* Look for a leaf routine. */
1431 if (pc < lim_pc && next_pc
1432 && (it == I || it == M)
1433 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1434 {
1435 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1436 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1437 | ((instr & 0x001f8000000LL) >> 20)
1438 | ((instr & 0x000000fe000LL) >> 13));
1439 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1440 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1441 int qp = (int) (instr & 0x0000000003fLL);
1442 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1443 {
1444 /* mov r2, r12 - beginning of leaf routine. */
1445 fp_reg = rN;
1446 last_prologue_pc = next_pc;
1447 }
1448 }
1449
1450 /* If we don't recognize a regular function or leaf routine, we are
1451 done. */
1452 if (!fp_reg)
1453 {
1454 pc = lim_pc;
1455 if (trust_limit)
1456 last_prologue_pc = lim_pc;
1457 }
1458 }
1459
1460 /* Loop, looking for prologue instructions, keeping track of
1461 where preserved registers were spilled. */
1462 while (pc < lim_pc)
1463 {
1464 next_pc = fetch_instruction (pc, &it, &instr);
1465 if (next_pc == 0)
1466 break;
1467
1468 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1469 {
1470 /* Exit loop upon hitting a non-nop branch instruction. */
1471 if (trust_limit)
1472 lim_pc = pc;
1473 break;
1474 }
1475 else if (((instr & 0x3fLL) != 0LL) &&
1476 (frameless || ret_reg != 0))
1477 {
1478 /* Exit loop upon hitting a predicated instruction if
1479 we already have the return register or if we are frameless. */
1480 if (trust_limit)
1481 lim_pc = pc;
1482 break;
1483 }
1484 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1485 {
1486 /* Move from BR */
1487 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1488 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1489 int qp = (int) (instr & 0x0000000003f);
1490
1491 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1492 {
1493 ret_reg = rN;
1494 last_prologue_pc = next_pc;
1495 }
1496 }
1497 else if ((it == I || it == M)
1498 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1499 {
1500 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1501 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1502 | ((instr & 0x001f8000000LL) >> 20)
1503 | ((instr & 0x000000fe000LL) >> 13));
1504 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1505 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1506 int qp = (int) (instr & 0x0000000003fLL);
1507
1508 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1509 {
1510 /* mov rN, r12 */
1511 fp_reg = rN;
1512 last_prologue_pc = next_pc;
1513 }
1514 else if (qp == 0 && rN == 12 && rM == 12)
1515 {
1516 /* adds r12, -mem_stack_frame_size, r12 */
1517 mem_stack_frame_size -= imm;
1518 last_prologue_pc = next_pc;
1519 }
1520 else if (qp == 0 && rN == 2
1521 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1522 {
1523 CORE_ADDR saved_sp = 0;
1524 /* adds r2, spilloffset, rFramePointer
1525 or
1526 adds r2, spilloffset, r12
1527
1528 Get ready for stf.spill or st8.spill instructions.
1529 The address to start spilling at is loaded into r2.
1530 FIXME: Why r2? That's what gcc currently uses; it
1531 could well be different for other compilers. */
1532
1533 /* Hmm... whether or not this will work will depend on
1534 where the pc is. If it's still early in the prologue
1535 this'll be wrong. FIXME */
1536 if (this_frame)
1537 {
1538 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1539 saved_sp = get_frame_register_unsigned (this_frame,
1540 sp_regnum);
1541 }
1542 spill_addr = saved_sp
1543 + (rM == 12 ? 0 : mem_stack_frame_size)
1544 + imm;
1545 spill_reg = rN;
1546 last_prologue_pc = next_pc;
1547 }
1548 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1549 rN < 256 && imm == 0)
1550 {
1551 /* mov rN, rM where rM is an input register. */
1552 reg_contents[rN] = rM;
1553 last_prologue_pc = next_pc;
1554 }
1555 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1556 rM == 2)
1557 {
1558 /* mov r12, r2 */
1559 last_prologue_pc = next_pc;
1560 break;
1561 }
1562 }
1563 else if (it == M
1564 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1565 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1566 {
1567 /* stf.spill [rN] = fM, imm9
1568 or
1569 stf.spill [rN] = fM */
1570
1571 int imm = imm9(instr);
1572 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1573 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1574 int qp = (int) (instr & 0x0000000003fLL);
1575 if (qp == 0 && rN == spill_reg && spill_addr != 0
1576 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1577 {
1578 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1579
1580 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1581 spill_addr += imm;
1582 else
1583 spill_addr = 0; /* last one; must be done. */
1584 last_prologue_pc = next_pc;
1585 }
1586 }
1587 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1588 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1589 {
1590 /* mov.m rN = arM
1591 or
1592 mov.i rN = arM */
1593
1594 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1595 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1596 int qp = (int) (instr & 0x0000000003fLL);
1597 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1598 {
1599 /* We have something like "mov.m r3 = ar.unat". Remember the
1600 r3 (or whatever) and watch for a store of this register... */
1601 unat_save_reg = rN;
1602 last_prologue_pc = next_pc;
1603 }
1604 }
1605 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1606 {
1607 /* mov rN = pr */
1608 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1609 int qp = (int) (instr & 0x0000000003fLL);
1610 if (qp == 0 && isScratch (rN))
1611 {
1612 pr_save_reg = rN;
1613 last_prologue_pc = next_pc;
1614 }
1615 }
1616 else if (it == M
1617 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1618 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1619 {
1620 /* st8 [rN] = rM
1621 or
1622 st8 [rN] = rM, imm9 */
1623 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1624 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1625 int qp = (int) (instr & 0x0000000003fLL);
1626 int indirect = rM < 256 ? reg_contents[rM] : 0;
1627 if (qp == 0 && rN == spill_reg && spill_addr != 0
1628 && (rM == unat_save_reg || rM == pr_save_reg))
1629 {
1630 /* We've found a spill of either the UNAT register or the PR
1631 register. (Well, not exactly; what we've actually found is
1632 a spill of the register that UNAT or PR was moved to).
1633 Record that fact and move on... */
1634 if (rM == unat_save_reg)
1635 {
1636 /* Track UNAT register. */
1637 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1638 unat_save_reg = 0;
1639 }
1640 else
1641 {
1642 /* Track PR register. */
1643 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1644 pr_save_reg = 0;
1645 }
1646 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1647 /* st8 [rN] = rM, imm9 */
1648 spill_addr += imm9(instr);
1649 else
1650 spill_addr = 0; /* Must be done spilling. */
1651 last_prologue_pc = next_pc;
1652 }
1653 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1654 {
1655 /* Allow up to one store of each input register. */
1656 instores[rM-32] = 1;
1657 last_prologue_pc = next_pc;
1658 }
1659 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1660 !instores[indirect-32])
1661 {
1662 /* Allow an indirect store of an input register. */
1663 instores[indirect-32] = 1;
1664 last_prologue_pc = next_pc;
1665 }
1666 }
1667 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1668 {
1669 /* One of
1670 st1 [rN] = rM
1671 st2 [rN] = rM
1672 st4 [rN] = rM
1673 st8 [rN] = rM
1674 Note that the st8 case is handled in the clause above.
1675
1676 Advance over stores of input registers. One store per input
1677 register is permitted. */
1678 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1679 int qp = (int) (instr & 0x0000000003fLL);
1680 int indirect = rM < 256 ? reg_contents[rM] : 0;
1681 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1682 {
1683 instores[rM-32] = 1;
1684 last_prologue_pc = next_pc;
1685 }
1686 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1687 !instores[indirect-32])
1688 {
1689 /* Allow an indirect store of an input register. */
1690 instores[indirect-32] = 1;
1691 last_prologue_pc = next_pc;
1692 }
1693 }
1694 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1695 {
1696 /* Either
1697 stfs [rN] = fM
1698 or
1699 stfd [rN] = fM
1700
1701 Advance over stores of floating point input registers. Again
1702 one store per register is permitted. */
1703 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1704 int qp = (int) (instr & 0x0000000003fLL);
1705 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1706 {
1707 infpstores[fM-8] = 1;
1708 last_prologue_pc = next_pc;
1709 }
1710 }
1711 else if (it == M
1712 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1713 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1714 {
1715 /* st8.spill [rN] = rM
1716 or
1717 st8.spill [rN] = rM, imm9 */
1718 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1719 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1720 int qp = (int) (instr & 0x0000000003fLL);
1721 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1722 {
1723 /* We've found a spill of one of the preserved general purpose
1724 regs. Record the spill address and advance the spill
1725 register if appropriate. */
1726 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1727 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1728 /* st8.spill [rN] = rM, imm9 */
1729 spill_addr += imm9(instr);
1730 else
1731 spill_addr = 0; /* Done spilling. */
1732 last_prologue_pc = next_pc;
1733 }
1734 }
1735
1736 pc = next_pc;
1737 }
1738
1739 /* If not frameless and we aren't called by skip_prologue, then we need
1740 to calculate registers for the previous frame which will be needed
1741 later. */
1742
1743 if (!frameless && this_frame)
1744 {
1745 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1746 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1747
1748 /* Extract the size of the rotating portion of the stack
1749 frame and the register rename base from the current
1750 frame marker. */
1751 cfm = cache->cfm;
1752 sor = cache->sor;
1753 sof = cache->sof;
1754 sol = cache->sol;
1755 rrb_gr = (cfm >> 18) & 0x7f;
1756
1757 /* Find the bof (beginning of frame). */
1758 bof = rse_address_add (cache->bsp, -sof);
1759
1760 for (i = 0, addr = bof;
1761 i < sof;
1762 i++, addr += 8)
1763 {
1764 if (IS_NaT_COLLECTION_ADDR (addr))
1765 {
1766 addr += 8;
1767 }
1768 if (i+32 == cfm_reg)
1769 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1770 if (i+32 == ret_reg)
1771 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1772 if (i+32 == fp_reg)
1773 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1774 }
1775
1776 /* For the previous argument registers we require the previous bof.
1777 If we can't find the previous cfm, then we can do nothing. */
1778 cfm = 0;
1779 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1780 {
1781 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1782 8, byte_order);
1783 }
1784 else if (cfm_reg != 0)
1785 {
1786 get_frame_register (this_frame, cfm_reg, buf);
1787 cfm = extract_unsigned_integer (buf, 8, byte_order);
1788 }
1789 cache->prev_cfm = cfm;
1790
1791 if (cfm != 0)
1792 {
1793 sor = ((cfm >> 14) & 0xf) * 8;
1794 sof = (cfm & 0x7f);
1795 sol = (cfm >> 7) & 0x7f;
1796 rrb_gr = (cfm >> 18) & 0x7f;
1797
1798 /* The previous bof only requires subtraction of the sol (size of
1799 locals) due to the overlap between output and input of
1800 subsequent frames. */
1801 bof = rse_address_add (bof, -sol);
1802
1803 for (i = 0, addr = bof;
1804 i < sof;
1805 i++, addr += 8)
1806 {
1807 if (IS_NaT_COLLECTION_ADDR (addr))
1808 {
1809 addr += 8;
1810 }
1811 if (i < sor)
1812 cache->saved_regs[IA64_GR32_REGNUM
1813 + ((i + (sor - rrb_gr)) % sor)]
1814 = addr;
1815 else
1816 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1817 }
1818
1819 }
1820 }
1821
1822 /* Try and trust the lim_pc value whenever possible. */
1823 if (trust_limit && lim_pc >= last_prologue_pc)
1824 last_prologue_pc = lim_pc;
1825
1826 cache->frameless = frameless;
1827 cache->after_prologue = last_prologue_pc;
1828 cache->mem_stack_frame_size = mem_stack_frame_size;
1829 cache->fp_reg = fp_reg;
1830
1831 return last_prologue_pc;
1832 }
1833
1834 CORE_ADDR
1835 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1836 {
1837 struct ia64_frame_cache cache;
1838 cache.base = 0;
1839 cache.after_prologue = 0;
1840 cache.cfm = 0;
1841 cache.bsp = 0;
1842
1843 /* Call examine_prologue with 0 as the third argument since we don't
1844 have a next frame pointer to send. */
1845 return examine_prologue (pc, pc+1024, 0, &cache);
1846 }
1847
1848
1849 /* Normal frames. */
1850
1851 static struct ia64_frame_cache *
1852 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1853 {
1854 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1855 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1856 struct ia64_frame_cache *cache;
1857 gdb_byte buf[8];
1858 CORE_ADDR cfm;
1859
1860 if (*this_cache)
1861 return (struct ia64_frame_cache *) *this_cache;
1862
1863 cache = ia64_alloc_frame_cache ();
1864 *this_cache = cache;
1865
1866 get_frame_register (this_frame, sp_regnum, buf);
1867 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1868
1869 /* We always want the bsp to point to the end of frame.
1870 This way, we can always get the beginning of frame (bof)
1871 by subtracting frame size. */
1872 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1873 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1874
1875 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1876
1877 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1878 cfm = extract_unsigned_integer (buf, 8, byte_order);
1879
1880 cache->sof = (cfm & 0x7f);
1881 cache->sol = (cfm >> 7) & 0x7f;
1882 cache->sor = ((cfm >> 14) & 0xf) * 8;
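/* For reference, the CFM fields decoded in this file are laid out as
   follows: sof in bits 0-6, sol in bits 7-13, sor in bits 14-17 (in
   units of eight registers), rrb.gr in bits 18-24, rrb.fr in bits
   25-31 and rrb.pr in bits 32-37, which is exactly what the shifts
   and masks used here and below extract.  */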
1883
1884 cache->cfm = cfm;
1885
1886 cache->pc = get_frame_func (this_frame);
1887
1888 if (cache->pc != 0)
1889 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1890
1891 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1892
1893 return cache;
1894 }
1895
1896 static void
1897 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1898 struct frame_id *this_id)
1899 {
1900 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1901 struct ia64_frame_cache *cache =
1902 ia64_frame_cache (this_frame, this_cache);
1903
1904 /* If outermost frame, mark with null frame id. */
1905 if (cache->base != 0)
1906 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1907 if (gdbarch_debug >= 1)
1908 fprintf_unfiltered (gdb_stdlog,
1909 "regular frame id: code %s, stack %s, "
1910 "special %s, this_frame %s\n",
1911 paddress (gdbarch, this_id->code_addr),
1912 paddress (gdbarch, this_id->stack_addr),
1913 paddress (gdbarch, cache->bsp),
1914 host_address_to_string (this_frame));
1915 }
1916
1917 static struct value *
1918 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1919 int regnum)
1920 {
1921 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1922 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1923 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1924 gdb_byte buf[8];
1925
1926 gdb_assert (regnum >= 0);
1927
1928 if (!target_has_registers)
1929 error (_("No registers."));
1930
1931 if (regnum == gdbarch_sp_regnum (gdbarch))
1932 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1933
1934 else if (regnum == IA64_BSP_REGNUM)
1935 {
1936 struct value *val;
1937 CORE_ADDR prev_cfm, bsp, prev_bsp;
1938
1939 /* We want to calculate the previous bsp as the end of the previous
1940 register stack frame. This corresponds to what the hardware bsp
1941 register will be if we pop the frame back, which is why we might
1942 have been called. We know the beginning of the current frame is
1943 cache->bsp - cache->sof. This value in the previous frame points
1944 to the start of the output registers. We can calculate the end of
1945 that frame by adding the size of output:
1946 (sof (size of frame) - sol (size of locals)). */
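/* Illustrative numbers only: if the previous frame's CFM gave sof = 96
   and sol = 32, that frame had 64 output registers, so the previous bsp
   sits 64 register slots above the current frame's bof; rse_address_add
   below also accounts for any intervening NaT collection slots.  */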
1947 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1948 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1949 8, byte_order);
1950 bsp = rse_address_add (cache->bsp, -(cache->sof));
1951 prev_bsp =
1952 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1953
1954 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1955 }
1956
1957 else if (regnum == IA64_CFM_REGNUM)
1958 {
1959 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1960
1961 if (addr != 0)
1962 return frame_unwind_got_memory (this_frame, regnum, addr);
1963
1964 if (cache->prev_cfm)
1965 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1966
1967 if (cache->frameless)
1968 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1969 IA64_PFS_REGNUM);
1970 return frame_unwind_got_register (this_frame, regnum, 0);
1971 }
1972
1973 else if (regnum == IA64_VFP_REGNUM)
1974 {
1975 /* If the function in question uses an automatic register (r32-r127)
1976 for the frame pointer, it'll be found by ia64_find_saved_register()
1977 above. If the function lacks one of these frame pointers, we can
1978 still provide a value since we know the size of the frame. */
1979 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1980 }
1981
1982 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1983 {
1984 struct value *pr_val;
1985 ULONGEST prN;
1986
1987 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1988 IA64_PR_REGNUM);
1989 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1990 {
1991 /* Fetch predicate register rename base from current frame
1992 marker for this frame. */
1993 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1994
1995 /* Adjust the register number to account for register rotation. */
1996 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1997 }
1998 prN = extract_bit_field (value_contents_all (pr_val),
1999 regnum - VP0_REGNUM, 1);
2000 return frame_unwind_got_constant (this_frame, regnum, prN);
2001 }
2002
2003 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
2004 {
2005 struct value *unat_val;
2006 ULONGEST unatN;
2007 unat_val = ia64_frame_prev_register (this_frame, this_cache,
2008 IA64_UNAT_REGNUM);
2009 unatN = extract_bit_field (value_contents_all (unat_val),
2010 regnum - IA64_NAT0_REGNUM, 1);
2011 return frame_unwind_got_constant (this_frame, regnum, unatN);
2012 }
2013
2014 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2015 {
2016 int natval = 0;
2017 /* Find address of general register corresponding to nat bit we're
2018 interested in. */
2019 CORE_ADDR gr_addr;
2020
2021 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2022
2023 if (gr_addr != 0)
2024 {
2025 /* Compute address of nat collection bits. */
2026 CORE_ADDR nat_addr = gr_addr | 0x1f8;
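/* The backing store keeps one NaT collection doubleword per 63 register
   doublewords: the last doubleword of each 0x200-byte aligned block.
   Or'ing with 0x1f8 therefore yields the collection covering GR_ADDR,
   and (gr_addr >> 3) & 0x3f below selects the bit within it.  */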
2027 CORE_ADDR bsp;
2028 CORE_ADDR nat_collection;
2029 int nat_bit;
2030
2031 /* If our nat collection address is bigger than bsp, we have to get
2032 the nat collection from rnat. Otherwise, we fetch the nat
2033 collection from the computed address. */
2034 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2035 bsp = extract_unsigned_integer (buf, 8, byte_order);
2036 if (nat_addr >= bsp)
2037 {
2038 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2039 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2040 }
2041 else
2042 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2043 nat_bit = (gr_addr >> 3) & 0x3f;
2044 natval = (nat_collection >> nat_bit) & 1;
2045 }
2046
2047 return frame_unwind_got_constant (this_frame, regnum, natval);
2048 }
2049
2050 else if (regnum == IA64_IP_REGNUM)
2051 {
2052 CORE_ADDR pc = 0;
2053 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2054
2055 if (addr != 0)
2056 {
2057 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2058 pc = extract_unsigned_integer (buf, 8, byte_order);
2059 }
2060 else if (cache->frameless)
2061 {
2062 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2063 pc = extract_unsigned_integer (buf, 8, byte_order);
2064 }
2065 pc &= ~0xf;
2066 return frame_unwind_got_constant (this_frame, regnum, pc);
2067 }
2068
2069 else if (regnum == IA64_PSR_REGNUM)
2070 {
2071 /* We don't know how to get the complete previous PSR, but we need it
2072 for the slot information when we unwind the pc (pc is formed of IP
2073 register plus slot information from PSR). To get the previous
2074 slot information, we extract it from the low bits of the return address. */
2075 ULONGEST slot_num = 0;
2076 CORE_ADDR pc = 0;
2077 CORE_ADDR psr = 0;
2078 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2079
2080 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2081 psr = extract_unsigned_integer (buf, 8, byte_order);
2082
2083 if (addr != 0)
2084 {
2085 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2086 pc = extract_unsigned_integer (buf, 8, byte_order);
2087 }
2088 else if (cache->frameless)
2089 {
2090 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2091 pc = extract_unsigned_integer (buf, 8, byte_order);
2092 }
2093 psr &= ~(3LL << 41);
2094 slot_num = pc & 0x3LL;
2095 psr |= (CORE_ADDR)slot_num << 41;
2096 return frame_unwind_got_constant (this_frame, regnum, psr);
2097 }
2098
2099 else if (regnum == IA64_BR0_REGNUM)
2100 {
2101 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2102
2103 if (addr != 0)
2104 return frame_unwind_got_memory (this_frame, regnum, addr);
2105
2106 return frame_unwind_got_constant (this_frame, regnum, 0);
2107 }
2108
2109 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2110 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2111 {
2112 CORE_ADDR addr = 0;
2113
2114 if (regnum >= V32_REGNUM)
2115 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2116 addr = cache->saved_regs[regnum];
2117 if (addr != 0)
2118 return frame_unwind_got_memory (this_frame, regnum, addr);
2119
2120 if (cache->frameless)
2121 {
2122 struct value *reg_val;
2123 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2124
2125 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2126 with the same code above? */
2127 if (regnum >= V32_REGNUM)
2128 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2129 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2130 IA64_CFM_REGNUM);
2131 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2132 8, byte_order);
2133 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2134 IA64_BSP_REGNUM);
2135 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2136 8, byte_order);
2137 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2138
2139 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2140 return frame_unwind_got_memory (this_frame, regnum, addr);
2141 }
2142
2143 return frame_unwind_got_constant (this_frame, regnum, 0);
2144 }
2145
2146 else /* All other registers. */
2147 {
2148 CORE_ADDR addr = 0;
2149
2150 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2151 {
2152 /* Fetch floating point register rename base from current
2153 frame marker for this frame. */
2154 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2155
2156 /* Adjust the floating point register number to account for
2157 register rotation. */
2158 regnum = IA64_FR32_REGNUM
2159 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2160 }
2161
2162 /* If we have stored a memory address, access the register. */
2163 addr = cache->saved_regs[regnum];
2164 if (addr != 0)
2165 return frame_unwind_got_memory (this_frame, regnum, addr);
2166 /* Otherwise, punt and get the current value of the register. */
2167 else
2168 return frame_unwind_got_register (this_frame, regnum, regnum);
2169 }
2170 }
2171
2172 static const struct frame_unwind ia64_frame_unwind =
2173 {
2174 NORMAL_FRAME,
2175 default_frame_unwind_stop_reason,
2176 &ia64_frame_this_id,
2177 &ia64_frame_prev_register,
2178 NULL,
2179 default_frame_sniffer
2180 };
2181
2182 /* Signal trampolines. */
2183
2184 static void
2185 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2186 struct ia64_frame_cache *cache)
2187 {
2188 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2189 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2190
2191 if (tdep->sigcontext_register_address)
2192 {
2193 int regno;
2194
2195 cache->saved_regs[IA64_VRAP_REGNUM]
2196 = tdep->sigcontext_register_address (gdbarch, cache->base,
2197 IA64_IP_REGNUM);
2198 cache->saved_regs[IA64_CFM_REGNUM]
2199 = tdep->sigcontext_register_address (gdbarch, cache->base,
2200 IA64_CFM_REGNUM);
2201 cache->saved_regs[IA64_PSR_REGNUM]
2202 = tdep->sigcontext_register_address (gdbarch, cache->base,
2203 IA64_PSR_REGNUM);
2204 cache->saved_regs[IA64_BSP_REGNUM]
2205 = tdep->sigcontext_register_address (gdbarch, cache->base,
2206 IA64_BSP_REGNUM);
2207 cache->saved_regs[IA64_RNAT_REGNUM]
2208 = tdep->sigcontext_register_address (gdbarch, cache->base,
2209 IA64_RNAT_REGNUM);
2210 cache->saved_regs[IA64_CCV_REGNUM]
2211 = tdep->sigcontext_register_address (gdbarch, cache->base,
2212 IA64_CCV_REGNUM);
2213 cache->saved_regs[IA64_UNAT_REGNUM]
2214 = tdep->sigcontext_register_address (gdbarch, cache->base,
2215 IA64_UNAT_REGNUM);
2216 cache->saved_regs[IA64_FPSR_REGNUM]
2217 = tdep->sigcontext_register_address (gdbarch, cache->base,
2218 IA64_FPSR_REGNUM);
2219 cache->saved_regs[IA64_PFS_REGNUM]
2220 = tdep->sigcontext_register_address (gdbarch, cache->base,
2221 IA64_PFS_REGNUM);
2222 cache->saved_regs[IA64_LC_REGNUM]
2223 = tdep->sigcontext_register_address (gdbarch, cache->base,
2224 IA64_LC_REGNUM);
2225
2226 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2227 cache->saved_regs[regno] =
2228 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2229 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2230 cache->saved_regs[regno] =
2231 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2232 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2233 cache->saved_regs[regno] =
2234 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2235 }
2236 }
2237
2238 static struct ia64_frame_cache *
2239 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2240 {
2241 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2242 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2243 struct ia64_frame_cache *cache;
2244 gdb_byte buf[8];
2245
2246 if (*this_cache)
2247 return (struct ia64_frame_cache *) *this_cache;
2248
2249 cache = ia64_alloc_frame_cache ();
2250
2251 get_frame_register (this_frame, sp_regnum, buf);
2252 /* Note that frame size is hard-coded below. We cannot calculate it
2253 via prologue examination. */
2254 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2255
2256 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2257 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2258
2259 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2260 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2261 cache->sof = cache->cfm & 0x7f;
2262
2263 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2264
2265 *this_cache = cache;
2266 return cache;
2267 }
2268
2269 static void
2270 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2271 void **this_cache, struct frame_id *this_id)
2272 {
2273 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2274 struct ia64_frame_cache *cache =
2275 ia64_sigtramp_frame_cache (this_frame, this_cache);
2276
2277 (*this_id) = frame_id_build_special (cache->base,
2278 get_frame_pc (this_frame),
2279 cache->bsp);
2280 if (gdbarch_debug >= 1)
2281 fprintf_unfiltered (gdb_stdlog,
2282 "sigtramp frame id: code %s, stack %s, "
2283 "special %s, this_frame %s\n",
2284 paddress (gdbarch, this_id->code_addr),
2285 paddress (gdbarch, this_id->stack_addr),
2286 paddress (gdbarch, cache->bsp),
2287 host_address_to_string (this_frame));
2288 }
2289
2290 static struct value *
2291 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2292 void **this_cache, int regnum)
2293 {
2294 struct ia64_frame_cache *cache =
2295 ia64_sigtramp_frame_cache (this_frame, this_cache);
2296
2297 gdb_assert (regnum >= 0);
2298
2299 if (!target_has_registers)
2300 error (_("No registers."));
2301
2302 if (regnum == IA64_IP_REGNUM)
2303 {
2304 CORE_ADDR pc = 0;
2305 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2306
2307 if (addr != 0)
2308 {
2309 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2310 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2311 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2312 }
2313 pc &= ~0xf;
2314 return frame_unwind_got_constant (this_frame, regnum, pc);
2315 }
2316
2317 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2318 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2319 {
2320 CORE_ADDR addr = 0;
2321
2322 if (regnum >= V32_REGNUM)
2323 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2324 addr = cache->saved_regs[regnum];
2325 if (addr != 0)
2326 return frame_unwind_got_memory (this_frame, regnum, addr);
2327
2328 return frame_unwind_got_constant (this_frame, regnum, 0);
2329 }
2330
2331 else /* All other registers not listed above. */
2332 {
2333 CORE_ADDR addr = cache->saved_regs[regnum];
2334
2335 if (addr != 0)
2336 return frame_unwind_got_memory (this_frame, regnum, addr);
2337
2338 return frame_unwind_got_constant (this_frame, regnum, 0);
2339 }
2340 }
2341
2342 static int
2343 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2344 struct frame_info *this_frame,
2345 void **this_cache)
2346 {
2347 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2348 if (tdep->pc_in_sigtramp)
2349 {
2350 CORE_ADDR pc = get_frame_pc (this_frame);
2351
2352 if (tdep->pc_in_sigtramp (pc))
2353 return 1;
2354 }
2355
2356 return 0;
2357 }
2358
2359 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2360 {
2361 SIGTRAMP_FRAME,
2362 default_frame_unwind_stop_reason,
2363 ia64_sigtramp_frame_this_id,
2364 ia64_sigtramp_frame_prev_register,
2365 NULL,
2366 ia64_sigtramp_frame_sniffer
2367 };
2368
2369 \f
2370
2371 static CORE_ADDR
2372 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2373 {
2374 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2375
2376 return cache->base;
2377 }
2378
2379 static const struct frame_base ia64_frame_base =
2380 {
2381 &ia64_frame_unwind,
2382 ia64_frame_base_address,
2383 ia64_frame_base_address,
2384 ia64_frame_base_address
2385 };
2386
2387 #ifdef HAVE_LIBUNWIND_IA64_H
2388
2389 struct ia64_unwind_table_entry
2390 {
2391 unw_word_t start_offset;
2392 unw_word_t end_offset;
2393 unw_word_t info_offset;
2394 };
2395
2396 static __inline__ uint64_t
2397 ia64_rse_slot_num (uint64_t addr)
2398 {
2399 return (addr >> 3) & 0x3f;
2400 }
2401
2402 /* Skip over a designated number of registers in the backing
2403 store, remembering that every 64th position is for NAT. */
2404 static __inline__ uint64_t
2405 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2406 {
2407 long delta = ia64_rse_slot_num(addr) + num_regs;
2408
2409 if (num_regs < 0)
2410 delta -= 0x3e;
2411 return addr + ((num_regs + delta/0x3f) << 3);
2412 }
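/* For example, advancing 70 registers from an address in slot 10
   crosses one NaT collection slot, so the formula above works out to
   addr + (70 + 1) * 8.  */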
2413
2414 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2415 register number to a libunwind register number. */
2416 static int
2417 ia64_gdb2uw_regnum (int regnum)
2418 {
2419 if (regnum == sp_regnum)
2420 return UNW_IA64_SP;
2421 else if (regnum == IA64_BSP_REGNUM)
2422 return UNW_IA64_BSP;
2423 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2424 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2425 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2426 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2427 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2428 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2429 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2430 return -1;
2431 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2432 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2433 else if (regnum == IA64_PR_REGNUM)
2434 return UNW_IA64_PR;
2435 else if (regnum == IA64_IP_REGNUM)
2436 return UNW_REG_IP;
2437 else if (regnum == IA64_CFM_REGNUM)
2438 return UNW_IA64_CFM;
2439 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2440 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2441 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2442 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2443 else
2444 return -1;
2445 }
2446
2447 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2448 register number to an ia64 gdb register number. */
2449 static int
2450 ia64_uw2gdb_regnum (int uw_regnum)
2451 {
2452 if (uw_regnum == UNW_IA64_SP)
2453 return sp_regnum;
2454 else if (uw_regnum == UNW_IA64_BSP)
2455 return IA64_BSP_REGNUM;
2456 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2457 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2458 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2459 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2460 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2461 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2462 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2463 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2464 else if (uw_regnum == UNW_IA64_PR)
2465 return IA64_PR_REGNUM;
2466 else if (uw_regnum == UNW_REG_IP)
2467 return IA64_IP_REGNUM;
2468 else if (uw_regnum == UNW_IA64_CFM)
2469 return IA64_CFM_REGNUM;
2470 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2471 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2472 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2473 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2474 else
2475 return -1;
2476 }
2477
2478 /* Gdb ia64-libunwind-tdep callback function to reveal whether a register
2479 is a float register or not. */
2480 static int
2481 ia64_is_fpreg (int uw_regnum)
2482 {
2483 return unw_is_fpreg (uw_regnum);
2484 }
2485
2486 /* Libunwind callback accessor function for general registers. */
2487 static int
2488 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2489 int write, void *arg)
2490 {
2491 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2492 unw_word_t bsp, sof, cfm, psr, ip;
2493 struct frame_info *this_frame = (struct frame_info *) arg;
2494 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2495 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2496 long new_sof, old_sof;
2497
2498 /* We never call any libunwind routines that need to write registers. */
2499 gdb_assert (!write);
2500
2501 switch (uw_regnum)
2502 {
2503 case UNW_REG_IP:
2504 /* Libunwind expects to see the pc value which means the slot number
2505 from the psr must be merged with the ip word address. */
2506 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2507 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2508 *val = ip | ((psr >> 41) & 0x3);
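/* psr.ri, the current instruction slot, lives in bits 41-42 of the
   PSR, hence the shift and mask above.  */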
2509 break;
2510
2511 case UNW_IA64_AR_BSP:
2512 /* Libunwind expects to see the beginning of the current
2513 register frame so we must account for the fact that
2514 ptrace() will return a value for bsp that points *after*
2515 the current register frame. */
2516 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2517 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2518 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2519 *val = ia64_rse_skip_regs (bsp, -sof);
2520 break;
2521
2522 case UNW_IA64_AR_BSPSTORE:
2523 /* Libunwind wants bspstore to be after the current register frame.
2524 This is what ptrace() and gdb treat as the regular bsp value. */
2525 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2526 break;
2527
2528 default:
2529 /* For all other registers, just unwind the value directly. */
2530 *val = get_frame_register_unsigned (this_frame, regnum);
2531 break;
2532 }
2533
2534 if (gdbarch_debug >= 1)
2535 fprintf_unfiltered (gdb_stdlog,
2536 " access_reg: from cache: %4s=%s\n",
2537 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2538 ? ia64_register_names[regnum] : "r??"),
2539 paddress (gdbarch, *val));
2540 return 0;
2541 }
2542
2543 /* Libunwind callback accessor function for floating-point registers. */
2544 static int
2545 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2546 unw_fpreg_t *val, int write, void *arg)
2547 {
2548 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2549 struct frame_info *this_frame = (struct frame_info *) arg;
2550
2551 /* We never call any libunwind routines that need to write registers. */
2552 gdb_assert (!write);
2553
2554 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2555
2556 return 0;
2557 }
2558
2559 /* Libunwind callback accessor function for top-level rse registers. */
2560 static int
2561 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2562 unw_word_t *val, int write, void *arg)
2563 {
2564 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2565 unw_word_t bsp, sof, cfm, psr, ip;
2566 struct regcache *regcache = (struct regcache *) arg;
2567 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2568 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2569 long new_sof, old_sof;
2570
2571 /* We never call any libunwind routines that need to write registers. */
2572 gdb_assert (!write);
2573
2574 switch (uw_regnum)
2575 {
2576 case UNW_REG_IP:
2577 /* Libunwind expects to see the pc value which means the slot number
2578 from the psr must be merged with the ip word address. */
2579 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2580 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2581 *val = ip | ((psr >> 41) & 0x3);
2582 break;
2583
2584 case UNW_IA64_AR_BSP:
2585 /* Libunwind expects to see the beginning of the current
2586 register frame so we must account for the fact that
2587 ptrace() will return a value for bsp that points *after*
2588 the current register frame. */
2589 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2590 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2591 sof = (cfm & 0x7f);
2592 *val = ia64_rse_skip_regs (bsp, -sof);
2593 break;
2594
2595 case UNW_IA64_AR_BSPSTORE:
2596 /* Libunwind wants bspstore to be after the current register frame.
2597 This is what ptrace() and gdb treat as the regular bsp value. */
2598 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2599 break;
2600
2601 default:
2602 /* For all other registers, just unwind the value directly. */
2603 regcache_cooked_read_unsigned (regcache, regnum, val);
2604 break;
2605 }
2606
2607 if (gdbarch_debug >= 1)
2608 fprintf_unfiltered (gdb_stdlog,
2609 " access_rse_reg: from cache: %4s=%s\n",
2610 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2611 ? ia64_register_names[regnum] : "r??"),
2612 paddress (gdbarch, *val));
2613
2614 return 0;
2615 }
2616
2617 /* Libunwind callback accessor function for top-level fp registers. */
2618 static int
2619 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2620 unw_fpreg_t *val, int write, void *arg)
2621 {
2622 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2623 struct regcache *regcache = (struct regcache *) arg;
2624
2625 /* We never call any libunwind routines that need to write registers. */
2626 gdb_assert (!write);
2627
2628 regcache_cooked_read (regcache, regnum, (gdb_byte *) val);
2629
2630 return 0;
2631 }
2632
2633 /* Libunwind callback accessor function for accessing memory. */
2634 static int
2635 ia64_access_mem (unw_addr_space_t as,
2636 unw_word_t addr, unw_word_t *val,
2637 int write, void *arg)
2638 {
2639 if (addr - KERNEL_START < ktab_size)
2640 {
2641 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2642 + (addr - KERNEL_START));
2643
2644 if (write)
2645 *laddr = *val;
2646 else
2647 *val = *laddr;
2648 return 0;
2649 }
2650
2651 /* XXX do we need to normalize byte-order here? */
2652 if (write)
2653 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2654 else
2655 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2656 }
2657
2658 /* Call low-level function to access the kernel unwind table. */
2659 static LONGEST
2660 getunwind_table (gdb_byte **buf_p)
2661 {
2662 LONGEST x;
2663
2664 /* FIXME drow/2005-09-10: This code used to call
2665 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2666 for the currently running ia64-linux kernel. That data should
2667 come from the core file and be accessed via the auxv vector; if
2668 we want to preserve the fallback to the running kernel's table, then
2669 we should find a way to override the corefile layer's
2670 xfer_partial method. */
2671
2672 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2673 NULL, buf_p);
2674
2675 return x;
2676 }
2677
2678 /* Get the kernel unwind table. */
2679 static int
2680 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2681 {
2682 static struct ia64_table_entry *etab;
2683
2684 if (!ktab)
2685 {
2686 gdb_byte *ktab_buf;
2687 LONGEST size;
2688
2689 size = getunwind_table (&ktab_buf);
2690 if (size <= 0)
2691 return -UNW_ENOINFO;
2692
2693 ktab = (struct ia64_table_entry *) ktab_buf;
2694 ktab_size = size;
2695
2696 for (etab = ktab; etab->start_offset; ++etab)
2697 etab->info_offset += KERNEL_START;
2698 }
2699
2700 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2701 return -UNW_ENOINFO;
2702
2703 di->format = UNW_INFO_FORMAT_TABLE;
2704 di->gp = 0;
2705 di->start_ip = ktab[0].start_offset;
2706 di->end_ip = etab[-1].end_offset;
2707 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2708 di->u.ti.segbase = 0;
2709 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2710 di->u.ti.table_data = (unw_word_t *) ktab;
2711
2712 if (gdbarch_debug >= 1)
2713 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2714 "segbase=%s, length=%s, gp=%s\n",
2715 (char *) di->u.ti.name_ptr,
2716 hex_string (di->u.ti.segbase),
2717 pulongest (di->u.ti.table_len),
2718 hex_string (di->gp));
2719 return 0;
2720 }
2721
2722 /* Find the unwind table entry for a specified address. */
2723 static int
2724 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2725 unw_dyn_info_t *dip, void **buf)
2726 {
2727 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2728 Elf_Internal_Ehdr *ehdr;
2729 unw_word_t segbase = 0;
2730 CORE_ADDR load_base;
2731 bfd *bfd;
2732 int i;
2733
2734 bfd = objfile->obfd;
2735
2736 ehdr = elf_tdata (bfd)->elf_header;
2737 phdr = elf_tdata (bfd)->phdr;
2738
2739 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2740
2741 for (i = 0; i < ehdr->e_phnum; ++i)
2742 {
2743 switch (phdr[i].p_type)
2744 {
2745 case PT_LOAD:
2746 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2747 < phdr[i].p_memsz)
2748 p_text = phdr + i;
2749 break;
2750
2751 case PT_IA_64_UNWIND:
2752 p_unwind = phdr + i;
2753 break;
2754
2755 default:
2756 break;
2757 }
2758 }
2759
2760 if (!p_text || !p_unwind)
2761 return -UNW_ENOINFO;
2762
2763 /* Verify that the segment that contains the IP also contains
2764 the static unwind table. If not, we may be in the Linux kernel's
2765 DSO gate page, in which case the unwind table is in another segment.
2766 Otherwise, we are dealing with runtime-generated code, for which we
2767 have no info here. */
2768 segbase = p_text->p_vaddr + load_base;
2769
2770 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2771 {
2772 int ok = 0;
2773 for (i = 0; i < ehdr->e_phnum; ++i)
2774 {
2775 if (phdr[i].p_type == PT_LOAD
2776 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2777 {
2778 ok = 1;
2779 /* Get the segbase from the section containing the
2780 libunwind table. */
2781 segbase = phdr[i].p_vaddr + load_base;
2782 }
2783 }
2784 if (!ok)
2785 return -UNW_ENOINFO;
2786 }
2787
2788 dip->start_ip = p_text->p_vaddr + load_base;
2789 dip->end_ip = dip->start_ip + p_text->p_memsz;
2790 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2791 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2792 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2793 dip->u.rti.segbase = segbase;
2794 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2795 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2796
2797 return 0;
2798 }
2799
2800 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2801 static int
2802 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2803 int need_unwind_info, void *arg)
2804 {
2805 struct obj_section *sec = find_pc_section (ip);
2806 unw_dyn_info_t di;
2807 int ret;
2808 void *buf = NULL;
2809
2810 if (!sec)
2811 {
2812 /* XXX This only works if the host and the target architecture are
2813 both ia64 and if they have (more or less) the same kernel
2814 version. */
2815 if (get_kernel_table (ip, &di) < 0)
2816 return -UNW_ENOINFO;
2817
2818 if (gdbarch_debug >= 1)
2819 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2820 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2821 "length=%s,data=%s)\n",
2822 hex_string (ip), (char *)di.u.ti.name_ptr,
2823 hex_string (di.u.ti.segbase),
2824 hex_string (di.start_ip), hex_string (di.end_ip),
2825 hex_string (di.gp),
2826 pulongest (di.u.ti.table_len),
2827 hex_string ((CORE_ADDR)di.u.ti.table_data));
2828 }
2829 else
2830 {
2831 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2832 if (ret < 0)
2833 return ret;
2834
2835 if (gdbarch_debug >= 1)
2836 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2837 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2838 "length=%s,data=%s)\n",
2839 hex_string (ip), (char *)di.u.rti.name_ptr,
2840 hex_string (di.u.rti.segbase),
2841 hex_string (di.start_ip), hex_string (di.end_ip),
2842 hex_string (di.gp),
2843 pulongest (di.u.rti.table_len),
2844 hex_string (di.u.rti.table_data));
2845 }
2846
2847 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2848 arg);
2849
2850 /* We no longer need the dyn info storage so free it. */
2851 xfree (buf);
2852
2853 return ret;
2854 }
2855
2856 /* Libunwind callback accessor function for cleanup. */
2857 static void
2858 ia64_put_unwind_info (unw_addr_space_t as,
2859 unw_proc_info_t *pip, void *arg)
2860 {
2861 /* Nothing required for now. */
2862 }
2863
2864 /* Libunwind callback accessor function to get head of the dynamic
2865 unwind-info registration list. */
2866 static int
2867 ia64_get_dyn_info_list (unw_addr_space_t as,
2868 unw_word_t *dilap, void *arg)
2869 {
2870 struct obj_section *text_sec;
2871 struct objfile *objfile;
2872 unw_word_t ip, addr;
2873 unw_dyn_info_t di;
2874 int ret;
2875
2876 if (!libunwind_is_initialized ())
2877 return -UNW_ENOINFO;
2878
2879 for (objfile = object_files; objfile; objfile = objfile->next)
2880 {
2881 void *buf = NULL;
2882
2883 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2884 ip = obj_section_addr (text_sec);
2885 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2886 if (ret >= 0)
2887 {
2888 addr = libunwind_find_dyn_list (as, &di, arg);
2889 /* We no longer need the dyn info storage so free it. */
2890 xfree (buf);
2891
2892 if (addr)
2893 {
2894 if (gdbarch_debug >= 1)
2895 fprintf_unfiltered (gdb_stdlog,
2896 "dynamic unwind table in objfile %s "
2897 "at %s (gp=%s)\n",
2898 bfd_get_filename (objfile->obfd),
2899 hex_string (addr), hex_string (di.gp));
2900 *dilap = addr;
2901 return 0;
2902 }
2903 }
2904 }
2905 return -UNW_ENOINFO;
2906 }
2907
2908
2909 /* Frame interface functions for libunwind. */
2910
2911 static void
2912 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2913 struct frame_id *this_id)
2914 {
2915 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2916 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2917 struct frame_id id = outer_frame_id;
2918 gdb_byte buf[8];
2919 CORE_ADDR bsp;
2920
2921 libunwind_frame_this_id (this_frame, this_cache, &id);
2922 if (frame_id_eq (id, outer_frame_id))
2923 {
2924 (*this_id) = outer_frame_id;
2925 return;
2926 }
2927
2928 /* We must add the bsp as the special address for frame comparison
2929 purposes. */
2930 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2931 bsp = extract_unsigned_integer (buf, 8, byte_order);
2932
2933 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2934
2935 if (gdbarch_debug >= 1)
2936 fprintf_unfiltered (gdb_stdlog,
2937 "libunwind frame id: code %s, stack %s, "
2938 "special %s, this_frame %s\n",
2939 paddress (gdbarch, id.code_addr),
2940 paddress (gdbarch, id.stack_addr),
2941 paddress (gdbarch, bsp),
2942 host_address_to_string (this_frame));
2943 }
2944
2945 static struct value *
2946 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2947 void **this_cache, int regnum)
2948 {
2949 int reg = regnum;
2950 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2951 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2952 struct value *val;
2953
2954 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2955 reg = IA64_PR_REGNUM;
2956 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2957 reg = IA64_UNAT_REGNUM;
2958
2959 /* Let libunwind do most of the work. */
2960 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2961
2962 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2963 {
2964 ULONGEST prN_val;
2965
2966 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2967 {
2968 int rrb_pr = 0;
2969 ULONGEST cfm;
2970
2971 /* Fetch predicate register rename base from current frame
2972 marker for this frame. */
2973 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2974 rrb_pr = (cfm >> 32) & 0x3f;
2975
2976 /* Adjust the register number to account for register rotation. */
2977 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2978 }
2979 prN_val = extract_bit_field (value_contents_all (val),
2980 regnum - VP0_REGNUM, 1);
2981 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2982 }
2983
2984 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2985 {
2986 ULONGEST unatN_val;
2987
2988 unatN_val = extract_bit_field (value_contents_all (val),
2989 regnum - IA64_NAT0_REGNUM, 1);
2990 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2991 }
2992
2993 else if (regnum == IA64_BSP_REGNUM)
2994 {
2995 struct value *cfm_val;
2996 CORE_ADDR prev_bsp, prev_cfm;
2997
2998 /* We want to calculate the previous bsp as the end of the previous
2999 register stack frame. This corresponds to what the hardware bsp
3000 register will be if we pop the frame back, which is why we might
3001 have been called. We know that libunwind will pass us back the
3002 beginning of the current frame so we should just add sof to it. */
3003 prev_bsp = extract_unsigned_integer (value_contents_all (val),
3004 8, byte_order);
3005 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
3006 IA64_CFM_REGNUM);
3007 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
3008 8, byte_order);
3009 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
3010
3011 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
3012 }
3013 else
3014 return val;
3015 }
3016
3017 static int
3018 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3019 struct frame_info *this_frame,
3020 void **this_cache)
3021 {
3022 if (libunwind_is_initialized ()
3023 && libunwind_frame_sniffer (self, this_frame, this_cache))
3024 return 1;
3025
3026 return 0;
3027 }
3028
3029 static const struct frame_unwind ia64_libunwind_frame_unwind =
3030 {
3031 NORMAL_FRAME,
3032 default_frame_unwind_stop_reason,
3033 ia64_libunwind_frame_this_id,
3034 ia64_libunwind_frame_prev_register,
3035 NULL,
3036 ia64_libunwind_frame_sniffer,
3037 libunwind_frame_dealloc_cache
3038 };
3039
3040 static void
3041 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3042 void **this_cache,
3043 struct frame_id *this_id)
3044 {
3045 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3046 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3047 gdb_byte buf[8];
3048 CORE_ADDR bsp;
3049 struct frame_id id = outer_frame_id;
3050 CORE_ADDR prev_ip;
3051
3052 libunwind_frame_this_id (this_frame, this_cache, &id);
3053 if (frame_id_eq (id, outer_frame_id))
3054 {
3055 (*this_id) = outer_frame_id;
3056 return;
3057 }
3058
3059 /* We must add the bsp as the special address for frame comparison
3060 purposes. */
3061 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3062 bsp = extract_unsigned_integer (buf, 8, byte_order);
3063
3064 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3065 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3066
3067 if (gdbarch_debug >= 1)
3068 fprintf_unfiltered (gdb_stdlog,
3069 "libunwind sigtramp frame id: code %s, "
3070 "stack %s, special %s, this_frame %s\n",
3071 paddress (gdbarch, id.code_addr),
3072 paddress (gdbarch, id.stack_addr),
3073 paddress (gdbarch, bsp),
3074 host_address_to_string (this_frame));
3075 }
3076
3077 static struct value *
3078 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3079 void **this_cache, int regnum)
3080 {
3081 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3082 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3083 struct value *prev_ip_val;
3084 CORE_ADDR prev_ip;
3085
3086 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3087 method of getting previous registers. */
3088 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3089 IA64_IP_REGNUM);
3090 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3091 8, byte_order);
3092
3093 if (prev_ip == 0)
3094 {
3095 void *tmp_cache = NULL;
3096 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3097 regnum);
3098 }
3099 else
3100 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3101 }
3102
3103 static int
3104 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3105 struct frame_info *this_frame,
3106 void **this_cache)
3107 {
3108 if (libunwind_is_initialized ())
3109 {
3110 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3111 return 1;
3112 return 0;
3113 }
3114 else
3115 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3116 }
3117
3118 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3119 {
3120 SIGTRAMP_FRAME,
3121 default_frame_unwind_stop_reason,
3122 ia64_libunwind_sigtramp_frame_this_id,
3123 ia64_libunwind_sigtramp_frame_prev_register,
3124 NULL,
3125 ia64_libunwind_sigtramp_frame_sniffer
3126 };
3127
3128 /* Set of libunwind callback accessor functions. */
3129 unw_accessors_t ia64_unw_accessors =
3130 {
3131 ia64_find_proc_info_x,
3132 ia64_put_unwind_info,
3133 ia64_get_dyn_info_list,
3134 ia64_access_mem,
3135 ia64_access_reg,
3136 ia64_access_fpreg,
3137 /* resume */
3138 /* get_proc_name */
3139 };
3140
3141 /* Set of special libunwind callback accessor functions for accessing
3142 the rse registers. At the top of the stack, we want libunwind to figure out
3143 how to read r32 - r127. Though usually they are found sequentially in
3144 memory starting from $bof, this is not always true. */
3145 unw_accessors_t ia64_unw_rse_accessors =
3146 {
3147 ia64_find_proc_info_x,
3148 ia64_put_unwind_info,
3149 ia64_get_dyn_info_list,
3150 ia64_access_mem,
3151 ia64_access_rse_reg,
3152 ia64_access_rse_fpreg,
3153 /* resume */
3154 /* get_proc_name */
3155 };
3156
3157 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3158 ia64-libunwind-tdep code to use. */
3159 struct libunwind_descr ia64_libunwind_descr =
3160 {
3161 ia64_gdb2uw_regnum,
3162 ia64_uw2gdb_regnum,
3163 ia64_is_fpreg,
3164 &ia64_unw_accessors,
3165 &ia64_unw_rse_accessors,
3166 };
3167
3168 #endif /* HAVE_LIBUNWIND_IA64_H */
3169
3170 static int
3171 ia64_use_struct_convention (struct type *type)
3172 {
3173 struct type *float_elt_type;
3174
3175 /* Don't use the struct convention for anything but structure,
3176 union, or array types. */
3177 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3178 || TYPE_CODE (type) == TYPE_CODE_UNION
3179 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3180 return 0;
3181
3182 /* HFAs are structures (or arrays) consisting entirely of floating
3183 point values of the same length. Up to 8 of these are returned
3184 in registers. Don't use the struct convention when this is the
3185 case. */
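/* For example, "struct { double x, y, z; }" is an HFA of three
   doubles; such a value comes back in f8-f10 rather than through the
   struct convention.  */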
3186 float_elt_type = is_float_or_hfa_type (type);
3187 if (float_elt_type != NULL
3188 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3189 return 0;
3190
3191 /* Other structs of length 32 or less are returned in r8-r11.
3192 Don't use the struct convention for those either. */
3193 return TYPE_LENGTH (type) > 32;
3194 }
3195
3196 /* Return non-zero if TYPE is a structure or union type. */
3197
3198 static int
3199 ia64_struct_type_p (const struct type *type)
3200 {
3201 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3202 || TYPE_CODE (type) == TYPE_CODE_UNION);
3203 }
3204
3205 static void
3206 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3207 gdb_byte *valbuf)
3208 {
3209 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3210 struct type *float_elt_type;
3211
3212 float_elt_type = is_float_or_hfa_type (type);
3213 if (float_elt_type != NULL)
3214 {
3215 gdb_byte from[IA64_FP_REGISTER_SIZE];
3216 int offset = 0;
3217 int regnum = IA64_FR8_REGNUM;
3218 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3219
3220 while (n-- > 0)
3221 {
3222 regcache_cooked_read (regcache, regnum, from);
3223 convert_typed_floating (from, ia64_ext_type (gdbarch),
3224 (char *)valbuf + offset, float_elt_type);
3225 offset += TYPE_LENGTH (float_elt_type);
3226 regnum++;
3227 }
3228 }
3229 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3230 {
3231 /* This is an integral value, and its size is less than 8 bytes.
3232 These values are LSB-aligned, so extract the relevant bytes,
3233 and copy them into VALBUF. */
3234 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3235 so I suppose we should also add handling here for integral values
3236 whose size is greater than 8. But I wasn't able to create such
3237 a type, either in C or in Ada, so I'm not worrying about these yet. */
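/* For instance, a 2-byte short is returned in the low 16 bits of r8;
   store_unsigned_integer below then copies just those TYPE_LENGTH
   bytes into VALBUF in target byte order.  */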
3238 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3239 ULONGEST val;
3240
3241 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3242 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3243 }
3244 else
3245 {
3246 ULONGEST val;
3247 int offset = 0;
3248 int regnum = IA64_GR8_REGNUM;
3249 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3250 int n = TYPE_LENGTH (type) / reglen;
3251 int m = TYPE_LENGTH (type) % reglen;
3252
3253 while (n-- > 0)
3254 {
3255 ULONGEST val;
3256 regcache_cooked_read_unsigned (regcache, regnum, &val);
3257 memcpy ((char *)valbuf + offset, &val, reglen);
3258 offset += reglen;
3259 regnum++;
3260 }
3261
3262 if (m)
3263 {
3264 regcache_cooked_read_unsigned (regcache, regnum, &val);
3265 memcpy ((char *)valbuf + offset, &val, m);
3266 }
3267 }
3268 }
3269
3270 static void
3271 ia64_store_return_value (struct type *type, struct regcache *regcache,
3272 const gdb_byte *valbuf)
3273 {
3274 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3275 struct type *float_elt_type;
3276
3277 float_elt_type = is_float_or_hfa_type (type);
3278 if (float_elt_type != NULL)
3279 {
3280 gdb_byte to[IA64_FP_REGISTER_SIZE];
3281 int offset = 0;
3282 int regnum = IA64_FR8_REGNUM;
3283 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3284
3285 while (n-- > 0)
3286 {
3287 convert_typed_floating ((char *)valbuf + offset, float_elt_type,
3288 to, ia64_ext_type (gdbarch));
3289 regcache_cooked_write (regcache, regnum, to);
3290 offset += TYPE_LENGTH (float_elt_type);
3291 regnum++;
3292 }
3293 }
3294 else
3295 {
3296 ULONGEST val;
3297 int offset = 0;
3298 int regnum = IA64_GR8_REGNUM;
3299 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3300 int n = TYPE_LENGTH (type) / reglen;
3301 int m = TYPE_LENGTH (type) % reglen;
3302
3303 while (n-- > 0)
3304 {
3305 ULONGEST val;
3306 memcpy (&val, (char *)valbuf + offset, reglen);
3307 regcache_cooked_write_unsigned (regcache, regnum, val);
3308 offset += reglen;
3309 regnum++;
3310 }
3311
3312 if (m)
3313 {
3314 memcpy (&val, (char *)valbuf + offset, m);
3315 regcache_cooked_write_unsigned (regcache, regnum, val);
3316 }
3317 }
3318 }
3319
3320 static enum return_value_convention
3321 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3322 struct type *valtype, struct regcache *regcache,
3323 gdb_byte *readbuf, const gdb_byte *writebuf)
3324 {
3325 int struct_return = ia64_use_struct_convention (valtype);
3326
3327 if (writebuf != NULL)
3328 {
3329 gdb_assert (!struct_return);
3330 ia64_store_return_value (valtype, regcache, writebuf);
3331 }
3332
3333 if (readbuf != NULL)
3334 {
3335 gdb_assert (!struct_return);
3336 ia64_extract_return_value (valtype, regcache, readbuf);
3337 }
3338
3339 if (struct_return)
3340 return RETURN_VALUE_STRUCT_CONVENTION;
3341 else
3342 return RETURN_VALUE_REGISTER_CONVENTION;
3343 }
3344
3345 static int
3346 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3347 {
3348 switch (TYPE_CODE (t))
3349 {
3350 case TYPE_CODE_FLT:
3351 if (*etp)
3352 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3353 else
3354 {
3355 *etp = t;
3356 return 1;
3357 }
3358 break;
3359 case TYPE_CODE_ARRAY:
3360 return
3361 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3362 etp);
3363 break;
3364 case TYPE_CODE_STRUCT:
3365 {
3366 int i;
3367
3368 for (i = 0; i < TYPE_NFIELDS (t); i++)
3369 if (!is_float_or_hfa_type_recurse
3370 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3371 return 0;
3372 return 1;
3373 }
3374 break;
3375 default:
3376 return 0;
3377 break;
3378 }
3379 }
3380
3381 /* Determine if the given type is one of the floating point types or
3382 an HFA (which is a struct, array, or combination thereof whose
3383 bottom-most elements are all of the same floating point type). */
3384
3385 static struct type *
3386 is_float_or_hfa_type (struct type *t)
3387 {
3388 struct type *et = 0;
3389
3390 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3391 }
3392
3393
3394 /* Return 1 if the alignment of T is such that the next even slot
3395 should be used. Return 0 if the next available slot should
3396 be used. (See section 8.5.1 of the IA-64 Software Conventions
3397 and Runtime manual). */
3398
3399 static int
3400 slot_alignment_is_next_even (struct type *t)
3401 {
3402 switch (TYPE_CODE (t))
3403 {
3404 case TYPE_CODE_INT:
3405 case TYPE_CODE_FLT:
3406 if (TYPE_LENGTH (t) > 8)
3407 return 1;
3408 else
3409 return 0;
3410 case TYPE_CODE_ARRAY:
3411 return
3412 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3413 case TYPE_CODE_STRUCT:
3414 {
3415 int i;
3416
3417 for (i = 0; i < TYPE_NFIELDS (t); i++)
3418 if (slot_alignment_is_next_even
3419 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3420 return 1;
3421 return 0;
3422 }
3423 default:
3424 return 0;
3425 }
3426 }
3427
3428 /* Attempt to find (and return) the global pointer for the given
3429 function.
3430
3431 This is a rather nasty bit of code that searches for the .dynamic section
3432 in the objfile corresponding to the pc of the function we're trying
3433 to call. Once it finds the address at which the .dynamic section
3434 lives in the child process, it scans the Elf64_Dyn entries for a
3435 DT_PLTGOT tag. If it finds one of these, the corresponding
3436 d_un.d_ptr value is the global pointer. */
3437
3438 static CORE_ADDR
3439 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3440 CORE_ADDR faddr)
3441 {
3442 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3443 struct obj_section *faddr_sect;
3444
3445 faddr_sect = find_pc_section (faddr);
3446 if (faddr_sect != NULL)
3447 {
3448 struct obj_section *osect;
3449
3450 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3451 {
3452 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3453 break;
3454 }
3455
3456 if (osect < faddr_sect->objfile->sections_end)
3457 {
3458 CORE_ADDR addr, endaddr;
3459
3460 addr = obj_section_addr (osect);
3461 endaddr = obj_section_endaddr (osect);
3462
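/* Walk the Elf64_Dyn entries. Each entry is 16 bytes: an 8-byte
d_tag followed by an 8-byte d_un value, which is why we read 8 bytes
for the tag and advance by 16 at the bottom of the loop. */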
3463 while (addr < endaddr)
3464 {
3465 int status;
3466 LONGEST tag;
3467 gdb_byte buf[8];
3468
3469 status = target_read_memory (addr, buf, sizeof (buf));
3470 if (status != 0)
3471 break;
3472 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3473
3474 if (tag == DT_PLTGOT)
3475 {
3476 CORE_ADDR global_pointer;
3477
3478 status = target_read_memory (addr + 8, buf, sizeof (buf));
3479 if (status != 0)
3480 break;
3481 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3482 byte_order);
3483
3484 /* The payoff... */
3485 return global_pointer;
3486 }
3487
3488 if (tag == DT_NULL)
3489 break;
3490
3491 addr += 16;
3492 }
3493 }
3494 }
3495 return 0;
3496 }
3497
3498 /* Attempt to find (and return) the global pointer for the given
3499 function. We first try the find_global_pointer_from_solib routine
3500 from the gdbarch tdep vector, if provided. If that does not work,
3501 we fall back to ia64_find_global_pointer_from_dynamic_section. */
3502
3503 static CORE_ADDR
3504 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3505 {
3506 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3507 CORE_ADDR addr = 0;
3508
3509 if (tdep->find_global_pointer_from_solib)
3510 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3511 if (addr == 0)
3512 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3513 return addr;
3514 }
3515
3516 /* Given a function's address, attempt to find (and return) the
3517 corresponding (canonical) function descriptor. Return 0 if
3518 not found. */
3519 static CORE_ADDR
3520 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3521 {
3522 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3523 struct obj_section *faddr_sect;
3524
3525 /* Return early if faddr is already a function descriptor. */
3526 faddr_sect = find_pc_section (faddr);
3527 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3528 return faddr;
3529
3530 if (faddr_sect != NULL)
3531 {
3532 struct obj_section *osect;
3533 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3534 {
3535 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3536 break;
3537 }
3538
3539 if (osect < faddr_sect->objfile->sections_end)
3540 {
3541 CORE_ADDR addr, endaddr;
3542
3543 addr = obj_section_addr (osect);
3544 endaddr = obj_section_endaddr (osect);
3545
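/* Each .opd entry is a 16-byte function descriptor: an 8-byte entry
point address followed by an 8-byte global pointer value. Only the
entry point needs to be compared against FADDR. */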
3546 while (addr < endaddr)
3547 {
3548 int status;
3549 LONGEST faddr2;
3550 gdb_byte buf[8];
3551
3552 status = target_read_memory (addr, buf, sizeof (buf));
3553 if (status != 0)
3554 break;
3555 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3556
3557 if (faddr == faddr2)
3558 return addr;
3559
3560 addr += 16;
3561 }
3562 }
3563 }
3564 return 0;
3565 }
3566
3567 /* Attempt to find a function descriptor corresponding to the
3568 given address. If none is found, construct one on the
3569 stack using the address at fdaptr. */
3570
3571 static CORE_ADDR
3572 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3573 {
3574 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3575 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3576 CORE_ADDR fdesc;
3577
3578 fdesc = find_extant_func_descr (gdbarch, faddr);
3579
3580 if (fdesc == 0)
3581 {
3582 ULONGEST global_pointer;
3583 gdb_byte buf[16];
3584
3585 fdesc = *fdaptr;
3586 *fdaptr += 16;
3587
3588 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3589
3590 if (global_pointer == 0)
3591 regcache_cooked_read_unsigned (regcache,
3592 IA64_GR1_REGNUM, &global_pointer);
3593
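/* A descriptor is two doublewords: the function's entry point
followed by its global pointer (gp). */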
3594 store_unsigned_integer (buf, 8, byte_order, faddr);
3595 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3596
3597 write_memory (fdesc, buf, 16);
3598 }
3599
3600 return fdesc;
3601 }
3602
3603 /* Use the following routine when printing out function pointers
3604 so the user can see the function address rather than just the
3605 function descriptor. */
3606 static CORE_ADDR
3607 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3608 struct target_ops *targ)
3609 {
3610 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3611 struct obj_section *s;
3612 gdb_byte buf[8];
3613
3614 s = find_pc_section (addr);
3615
3616 /* Check whether ADDR points to a function descriptor. */
3617 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3618 return read_memory_unsigned_integer (addr, 8, byte_order);
3619
3620 /* Normally, functions live inside a section that is executable.
3621 So, if ADDR points to a non-executable section, then treat it
3622 as a function descriptor and return the target address iff
3623 the target address itself points to a section that is executable.
3624 First check that all 8 bytes of memory are readable. */
3625 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3626 && target_read_memory (addr, buf, 8) == 0)
3627 {
3628 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3629 struct obj_section *pc_section = find_pc_section (pc);
3630
3631 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3632 return pc;
3633 }
3634
3635 /* There are also descriptors embedded in vtables. */
3636 if (s)
3637 {
3638 struct bound_minimal_symbol minsym;
3639
3640 minsym = lookup_minimal_symbol_by_pc (addr);
3641
3642 if (minsym.minsym
3643 && is_vtable_name (MSYMBOL_LINKAGE_NAME (minsym.minsym)))
3644 return read_memory_unsigned_integer (addr, 8, byte_order);
3645 }
3646
3647 return addr;
3648 }
3649
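/* Implement the "frame_align" gdbarch method. The ia64 calling
conventions require a 16-byte-aligned stack pointer, so round SP
down. */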
3650 static CORE_ADDR
3651 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3652 {
3653 return sp & ~0xfLL;
3654 }
3655
3656 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3657
3658 static void
3659 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3660 {
3661 ULONGEST cfm, pfs, new_bsp;
3662
3663 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3664
3665 new_bsp = rse_address_add (bsp, sof);
3666 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3667
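/* In effect, mimic a call: preserve the privilege-level bits of
ar.pfs, copy the current frame marker into its pfm field, and then
install a new CFM describing a frame of SOF registers with no locals
and no register rotation. */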
3668 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3669 pfs &= 0xc000000000000000LL;
3670 pfs |= (cfm & 0xffffffffffffLL);
3671 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3672
3673 cfm &= 0xc000000000000000LL;
3674 cfm |= sof;
3675 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3676 }
3677
3678 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3679 ia64. */
3680
3681 static void
3682 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3683 int slotnum, gdb_byte *buf)
3684 {
3685 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3686 }
3687
3688 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3689
3690 static void
3691 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3692 {
3693 /* Nothing needed. */
3694 }
3695
3696 static CORE_ADDR
3697 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3698 struct regcache *regcache, CORE_ADDR bp_addr,
3699 int nargs, struct value **args, CORE_ADDR sp,
3700 int struct_return, CORE_ADDR struct_addr)
3701 {
3702 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3703 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3704 int argno;
3705 struct value *arg;
3706 struct type *type;
3707 int len, argoffset;
3708 int nslots, rseslots, memslots, slotnum, nfuncargs;
3709 int floatreg;
3710 ULONGEST bsp;
3711 CORE_ADDR funcdescaddr, global_pointer;
3712 CORE_ADDR func_addr = find_function_addr (function, NULL);
3713
3714 nslots = 0;
3715 nfuncargs = 0;
3716 /* Count the number of slots needed for the arguments. */
3717 for (argno = 0; argno < nargs; argno++)
3718 {
3719 arg = args[argno];
3720 type = check_typedef (value_type (arg));
3721 len = TYPE_LENGTH (type);
3722
3723 if ((nslots & 1) && slot_alignment_is_next_even (type))
3724 nslots++;
3725
3726 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3727 nfuncargs++;
3728
3729 nslots += (len + 7) / 8;
3730 }
3731
3732 /* Divvy up the slots between the RSE and the memory stack. */
3733 rseslots = (nslots > 8) ? 8 : nslots;
3734 memslots = nslots - rseslots;
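/* Per the ia64 software conventions, only the first eight argument
slots are passed in (stacked) registers; the remaining slots go on
the memory stack, just above the 16-byte scratch area. */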
3735
3736 /* Allocate a new RSE frame. */
3737 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3738 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3739
3740 /* We will attempt to find function descriptors in the .opd segment,
3741 but if we can't we'll construct them ourselves. That being the
3742 case, we'll need to reserve space on the stack for them. */
3743 funcdescaddr = sp - nfuncargs * 16;
3744 funcdescaddr &= ~0xfLL;
3745
3746 /* Adjust the stack pointer to its new value. The calling conventions
3747 require us to have 16 bytes of scratch, plus whatever space is
3748 necessary for the memory slots and our function descriptors. */
3749 sp = sp - 16 - (memslots + nfuncargs) * 8;
3750 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3751
3752 /* Place the arguments where they belong. The arguments will be
3753 either placed in the RSE backing store or on the memory stack.
3754 In addition, floating point arguments or HFAs are placed in
3755 floating point registers. */
3756 slotnum = 0;
3757 floatreg = IA64_FR8_REGNUM;
3758 for (argno = 0; argno < nargs; argno++)
3759 {
3760 struct type *float_elt_type;
3761
3762 arg = args[argno];
3763 type = check_typedef (value_type (arg));
3764 len = TYPE_LENGTH (type);
3765
3766 /* Special handling for function parameters. */
3767 if (len == 8
3768 && TYPE_CODE (type) == TYPE_CODE_PTR
3769 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3770 {
3771 gdb_byte val_buf[8];
3772 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3773 8, byte_order);
3774 store_unsigned_integer (val_buf, 8, byte_order,
3775 find_func_descr (regcache, faddr,
3776 &funcdescaddr));
3777 if (slotnum < rseslots)
3778 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3779 slotnum, val_buf);
3780 else
3781 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3782 slotnum++;
3783 continue;
3784 }
3785
3786 /* Normal slots. */
3787
3788 /* Skip odd slot if necessary... */
3789 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3790 slotnum++;
3791
3792 argoffset = 0;
3793 while (len > 0)
3794 {
3795 gdb_byte val_buf[8];
3796
3797 memset (val_buf, 0, 8);
3798 if (!ia64_struct_type_p (type) && len < 8)
3799 {
3800 /* Integral types are LSB-aligned, so we have to be careful
3801 to insert the argument on the correct side of the buffer.
3802 This is why we use store_unsigned_integer. */
3803 store_unsigned_integer
3804 (val_buf, 8, byte_order,
3805 extract_unsigned_integer (value_contents (arg), len,
3806 byte_order));
3807 }
3808 else
3809 {
3810 /* This is either an 8-byte integral type, or an aggregate.
3811 For an 8-byte integral type there is no problem: we just
3812 copy the value over.
3813
3814 For aggregates, the only potentially tricky portion
3815 is writing the last block if it is shorter than 8 bytes.
3816 In that case the data is Byte0-aligned, which happily
3817 means that we don't need to differentiate between
3818 full 8-byte blocks and a shorter final block. */
3819 memcpy (val_buf, value_contents (arg) + argoffset,
3820 (len > 8) ? 8 : len);
3821 }
3822
3823 if (slotnum < rseslots)
3824 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3825 slotnum, val_buf);
3826 else
3827 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3828
3829 argoffset += 8;
3830 len -= 8;
3831 slotnum++;
3832 }
3833
3834 /* Handle floating point types (including HFAs). */
3835 float_elt_type = is_float_or_hfa_type (type);
3836 if (float_elt_type != NULL)
3837 {
3838 argoffset = 0;
3839 len = TYPE_LENGTH (type);
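/* Floating-point and HFA arguments are additionally passed in the
FP argument registers f8 through f15; anything beyond that lives
only in the general slots stored above. */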
3840 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3841 {
3842 gdb_byte to[IA64_FP_REGISTER_SIZE];
3843 convert_typed_floating (value_contents (arg) + argoffset,
3844 float_elt_type, to,
3845 ia64_ext_type (gdbarch));
3846 regcache_cooked_write (regcache, floatreg, to);
3847 floatreg++;
3848 argoffset += TYPE_LENGTH (float_elt_type);
3849 len -= TYPE_LENGTH (float_elt_type);
3850 }
3851 }
3852 }
3853
3854 /* Store the struct return value in r8 if necessary. */
3855 if (struct_return)
3856 {
3857 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3858 (ULONGEST) struct_addr);
3859 }
3860
3861 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3862
3863 if (global_pointer != 0)
3864 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3865
3866 /* The following is not necessary on HP-UX, because we're using
3867 a dummy code sequence pushed on the stack to make the call, and
3868 this sequence doesn't need b0 to be set in order for our dummy
3869 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3870 it's needed for other OSes, so we do this unconditionally. */
3871 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3872
3873 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3874
3875 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3876
3877 return sp;
3878 }
3879
3880 static const struct ia64_infcall_ops ia64_infcall_ops =
3881 {
3882 ia64_allocate_new_rse_frame,
3883 ia64_store_argument_in_slot,
3884 ia64_set_function_addr
3885 };
3886
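/* Implement the "dummy_id" gdbarch method. The dummy frame is
identified by the stack pointer, the frame's PC and the RSE backing
store pointer. */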
3887 static struct frame_id
3888 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3889 {
3890 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3891 gdb_byte buf[8];
3892 CORE_ADDR sp, bsp;
3893
3894 get_frame_register (this_frame, sp_regnum, buf);
3895 sp = extract_unsigned_integer (buf, 8, byte_order);
3896
3897 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3898 bsp = extract_unsigned_integer (buf, 8, byte_order);
3899
3900 if (gdbarch_debug >= 1)
3901 fprintf_unfiltered (gdb_stdlog,
3902 "dummy frame id: code %s, stack %s, special %s\n",
3903 paddress (gdbarch, get_frame_pc (this_frame)),
3904 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3905
3906 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3907 }
3908
3909 static CORE_ADDR
3910 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3911 {
3912 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3913 gdb_byte buf[8];
3914 CORE_ADDR ip, psr, pc;
3915
3916 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3917 ip = extract_unsigned_integer (buf, 8, byte_order);
3918 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3919 psr = extract_unsigned_integer (buf, 8, byte_order);
3920
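/* The psr.ri field (bits 41:42) holds the slot number of the
instruction within its bundle; fold it into the low bits of the
bundle address to form GDB's PC representation. */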
3921 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3922 return pc;
3923 }
3924
3925 static int
3926 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3927 {
3928 info->bytes_per_line = SLOT_MULTIPLIER;
3929 return default_print_insn (memaddr, info);
3930 }
3931
3932 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3933
3934 static int
3935 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3936 {
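/* The size-of-frame (sof) field occupies the low 7 bits of CFM. */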
3937 return (cfm & 0x7f);
3938 }
3939
3940 static struct gdbarch *
3941 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3942 {
3943 struct gdbarch *gdbarch;
3944 struct gdbarch_tdep *tdep;
3945
3946 /* If there is already a candidate, use it. */
3947 arches = gdbarch_list_lookup_by_info (arches, &info);
3948 if (arches != NULL)
3949 return arches->gdbarch;
3950
3951 tdep = XCNEW (struct gdbarch_tdep);
3952 gdbarch = gdbarch_alloc (&info, tdep);
3953
3954 tdep->size_of_register_frame = ia64_size_of_register_frame;
3955
3956 /* According to the ia64 specs, instructions that store long double
3957 floats in memory use a long-double format different from that
3958 used in the floating registers. The memory format matches the
3959 x86 extended float format which is 80 bits. An OS may choose to
3960 use this format (e.g. GNU/Linux) or choose to use a different
3961 format for storing long doubles (e.g. HP-UX). In the latter case,
3962 the setting of the format may be moved/overridden in an
3963 OS-specific tdep file. */
3964 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3965
3966 set_gdbarch_short_bit (gdbarch, 16);
3967 set_gdbarch_int_bit (gdbarch, 32);
3968 set_gdbarch_long_bit (gdbarch, 64);
3969 set_gdbarch_long_long_bit (gdbarch, 64);
3970 set_gdbarch_float_bit (gdbarch, 32);
3971 set_gdbarch_double_bit (gdbarch, 64);
3972 set_gdbarch_long_double_bit (gdbarch, 128);
3973 set_gdbarch_ptr_bit (gdbarch, 64);
3974
3975 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3976 set_gdbarch_num_pseudo_regs (gdbarch,
3977 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3978 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3979 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3980
3981 set_gdbarch_register_name (gdbarch, ia64_register_name);
3982 set_gdbarch_register_type (gdbarch, ia64_register_type);
3983
3984 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3985 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3986 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3987 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3988 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3989 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3990 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3991
3992 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3993
3994 set_gdbarch_return_value (gdbarch, ia64_return_value);
3995
3996 set_gdbarch_memory_insert_breakpoint (gdbarch,
3997 ia64_memory_insert_breakpoint);
3998 set_gdbarch_memory_remove_breakpoint (gdbarch,
3999 ia64_memory_remove_breakpoint);
4000 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
4001 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
4002 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
4003 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
4004
4005 /* Settings for calling functions in the inferior. */
4006 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
4007 tdep->infcall_ops = ia64_infcall_ops;
4008 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
4009 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
4010
4011 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
4012 #ifdef HAVE_LIBUNWIND_IA64_H
4013 frame_unwind_append_unwinder (gdbarch,
4014 &ia64_libunwind_sigtramp_frame_unwind);
4015 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4016 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4017 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4018 #else
4019 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4020 #endif
4021 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4022 frame_base_set_default (gdbarch, &ia64_frame_base);
4023
4024 /* Settings that should be unnecessary. */
4025 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4026
4027 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4028 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4029 ia64_convert_from_func_ptr_addr);
4030
4031 /* The virtual table contains 16-byte descriptors, not pointers to
4032 descriptors. */
4033 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4034
4035 /* Hook in ABI-specific overrides, if they have been registered. */
4036 gdbarch_init_osabi (info, gdbarch);
4037
4038 return gdbarch;
4039 }
4040
4041 extern initialize_file_ftype _initialize_ia64_tdep; /* -Wmissing-prototypes */
4042
4043 void
4044 _initialize_ia64_tdep (void)
4045 {
4046 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4047 }