* config/mips/linux.mt (DEPRECATED_TM_FILE): Delete.
[deliverable/binutils-gdb.git] / gdb / spu-tdep.c
CommitLineData
771b4502 1/* SPU target-dependent code for GDB, the GNU debugger.
6aba47ca 2 Copyright (C) 2006, 2007 Free Software Foundation, Inc.
771b4502
UW
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
23
24#include "defs.h"
25#include "arch-utils.h"
26#include "gdbtypes.h"
27#include "gdbcmd.h"
28#include "gdbcore.h"
29#include "gdb_string.h"
30#include "gdb_assert.h"
31#include "frame.h"
32#include "frame-unwind.h"
33#include "frame-base.h"
34#include "trad-frame.h"
35#include "symtab.h"
36#include "symfile.h"
37#include "value.h"
38#include "inferior.h"
39#include "dis-asm.h"
40#include "objfiles.h"
41#include "language.h"
42#include "regcache.h"
43#include "reggroups.h"
44#include "floatformat.h"
dcf52cd8 45#include "observer.h"
771b4502
UW
46
47#include "spu-tdep.h"
48
f2d43c2c
UW
49/* SPU-specific vector type. */
50struct type *spu_builtin_type_vec128;
771b4502
UW
51
52/* Registers. */
53
/* Return the name of register REG_NR, or NULL if REG_NR is out of
   range.  Slots 0..127 name the 128 general-purpose registers; the
   final three entries name the ID, PC and SP registers.  */
static const char *
spu_register_name (int reg_nr)
{
  static const char *const names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp"
    };
  const int num_regs = sizeof names / sizeof names[0];

  if (reg_nr >= 0 && reg_nr < num_regs)
    return names[reg_nr];

  return NULL;
}
85
86static struct type *
87spu_register_type (struct gdbarch *gdbarch, int reg_nr)
88{
89 if (reg_nr < SPU_NUM_GPRS)
f2d43c2c 90 return spu_builtin_type_vec128;
771b4502
UW
91
92 switch (reg_nr)
93 {
94 case SPU_ID_REGNUM:
95 return builtin_type_uint32;
96
97 case SPU_PC_REGNUM:
98 return builtin_type_void_func_ptr;
99
100 case SPU_SP_REGNUM:
101 return builtin_type_void_data_ptr;
102
103 default:
104 internal_error (__FILE__, __LINE__, "invalid regnum");
105 }
106}
107
108/* Pseudo registers for preferred slots - stack pointer. */
109
110static void
111spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
112 int regnum, gdb_byte *buf)
113{
114 gdb_byte reg[16];
115
116 switch (regnum)
117 {
118 case SPU_SP_REGNUM:
119 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
120 memcpy (buf, reg, 4);
121 break;
122
123 default:
124 internal_error (__FILE__, __LINE__, _("invalid regnum"));
125 }
126}
127
128static void
129spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
130 int regnum, const gdb_byte *buf)
131{
132 gdb_byte reg[16];
133
134 switch (regnum)
135 {
136 case SPU_SP_REGNUM:
137 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
138 memcpy (reg, buf, 4);
139 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
140 break;
141
142 default:
143 internal_error (__FILE__, __LINE__, _("invalid regnum"));
144 }
145}
146
147/* Value conversion -- access scalar values at the preferred slot. */
148
9acbedc0
UW
149static struct value *
150spu_value_from_register (struct type *type, int regnum,
151 struct frame_info *frame)
771b4502 152{
9acbedc0
UW
153 struct value *value = default_value_from_register (type, regnum, frame);
154 int len = TYPE_LENGTH (type);
771b4502 155
9acbedc0
UW
156 if (regnum < SPU_NUM_GPRS && len < 16)
157 {
158 int preferred_slot = len < 4 ? 4 - len : 0;
159 set_value_offset (value, preferred_slot);
160 }
771b4502 161
9acbedc0 162 return value;
771b4502
UW
163}
164
165/* Register groups. */
166
167static int
168spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
169 struct reggroup *group)
170{
171 /* Registers displayed via 'info regs'. */
172 if (group == general_reggroup)
173 return 1;
174
175 /* Registers displayed via 'info float'. */
176 if (group == float_reggroup)
177 return 0;
178
179 /* Registers that need to be saved/restored in order to
180 push or pop frames. */
181 if (group == save_reggroup || group == restore_reggroup)
182 return 1;
183
184 return default_register_reggroup_p (gdbarch, regnum, group);
185}
186
187
188/* Decoding SPU instructions. */
189
/* Major opcodes of the SPU instructions recognized by the prologue
   analyzer, epilogue detector and software single-stepper.  Each value
   is the opcode field as extracted by the matching is_* decoder below
   (the field width differs per instruction format).  */
enum
  {
    /* Quadword loads and stores (d-form, x-form, a-form, relative).  */
    op_lqd = 0x34,
    op_lqx = 0x3c4,
    op_lqa = 0x61,
    op_lqr = 0x67,
    op_stqd = 0x24,
    op_stqx = 0x144,
    op_stqa = 0x41,
    op_stqr = 0x47,

    /* Immediate loads and integer adds (used to set up stack frames).  */
    op_il = 0x081,
    op_ila = 0x21,
    op_a = 0x0c0,
    op_ai = 0x1c,

    /* Select bits (used by _start to set up the stack pointer).  */
    op_selb = 0x4,

    /* Branches: PC-relative, absolute, and register-indirect forms.  */
    op_br = 0x64,
    op_bra = 0x60,
    op_brsl = 0x66,
    op_brasl = 0x62,
    op_brnz = 0x42,
    op_brz = 0x40,
    op_brhnz = 0x46,
    op_brhz = 0x44,
    op_bi = 0x1a8,
    op_bisl = 0x1a9,
    op_biz = 0x128,
    op_binz = 0x129,
    op_bihz = 0x12a,
    op_bihnz = 0x12b,
  };
223
/* Decode INSN as an RR-format instruction with 11-bit opcode OP.  On
   a match, store the RT, RA and RB register fields and return 1;
   otherwise return 0.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  return 1;
}
237
/* Decode INSN as an RRR-format instruction with 4-bit opcode OP.  On
   a match, store the RT, RA, RB and RC register fields and return 1;
   otherwise return 0.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) != op)
    return 0;

  *rt = (insn >> 21) & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  *rc = insn & 127;
  return 1;
}
252
/* Decode INSN as an RI7-format instruction with 11-bit opcode OP.  On
   a match, store RT and RA and the sign-extended 7-bit immediate I7,
   and return 1; otherwise return 0.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 7-bit field.  */
  *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
  return 1;
}
266
/* Decode INSN as an RI10-format instruction with 8-bit opcode OP.  On
   a match, store RT and RA and the sign-extended 10-bit immediate I10,
   and return 1; otherwise return 0.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 10-bit field.  */
  *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
  return 1;
}
280
/* Decode INSN as an RI16-format instruction with 9-bit opcode OP.  On
   a match, store RT and the sign-extended 16-bit immediate I16, and
   return 1; otherwise return 0.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 16-bit field.  */
  *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
  return 1;
}
293
/* Decode INSN as an RI18-format instruction with 7-bit opcode OP.  On
   a match, store RT and the sign-extended 18-bit immediate I18, and
   return 1; otherwise return 0.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 18-bit field.  */
  *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
  return 1;
}
306
307static int
308is_branch (unsigned int insn, int *offset, int *reg)
309{
310 int rt, i7, i16;
311
312 if (is_ri16 (insn, op_br, &rt, &i16)
313 || is_ri16 (insn, op_brsl, &rt, &i16)
314 || is_ri16 (insn, op_brnz, &rt, &i16)
315 || is_ri16 (insn, op_brz, &rt, &i16)
316 || is_ri16 (insn, op_brhnz, &rt, &i16)
317 || is_ri16 (insn, op_brhz, &rt, &i16))
318 {
319 *reg = SPU_PC_REGNUM;
320 *offset = i16 << 2;
321 return 1;
322 }
323
324 if (is_ri16 (insn, op_bra, &rt, &i16)
325 || is_ri16 (insn, op_brasl, &rt, &i16))
326 {
327 *reg = -1;
328 *offset = i16 << 2;
329 return 1;
330 }
331
332 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
333 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
334 || is_ri7 (insn, op_biz, &rt, reg, &i7)
335 || is_ri7 (insn, op_binz, &rt, reg, &i7)
336 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
337 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
338 {
339 *offset = 0;
340 return 1;
341 }
342
343 return 0;
344}
345
346
347/* Prolog parsing. */
348
/* Result of prologue analysis, filled in by spu_analyze_prologue.  */
struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };
361
/* Scan the prologue of the function starting at START_PC (stopping at
   END_PC at the latest) and fill in *DATA.  Return the address of the
   first instruction past the prologue.  */
static CORE_ADDR
spu_analyze_prologue (CORE_ADDR start_pc, CORE_ADDR end_pc,
                      struct spu_prologue_data *data)
{
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

	- The first instruction to set up the stack pointer.
	- The first instruction to set up the frame pointer.
	- The first instruction to save the link register.

     We return the instruction after the latest of these three,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

	- Any instruction adding to the current frame pointer.
	- Any instruction loading an immediate constant into a register.
	- Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
	break;
      insn = extract_unsigned_integer (buf, 4);

      /* AI is the typical instruction to set up a stack frame.
         It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    data->cfa_offset -= immed;

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      /* Frames grow downward; the AI immediate is negative.  */
	      data->size = -immed;
	    }
	  else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
		   && !found_fp)
	    {
	      found_fp = 1;
	      prolog_pc = pc + 4;

	      data->cfa_reg = SPU_FP_REGNUM;
	      data->cfa_offset -= immed;
	    }
	}

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    {
	      if (reg_immed[rb] != 0)
		data->cfa_offset -= reg_immed[rb];
	      else
		data->cfa_reg = -1;  /* We don't know the CFA any more.  */
	    }

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      if (reg_immed[rb] != 0)
		data->size = -reg_immed[rb];
	    }
	}

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
	{
	  reg_immed[rt] = immed;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      else if (is_ri18 (insn, op_ila, &rt, &immed))
	{
	  reg_immed[rt] = immed & 0x3ffff;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
	{
	  /* The STQD immediate is in quadwords; scale to bytes.  */
	  if (ra == data->cfa_reg)
	    data->reg_offset[rt] = data->cfa_offset - (immed << 4);

	  if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
	    {
	      found_lr = 1;
	      prolog_pc = pc + 4;
	    }
	}

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
	{
	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
	break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
533
/* Return the first instruction after the prologue starting at PC.  */
static CORE_ADDR
spu_skip_prologue (CORE_ADDR pc)
{
  /* Only the returned prologue-end address is used; the analysis
     results in DATA are discarded.  */
  struct spu_prologue_data data;
  return spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
}
541
542/* Return the frame pointer in use at address PC. */
543static void
544spu_virtual_frame_pointer (CORE_ADDR pc, int *reg, LONGEST *offset)
545{
546 struct spu_prologue_data data;
547 spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
548
549 if (data.size != -1 && data.cfa_reg != -1)
550 {
551 /* The 'frame pointer' address is CFA minus frame size. */
552 *reg = data.cfa_reg;
553 *offset = data.cfa_offset - data.size;
554 }
555 else
556 {
557 /* ??? We don't really know ... */
558 *reg = SPU_SP_REGNUM;
559 *offset = 0;
560 }
561}
562
fe5febed
UW
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
       a) If you find an instruction that modifies the stack pointer
          or transfers control (except a return), execution is not in
          an epilogue, return.
       b) Stop scanning if you find a return instruction or reach the
          end of the function or reach the hard limit for the size of
          an epilogue.
   2) scan backward from the point of execution:
        a) If you find an instruction that modifies the stack pointer,
           execution *is* in an epilogue, return.
        b) Stop scanning if you reach an instruction that transfers
           control or the beginning of the function or reach the hard
           limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4);

      if (is_branch (insn, &immed, &ra))
	{
	  /* 'bi $lr' with no offset is the return instruction.  */
	  if (immed == 0 && ra == SPU_LR_REGNUM)
	    break;

	  return 0;
	}

      /* Any SP modification (add or reload) before the return means
	 we are not in an epilogue.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 0;
	}
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4);

      if (is_branch (insn, &immed, &ra))
	return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 1;
	}
    }

  return 0;
}
656
657
771b4502
UW
658/* Normal stack frames. */
659
/* Per-frame unwind state, allocated on the frame obstack.  */
struct spu_unwind_cache
{
  CORE_ADDR func;        /* Start address of the frame's function.  */
  CORE_ADDR frame_base;  /* CFA; 0 if it could not be determined.  */
  CORE_ADDR local_base;  /* Bottom of the frame (CFA minus frame size).  */

  struct trad_frame_saved_reg *saved_regs;
};
668
/* Build (or return the cached) unwind cache for the frame above
   NEXT_FRAME.  Tries prologue analysis first; falls back to following
   the stack backchain when analysis fails.  */
static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *next_frame,
			void **this_prologue_cache)
{
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (next_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = frame_func_unwind (next_frame, NORMAL_FRAME);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = frame_pc_unwind (next_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (info->func, frame_pc_unwind (next_frame), &data);


  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      frame_unwind_register (next_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4) + data.cfa_offset;

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
	if (i == SPU_LR_REGNUM
	    || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
	  if (data.reg_offset[i] != -1)
	    info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg, backchain;

      /* Get the backchain.  */
      reg = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
      backchain = read_memory_unsigned_integer (reg, 4);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (backchain != 0 && backchain < SPU_LS_SIZE)
	{
	  /* Assume the link register is saved into its slot.  */
	  if (backchain + 16 < SPU_LS_SIZE)
	    info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;

	  /* Frame bases.  */
	  info->frame_base = backchain;
	  info->local_base = reg;
	}
    }

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    frame_unwind_register (next_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf + 8, 4));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf, 4));

  return info;
}
768
769static void
770spu_frame_this_id (struct frame_info *next_frame,
771 void **this_prologue_cache, struct frame_id *this_id)
772{
773 struct spu_unwind_cache *info =
774 spu_frame_unwind_cache (next_frame, this_prologue_cache);
775
776 if (info->frame_base == 0)
777 return;
778
779 *this_id = frame_id_build (info->frame_base, info->func);
780}
781
782static void
783spu_frame_prev_register (struct frame_info *next_frame,
784 void **this_prologue_cache,
785 int regnum, int *optimizedp,
786 enum lval_type *lvalp, CORE_ADDR * addrp,
787 int *realnump, gdb_byte *bufferp)
788{
789 struct spu_unwind_cache *info
790 = spu_frame_unwind_cache (next_frame, this_prologue_cache);
791
792 /* Special-case the stack pointer. */
793 if (regnum == SPU_RAW_SP_REGNUM)
794 regnum = SPU_SP_REGNUM;
795
796 trad_frame_get_prev_register (next_frame, info->saved_regs, regnum,
797 optimizedp, lvalp, addrp, realnump, bufferp);
798}
799
/* Unwinder callbacks for normal SPU stack frames.  */
static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  spu_frame_this_id,
  spu_frame_prev_register
};
805
/* Frame sniffer: the SPU unwinder handles every frame.  */
const struct frame_unwind *
spu_frame_sniffer (struct frame_info *next_frame)
{
  return &spu_frame_unwind;
}
811
812static CORE_ADDR
813spu_frame_base_address (struct frame_info *next_frame, void **this_cache)
814{
815 struct spu_unwind_cache *info
816 = spu_frame_unwind_cache (next_frame, this_cache);
817 return info->local_base;
818}
819
/* Frame base handlers; the SPU uses the local base address for the
   frame, locals and arguments alike.  */
static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};
826
827static CORE_ADDR
828spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
829{
118dfbaf
UW
830 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
831 /* Mask off interrupt enable bit. */
832 return pc & -4;
771b4502
UW
833}
834
/* Unwind the (cooked) SP of the frame above NEXT_FRAME.  */
static CORE_ADDR
spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
}
840
118dfbaf
UW
841static CORE_ADDR
842spu_read_pc (ptid_t ptid)
843{
844 CORE_ADDR pc = read_register_pid (SPU_PC_REGNUM, ptid);
845 /* Mask off interrupt enable bit. */
846 return pc & -4;
847}
848
849static void
850spu_write_pc (CORE_ADDR pc, ptid_t ptid)
851{
852 /* Keep interrupt enabled state unchanged. */
853 CORE_ADDR old_pc = read_register_pid (SPU_PC_REGNUM, ptid);
854 write_register_pid (SPU_PC_REGNUM, (pc & -4) | (old_pc & 3), ptid);
855}
856
771b4502
UW
857
858/* Function calling convention. */
859
7b3dc0b7
UW
/* Round SP down to the 16-byte stack alignment used for dummy calls.  */
static CORE_ADDR
spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & ~15;
}
865
771b4502
UW
866static int
867spu_scalar_value_p (struct type *type)
868{
869 switch (TYPE_CODE (type))
870 {
871 case TYPE_CODE_INT:
872 case TYPE_CODE_ENUM:
873 case TYPE_CODE_RANGE:
874 case TYPE_CODE_CHAR:
875 case TYPE_CODE_BOOL:
876 case TYPE_CODE_PTR:
877 case TYPE_CODE_REF:
878 return TYPE_LENGTH (type) <= 16;
879
880 default:
881 return 0;
882 }
883}
884
885static void
886spu_value_to_regcache (struct regcache *regcache, int regnum,
887 struct type *type, const gdb_byte *in)
888{
889 int len = TYPE_LENGTH (type);
890
891 if (spu_scalar_value_p (type))
892 {
893 int preferred_slot = len < 4 ? 4 - len : 0;
894 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
895 }
896 else
897 {
898 while (len >= 16)
899 {
900 regcache_cooked_write (regcache, regnum++, in);
901 in += 16;
902 len -= 16;
903 }
904
905 if (len > 0)
906 regcache_cooked_write_part (regcache, regnum, 0, len, in);
907 }
908}
909
910static void
911spu_regcache_to_value (struct regcache *regcache, int regnum,
912 struct type *type, gdb_byte *out)
913{
914 int len = TYPE_LENGTH (type);
915
916 if (spu_scalar_value_p (type))
917 {
918 int preferred_slot = len < 4 ? 4 - len : 0;
919 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
920 }
921 else
922 {
923 while (len >= 16)
924 {
925 regcache_cooked_read (regcache, regnum++, out);
926 out += 16;
927 len -= 16;
928 }
929
930 if (len > 0)
931 regcache_cooked_read_part (regcache, regnum, 0, len, out);
932 }
933}
934
/* Set up the inferior stack and registers for a dummy call to
   FUNCTION with NARGS arguments ARGS, returning from the call to
   BP_ADDR.  Return the adjusted stack pointer.  */
static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr,
		     int nargs, struct value **args, CORE_ADDR sp,
		     int struct_return, CORE_ADDR struct_addr)
{
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, bp_addr);
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, struct_addr);
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      /* Each argument occupies a whole number of 16-byte registers.  */
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
	 all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
	{
	  stack_arg = i;
	  break;
	}

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
	{
	  struct type *type = check_typedef (value_type (args[i]));
	  sp -= align_up (TYPE_LENGTH (type), 16);
	}

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
	{
	  struct value *arg = args[i];
	  struct type *type = check_typedef (value_type (arg));
	  int len = TYPE_LENGTH (type);
	  int preferred_slot;
	  
	  /* Scalars are right-aligned within the first word of their
	     16-byte stack slot, mirroring the register convention.  */
	  if (spu_scalar_value_p (type))
	    preferred_slot = len < 4 ? 4 - len : 0;
	  else
	    preferred_slot = 0;

	  target_write_memory (ap + preferred_slot, value_contents (arg), len);
	  ap += align_up (TYPE_LENGTH (type), 16);
	}
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, SPU_SP_REGNUM, sp);

  return sp;
}
1021
/* Build the frame ID of a dummy frame: the unwound SP paired with the
   unwound PC (the call-dummy breakpoint address).  */
static struct frame_id
spu_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  return frame_id_build (spu_unwind_sp (gdbarch, next_frame),
			 spu_unwind_pc (gdbarch, next_frame));
}
1028
1029/* Function return value access. */
1030
1031static enum return_value_convention
1032spu_return_value (struct gdbarch *gdbarch, struct type *type,
1033 struct regcache *regcache, gdb_byte *out, const gdb_byte *in)
1034{
1035 enum return_value_convention rvc;
1036
1037 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1038 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1039 else
1040 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1041
1042 if (in)
1043 {
1044 switch (rvc)
1045 {
1046 case RETURN_VALUE_REGISTER_CONVENTION:
1047 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1048 break;
1049
1050 case RETURN_VALUE_STRUCT_CONVENTION:
1051 error ("Cannot set function return value.");
1052 break;
1053 }
1054 }
1055 else if (out)
1056 {
1057 switch (rvc)
1058 {
1059 case RETURN_VALUE_REGISTER_CONVENTION:
1060 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1061 break;
1062
1063 case RETURN_VALUE_STRUCT_CONVENTION:
1064 error ("Function return value unknown.");
1065 break;
1066 }
1067 }
1068
1069 return rvc;
1070}
1071
1072
1073/* Breakpoints. */
1074
/* Return the software breakpoint instruction for address *PCPTR and
   set *LENPTR to its length.  */
static const gdb_byte *
spu_breakpoint_from_pc (CORE_ADDR * pcptr, int *lenptr)
{
  /* Four-byte breakpoint pattern; *PCPTR is left unchanged.  */
  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };

  *lenptr = sizeof breakpoint;
  return breakpoint;
}
1083
1084
1085/* Software single-stepping support. */
1086
e6590a1b 1087int
e0cd558a 1088spu_software_single_step (struct regcache *regcache)
771b4502 1089{
e0cd558a
UW
1090 CORE_ADDR pc, next_pc;
1091 unsigned int insn;
1092 int offset, reg;
1093 gdb_byte buf[4];
771b4502 1094
e0cd558a
UW
1095 regcache_cooked_read (regcache, SPU_PC_REGNUM, buf);
1096 /* Mask off interrupt enable bit. */
1097 pc = extract_unsigned_integer (buf, 4) & -4;
771b4502 1098
e0cd558a
UW
1099 if (target_read_memory (pc, buf, 4))
1100 return 1;
1101 insn = extract_unsigned_integer (buf, 4);
771b4502 1102
e0cd558a
UW
1103 /* Next sequential instruction is at PC + 4, except if the current
1104 instruction is a PPE-assisted call, in which case it is at PC + 8.
1105 Wrap around LS limit to be on the safe side. */
1106 if ((insn & 0xffffff00) == 0x00002100)
1107 next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
1108 else
1109 next_pc = (pc + 4) & (SPU_LS_SIZE - 1);
771b4502 1110
e0cd558a 1111 insert_single_step_breakpoint (next_pc);
771b4502 1112
e0cd558a
UW
1113 if (is_branch (insn, &offset, &reg))
1114 {
1115 CORE_ADDR target = offset;
771b4502 1116
e0cd558a
UW
1117 if (reg == SPU_PC_REGNUM)
1118 target += pc;
1119 else if (reg != -1)
1120 {
1121 regcache_cooked_read_part (regcache, reg, 0, 4, buf);
1122 target += extract_unsigned_integer (buf, 4) & -4;
771b4502 1123 }
e0cd558a
UW
1124
1125 target = target & (SPU_LS_SIZE - 1);
1126 if (target != next_pc)
1127 insert_single_step_breakpoint (target);
771b4502 1128 }
e6590a1b
UW
1129
1130 return 1;
771b4502
UW
1131}
1132
dcf52cd8
UW
1133/* Target overlays for the SPU overlay manager.
1134
1135 See the documentation of simple_overlay_update for how the
1136 interface is supposed to work.
1137
1138 Data structures used by the overlay manager:
1139
1140 struct ovly_table
1141 {
1142 u32 vma;
1143 u32 size;
1144 u32 pos;
1145 u32 buf;
1146 } _ovly_table[]; -- one entry per overlay section
1147
1148 struct ovly_buf_table
1149 {
1150 u32 mapped;
1151 } _ovly_buf_table[]; -- one entry per overlay buffer
1152
1153 _ovly_table should never change.
1154
1155 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1156 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1157 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1158
1159 mapped is an index into _ovly_table. Both the mapped and buf indices start
1160 from one to reference the first entry in their respective tables. */
1161
1162/* Using the per-objfile private data mechanism, we store for each
1163 objfile an array of "struct spu_overlay_table" structures, one
1164 for each obj_section of the objfile. This structure holds two
1165 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1166 is *not* an overlay section. If it is non-zero, it represents
1167 a target address. The overlay section is mapped iff the target
1168 integer at this location equals MAPPED_VAL. */
1169
/* Per-objfile registry key under which the cached overlay table
   (an array of struct spu_overlay_table, one per obj_section) is
   stored; registered in _initialize_spu_tdep.  */
static const struct objfile_data *spu_overlay_data;

struct spu_overlay_table
  {
    /* Target address of this section's _ovly_buf_table "mapped" word,
       or zero if the section is not an overlay section.  */
    CORE_ADDR mapped_ptr;
    /* Value (1-based _ovly_table index) that the word at MAPPED_PTR
       holds while this section is mapped.  */
    CORE_ADDR mapped_val;
  };
1177
1178/* Retrieve the overlay table for OBJFILE. If not already cached, read
1179 the _ovly_table data structure from the target and initialize the
1180 spu_overlay_table data structure from it. */
1181static struct spu_overlay_table *
1182spu_get_overlay_table (struct objfile *objfile)
1183{
1184 struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1185 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1186 unsigned ovly_table_size, ovly_buf_table_size;
1187 struct spu_overlay_table *tbl;
1188 struct obj_section *osect;
1189 char *ovly_table;
1190 int i;
1191
1192 tbl = objfile_data (objfile, spu_overlay_data);
1193 if (tbl)
1194 return tbl;
1195
1196 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1197 if (!ovly_table_msym)
1198 return NULL;
1199
1200 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1201 if (!ovly_buf_table_msym)
1202 return NULL;
1203
1204 ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1205 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1206
1207 ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1208 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1209
1210 ovly_table = xmalloc (ovly_table_size);
1211 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1212
1213 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1214 objfile->sections_end - objfile->sections,
1215 struct spu_overlay_table);
1216
1217 for (i = 0; i < ovly_table_size / 16; i++)
1218 {
1219 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
1220 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
1221 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
1222 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);
1223
1224 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1225 continue;
1226
1227 ALL_OBJFILE_OSECTIONS (objfile, osect)
1228 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1229 && pos == osect->the_bfd_section->filepos)
1230 {
1231 int ndx = osect - objfile->sections;
1232 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1233 tbl[ndx].mapped_val = i + 1;
1234 break;
1235 }
1236 }
1237
1238 xfree (ovly_table);
1239 set_objfile_data (objfile, spu_overlay_data, tbl);
1240 return tbl;
1241}
1242
/* Read the _ovly_buf_table entry from the target to determine whether
   OSECT is currently mapped, and update the mapped state.  */
1245static void
1246spu_overlay_update_osect (struct obj_section *osect)
1247{
1248 struct spu_overlay_table *ovly_table;
1249 CORE_ADDR val;
1250
1251 ovly_table = spu_get_overlay_table (osect->objfile);
1252 if (!ovly_table)
1253 return;
1254
1255 ovly_table += osect - osect->objfile->sections;
1256 if (ovly_table->mapped_ptr == 0)
1257 return;
1258
1259 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
1260 osect->ovly_mapped = (val == ovly_table->mapped_val);
1261}
1262
1263/* If OSECT is NULL, then update all sections' mapped state.
1264 If OSECT is non-NULL, then update only OSECT's mapped state. */
1265static void
1266spu_overlay_update (struct obj_section *osect)
1267{
1268 /* Just one section. */
1269 if (osect)
1270 spu_overlay_update_osect (osect);
1271
1272 /* All sections. */
1273 else
1274 {
1275 struct objfile *objfile;
1276
1277 ALL_OBJSECTIONS (objfile, osect)
1278 if (section_is_overlay (osect->the_bfd_section))
1279 spu_overlay_update_osect (osect);
1280 }
1281}
1282
1283/* Whenever a new objfile is loaded, read the target's _ovly_table.
1284 If there is one, go through all sections and make sure for non-
1285 overlay sections LMA equals VMA, while for overlay sections LMA
1286 is larger than local store size. */
1287static void
1288spu_overlay_new_objfile (struct objfile *objfile)
1289{
1290 struct spu_overlay_table *ovly_table;
1291 struct obj_section *osect;
1292
1293 /* If we've already touched this file, do nothing. */
1294 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1295 return;
1296
1297 /* Check if this objfile has overlays. */
1298 ovly_table = spu_get_overlay_table (objfile);
1299 if (!ovly_table)
1300 return;
1301
1302 /* Now go and fiddle with all the LMAs. */
1303 ALL_OBJFILE_OSECTIONS (objfile, osect)
1304 {
1305 bfd *obfd = objfile->obfd;
1306 asection *bsect = osect->the_bfd_section;
1307 int ndx = osect - objfile->sections;
1308
1309 if (ovly_table[ndx].mapped_ptr == 0)
1310 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1311 else
1312 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1313 }
1314}
1315
771b4502
UW
1316
/* Set up gdbarch struct.  Called via the gdbarch framework (see
   register_gdbarch_init in _initialize_spu_tdep) whenever GDB needs
   an architecture object for an SPU BFD.  Returns an existing match
   from ARCHES if possible, NULL if INFO does not describe an SPU, or
   a freshly allocated and populated gdbarch otherwise.  */

static struct gdbarch *
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;

  /* Find a candidate among the list of pre-declared architectures.  */
  arches = gdbarch_list_lookup_by_info (arches, &info);
  if (arches != NULL)
    return arches->gdbarch;

  /* Is it for us?  */
  if (info.bfd_arch_info->mach != bfd_mach_spu)
    return NULL;

  /* Yes, create a new architecture.  */
  gdbarch = gdbarch_alloc (&info, NULL);

  /* Disassembler.  */
  set_gdbarch_print_insn (gdbarch, print_insn_spu);

  /* Registers.  */
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
  set_gdbarch_register_name (gdbarch, spu_register_name);
  set_gdbarch_register_type (gdbarch, spu_register_type);
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);

  /* Data types.  The SPU is a 32-bit target; plain char is unsigned
     and "long double" is 64-bit IEEE double (see formats below).  */
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_ptr_bit (gdbarch, 32);
  set_gdbarch_addr_bit (gdbarch, 32);
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 64);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);

  /* Inferior function calls.  */
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
  set_gdbarch_unwind_dummy_id (gdbarch, spu_unwind_dummy_id);
  set_gdbarch_return_value (gdbarch, spu_return_value);

  /* Frame handling.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
  frame_unwind_append_sniffer (gdbarch, spu_frame_sniffer);
  frame_base_set_default (gdbarch, &spu_frame_base);
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
  set_gdbarch_frame_args_skip (gdbarch, 0);
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);

  /* Breakpoints.  Hardware cannot single-step over a breakpoint, so
     software single-stepping is installed as well.  */
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);

  /* Overlays.  */
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);

  return gdbarch;
}
1397
f2d43c2c
UW
1398/* Implement a SPU-specific vector type as replacement
1399 for __gdb_builtin_type_vec128. */
1400static void
1401spu_init_vector_type (void)
1402{
1403 struct type *type;
1404
1405 type = init_composite_type ("__spu_builtin_type_vec128", TYPE_CODE_UNION);
1406 append_composite_type_field (type, "uint128", builtin_type_int128);
1407 append_composite_type_field (type, "v2_int64", builtin_type_v2_int64);
1408 append_composite_type_field (type, "v4_int32", builtin_type_v4_int32);
1409 append_composite_type_field (type, "v8_int16", builtin_type_v8_int16);
1410 append_composite_type_field (type, "v16_int8", builtin_type_v16_int8);
1411 append_composite_type_field (type, "v2_double", builtin_type_v2_double);
1412 append_composite_type_field (type, "v4_float", builtin_type_v4_float);
1413
1414 TYPE_FLAGS (type) |= TYPE_FLAG_VECTOR;
1415 TYPE_NAME (type) = "spu_builtin_type_vec128";
1416 spu_builtin_type_vec128 = type;
1417}
1418
771b4502
UW
1419void
1420_initialize_spu_tdep (void)
1421{
1422 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
f2d43c2c
UW
1423
1424 spu_init_vector_type ();
dcf52cd8
UW
1425
1426 /* Add ourselves to objfile event chain. */
1427 observer_attach_new_objfile (spu_overlay_new_objfile);
1428 spu_overlay_data = register_objfile_data ();
771b4502 1429}
This page took 0.121578 seconds and 4 git commands to generate.