2011-01-11 Michael Snyder <msnyder@vmware.com>
[deliverable/binutils-gdb.git] / gdb / spu-tdep.c
1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4
5 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
6 Based on a port by Sid Manning <sid@us.ibm.com>.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "arch-utils.h"
25 #include "gdbtypes.h"
26 #include "gdbcmd.h"
27 #include "gdbcore.h"
28 #include "gdb_string.h"
29 #include "gdb_assert.h"
30 #include "frame.h"
31 #include "frame-unwind.h"
32 #include "frame-base.h"
33 #include "trad-frame.h"
34 #include "symtab.h"
35 #include "symfile.h"
36 #include "value.h"
37 #include "inferior.h"
38 #include "dis-asm.h"
39 #include "objfiles.h"
40 #include "language.h"
41 #include "regcache.h"
42 #include "reggroups.h"
43 #include "floatformat.h"
44 #include "block.h"
45 #include "observer.h"
46 #include "infcall.h"
47
48 #include "spu-tdep.h"
49
50
/* The list of available "set spu " and "show spu " command prefixes,
   populated at initialization time.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Whether to stop for new SPE contexts (default: off).  */
static int spu_stop_on_load_p = 0;
/* Whether to automatically flush the SW-managed cache (default: on).  */
static int spu_auto_flush_cache_p = 1;
59
60
/* The tdep structure.  Per-architecture state for one SPU context.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space.  -1 when unknown
     (e.g. a standalone SPU executable); see spu_gdbarch_id.  */
  int id;

  /* SPU-specific vector type.  Created lazily and cached here; see
     spu_builtin_type_vec128.  */
  struct type *spu_builtin_type_vec128;
};
70
71
/* SPU-specific vector type.  Return the 128-bit union type used to
   display SPU general-purpose registers: the same quadword viewed as
   one 128-bit integer, or as vectors of 64/32/16/8-bit integers,
   doubles, or floats.  Built on first use and cached in the tdep.  */
static struct type *
spu_builtin_type_vec128 (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->spu_builtin_type_vec128)
    {
      const struct builtin_type *bt = builtin_type (gdbarch);
      struct type *t;

      t = arch_composite_type (gdbarch,
			       "__spu_builtin_type_vec128", TYPE_CODE_UNION);
      append_composite_type_field (t, "uint128", bt->builtin_int128);
      append_composite_type_field (t, "v2_int64",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "v4_int32",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "v8_int16",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "v16_int8",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "v2_double",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (t, "v4_float",
				   init_vector_type (bt->builtin_float, 4));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "spu_builtin_type_vec128";

      tdep->spu_builtin_type_vec128 = t;
    }

  return tdep->spu_builtin_type_vec128;
}
107
108
/* The list of available "info spu " commands, populated at
   initialization time.  */
static struct cmd_list_element *infospucmdlist = NULL;
111
112 /* Registers. */
113
114 static const char *
115 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
116 {
117 static char *register_names[] =
118 {
119 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
120 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
121 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
122 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
123 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
124 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
125 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
126 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
127 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
128 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
129 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
130 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
131 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
132 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
133 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
134 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
135 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
136 };
137
138 if (reg_nr < 0)
139 return NULL;
140 if (reg_nr >= sizeof register_names / sizeof *register_names)
141 return NULL;
142
143 return register_names[reg_nr];
144 }
145
146 static struct type *
147 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
148 {
149 if (reg_nr < SPU_NUM_GPRS)
150 return spu_builtin_type_vec128 (gdbarch);
151
152 switch (reg_nr)
153 {
154 case SPU_ID_REGNUM:
155 return builtin_type (gdbarch)->builtin_uint32;
156
157 case SPU_PC_REGNUM:
158 return builtin_type (gdbarch)->builtin_func_ptr;
159
160 case SPU_SP_REGNUM:
161 return builtin_type (gdbarch)->builtin_data_ptr;
162
163 case SPU_FPSCR_REGNUM:
164 return builtin_type (gdbarch)->builtin_uint128;
165
166 case SPU_SRR0_REGNUM:
167 return builtin_type (gdbarch)->builtin_uint32;
168
169 case SPU_LSLR_REGNUM:
170 return builtin_type (gdbarch)->builtin_uint32;
171
172 case SPU_DECR_REGNUM:
173 return builtin_type (gdbarch)->builtin_uint32;
174
175 case SPU_DECR_STATUS_REGNUM:
176 return builtin_type (gdbarch)->builtin_uint32;
177
178 default:
179 internal_error (__FILE__, __LINE__, _("invalid regnum"));
180 }
181 }
182
183 /* Pseudo registers for preferred slots - stack pointer. */
184
185 static void
186 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
187 gdb_byte *buf)
188 {
189 struct gdbarch *gdbarch = get_regcache_arch (regcache);
190 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
191 gdb_byte reg[32];
192 char annex[32];
193 ULONGEST id;
194
195 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
196 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
197 memset (reg, 0, sizeof reg);
198 target_read (&current_target, TARGET_OBJECT_SPU, annex,
199 reg, 0, sizeof reg);
200
201 store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
202 }
203
/* gdbarch pseudo_register_read handler.  Read pseudo register REGNUM
   into BUF.  SP is the preferred slot (first word) of the raw 128-bit
   SP register; the remaining pseudo registers are read from files in
   the inferior's spufs context via TARGET_OBJECT_SPU.  */
static void
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
			  int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* Cooked SP is the first 4 bytes of the raw quadword register.  */
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (buf, reg, 4);
      break;

    case SPU_FPSCR_REGNUM:
      /* FPSCR is 16 bytes of raw data read from the "fpcr" file.  */
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_read_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
245
246 static void
247 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
248 const gdb_byte *buf)
249 {
250 struct gdbarch *gdbarch = get_regcache_arch (regcache);
251 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
252 gdb_byte reg[32];
253 char annex[32];
254 ULONGEST id;
255
256 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
257 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
258 xsnprintf (reg, sizeof reg, "0x%s",
259 phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
260 target_write (&current_target, TARGET_OBJECT_SPU, annex,
261 reg, 0, strlen (reg));
262 }
263
/* gdbarch pseudo_register_write handler.  Write BUF to pseudo register
   REGNUM.  SP is written back into the preferred slot of the raw SP
   register (preserving the other three words); the remaining pseudo
   registers are written to files in the inferior's spufs context.  */
static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
			   int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* Read-modify-write: only the first word of the raw quadword
	 register is replaced.  */
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
306
/* Value conversion -- access scalar values at the preferred slot.  */

static struct value *
spu_value_from_register (struct type *type, int regnum,
			 struct frame_info *frame)
{
  struct value *value = default_value_from_register (type, regnum, frame);
  int len = TYPE_LENGTH (type);

  if (regnum < SPU_NUM_GPRS && len < 16)
    {
      /* Scalars live in the preferred slot (the first word) of the
	 128-bit register; values narrower than a word sit in its
	 right-most LEN bytes, hence the 4 - LEN byte offset.  */
      int preferred_slot = len < 4 ? 4 - len : 0;
      set_value_offset (value, preferred_slot);
    }

  return value;
}
324
325 /* Register groups. */
326
327 static int
328 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
329 struct reggroup *group)
330 {
331 /* Registers displayed via 'info regs'. */
332 if (group == general_reggroup)
333 return 1;
334
335 /* Registers displayed via 'info float'. */
336 if (group == float_reggroup)
337 return 0;
338
339 /* Registers that need to be saved/restored in order to
340 push or pop frames. */
341 if (group == save_reggroup || group == restore_reggroup)
342 return 1;
343
344 return default_register_reggroup_p (gdbarch, regnum, group);
345 }
346
347
/* Address handling.  */

/* Return the spufs ID of GDBARCH's address space, or -1 if it cannot
   be determined.  */
static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}
367
368 static int
369 spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
370 {
371 if (dwarf2_addr_class == 1)
372 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
373 else
374 return 0;
375 }
376
377 static const char *
378 spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
379 {
380 if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
381 return "__ea";
382 else
383 return NULL;
384 }
385
386 static int
387 spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
388 const char *name, int *type_flags_ptr)
389 {
390 if (strcmp (name, "__ea") == 0)
391 {
392 *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
393 return 1;
394 }
395 else
396 return 0;
397 }
398
/* gdbarch address_to_pointer handler: store ADDR into BUF in pointer
   representation.  Only the local-store offset (SPUADDR_ADDR) is
   stored; the SPU ID qualifier is stripped.  */
static void
spu_address_to_pointer (struct gdbarch *gdbarch,
			struct type *type, gdb_byte *buf, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
			  SPUADDR_ADDR (addr));
}
407
/* gdbarch pointer_to_address handler: extract a pointer value from BUF
   and qualify it with this architecture's SPU ID, so the rest of GDB
   can tell local-store addresses of different contexts apart.  NULL
   pointers stay 0 and __ea pointers are passed through unchanged.  */
static CORE_ADDR
spu_pointer_to_address (struct gdbarch *gdbarch,
			struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST addr
    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);

  /* Do not convert __ea pointers.  */
  if (TYPE_ADDRESS_CLASS_1 (type))
    return addr;

  return addr? SPUADDR (id, addr) : 0;
}
423
/* gdbarch integer_to_address handler: treat the integer in BUF as a
   local-store address and qualify it with this architecture's SPU
   ID.  */
static CORE_ADDR
spu_integer_to_address (struct gdbarch *gdbarch,
			struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST addr = unpack_long (type, buf);

  return SPUADDR (id, addr);
}
433
434
/* Decoding SPU instructions.  */

/* Primary opcode values for the instructions the prologue and
   epilogue analysis cares about, as extracted by the matching is_*
   decoder (is_rr/is_ri7: insn >> 21, is_ri10: insn >> 24,
   is_ri16: insn >> 23, is_ri18: insn >> 25, is_rrr: insn >> 28).  */
enum
{
  /* Quadword loads and stores.  */
  op_lqd = 0x34,
  op_lqx = 0x3c4,
  op_lqa = 0x61,
  op_lqr = 0x67,
  op_stqd = 0x24,
  op_stqx = 0x144,
  op_stqa = 0x41,
  op_stqr = 0x47,

  /* Immediate loads and integer adds.  */
  op_il = 0x081,
  op_ila = 0x21,
  op_a = 0x0c0,
  op_ai = 0x1c,

  /* Select bits.  */
  op_selb = 0x4,

  /* Branches: relative, absolute, and register-indirect forms.  */
  op_br = 0x64,
  op_bra = 0x60,
  op_brsl = 0x66,
  op_brasl = 0x62,
  op_brnz = 0x42,
  op_brz = 0x40,
  op_brhnz = 0x46,
  op_brhz = 0x44,
  op_bi = 0x1a8,
  op_bisl = 0x1a9,
  op_biz = 0x128,
  op_binz = 0x129,
  op_bihz = 0x12a,
  op_bihnz = 0x12b,
};
470
/* Decode INSN as an RR-format instruction with primary opcode OP.
   On a match, store the register fields into *RT, *RA and *RB and
   return non-zero; otherwise return zero.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  return 1;
}
484
/* Decode INSN as an RRR-format instruction with primary opcode OP.
   On a match, store the register fields into *RT, *RA, *RB and *RC
   and return non-zero; otherwise return zero.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) != op)
    return 0;

  *rt = (insn >> 21) & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  *rc = insn & 127;
  return 1;
}
499
/* Decode INSN as an RI7-format instruction with primary opcode OP.
   On a match, store the register fields into *RT and *RA, the
   sign-extended 7-bit immediate into *I7, and return non-zero;
   otherwise return zero.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 7-bit field.  */
  *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
  return 1;
}
513
/* Decode INSN as an RI10-format instruction with primary opcode OP.
   On a match, store the register fields into *RT and *RA, the
   sign-extended 10-bit immediate into *I10, and return non-zero;
   otherwise return zero.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* XOR/subtract trick sign-extends the 10-bit field.  */
  *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
  return 1;
}
527
/* Decode INSN as an RI16-format instruction with primary opcode OP.
   On a match, store the register field into *RT, the sign-extended
   16-bit immediate into *I16, and return non-zero; otherwise return
   zero.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 16-bit field.  */
  *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
  return 1;
}
540
/* Decode INSN as an RI18-format instruction with primary opcode OP.
   On a match, store the register field into *RT, the sign-extended
   18-bit immediate into *I18, and return non-zero; otherwise return
   zero.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) != op)
    return 0;

  *rt = insn & 127;
  /* XOR/subtract trick sign-extends the 18-bit field.  */
  *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
  return 1;
}
553
/* If INSN is any branch instruction, return non-zero and describe the
   target: for PC-relative branches *REG is SPU_PC_REGNUM and *OFFSET
   the byte displacement; for absolute branches *REG is -1 and *OFFSET
   the absolute byte address; for register-indirect branches *REG is
   the register holding the target and *OFFSET is 0.  Return zero for
   non-branch instructions.  */
static int
is_branch (unsigned int insn, int *offset, int *reg)
{
  int rt, i7, i16;

  /* PC-relative branches: the 16-bit immediate is a word offset.  */
  if (is_ri16 (insn, op_br, &rt, &i16)
      || is_ri16 (insn, op_brsl, &rt, &i16)
      || is_ri16 (insn, op_brnz, &rt, &i16)
      || is_ri16 (insn, op_brz, &rt, &i16)
      || is_ri16 (insn, op_brhnz, &rt, &i16)
      || is_ri16 (insn, op_brhz, &rt, &i16))
    {
      *reg = SPU_PC_REGNUM;
      *offset = i16 << 2;
      return 1;
    }

  /* Absolute branches.  */
  if (is_ri16 (insn, op_bra, &rt, &i16)
      || is_ri16 (insn, op_brasl, &rt, &i16))
    {
      *reg = -1;
      *offset = i16 << 2;
      return 1;
    }

  /* Register-indirect branches; *REG receives the RA field.  */
  if (is_ri7 (insn, op_bi, &rt, reg, &i7)
      || is_ri7 (insn, op_bisl, &rt, reg, &i7)
      || is_ri7 (insn, op_biz, &rt, reg, &i7)
      || is_ri7 (insn, op_binz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
    {
      *offset = 0;
      return 1;
    }

  return 0;
}
592
593
/* Prolog parsing.  */

/* Results of analyzing a function prologue; filled in by
   spu_analyze_prologue and consumed by the frame unwinder.  */
struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };
608
/* Analyze the prologue of the function starting at START_PC, scanning
   no further than END_PC, and fill in DATA (frame size, CFA location,
   and saved-register slots).  Return the address of the first
   instruction after the prologue, or START_PC if none was found.  */
static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
		      CORE_ADDR start_pc, CORE_ADDR end_pc,
		      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int found_bc = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

	- The first instruction to set up the stack pointer.
	- The first instruction to set up the frame pointer.
	- The first instruction to save the link register.
	- The first instruction to save the backchain.

     We return the instruction after the latest of these four,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

	- Any instruction adding to the current frame pointer.
	- Any instruction loading an immediate constant into a register.
	- Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
	break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
	 It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    data->cfa_offset -= immed;

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      /* Stack grows downward; the negative immediate is the
		 frame size.  */
	      data->size = -immed;
	    }
	  else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
		   && !found_fp)
	    {
	      found_fp = 1;
	      prolog_pc = pc + 4;

	      data->cfa_reg = SPU_FP_REGNUM;
	      data->cfa_offset -= immed;
	    }
	}

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    {
	      if (reg_immed[rb] != 0)
		data->cfa_offset -= reg_immed[rb];
	      else
		data->cfa_reg = -1;  /* We don't know the CFA any more.  */
	    }

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      if (reg_immed[rb] != 0)
		data->size = -reg_immed[rb];
	    }
	}

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
	{
	  reg_immed[rt] = immed;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      else if (is_ri18 (insn, op_ila, &rt, &immed))
	{
	  reg_immed[rt] = immed & 0x3ffff;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
	{
	  /* The immediate is a quadword offset, hence the << 4.  */
	  if (ra == data->cfa_reg)
	    data->reg_offset[rt] = data->cfa_offset - (immed << 4);

	  if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
	    {
	      found_lr = 1;
	      prolog_pc = pc + 4;
	    }

	  if (ra == SPU_RAW_SP_REGNUM
	      && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
	      && !found_bc)
	    {
	      found_bc = 1;
	      prolog_pc = pc + 4;
	    }
	}

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
	{
	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
	break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
792
793 /* Return the first instruction after the prologue starting at PC. */
794 static CORE_ADDR
795 spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
796 {
797 struct spu_prologue_data data;
798 return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
799 }
800
/* Return the frame pointer in use at address PC, expressed as a
   register number in *REG plus a byte offset in *OFFSET.  */
static void
spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
			   int *reg, LONGEST *offset)
{
  struct spu_prologue_data data;
  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);

  if (data.size != -1 && data.cfa_reg != -1)
    {
      /* The 'frame pointer' address is CFA minus frame size.  */
      *reg = data.cfa_reg;
      *offset = data.cfa_offset - data.size;
    }
  else
    {
      /* ??? We don't really know ...  Fall back to the current SP.  */
      *reg = SPU_SP_REGNUM;
      *offset = 0;
    }
}
822
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
       a) If you find an instruction that modifies the stack pointer
          or transfers control (except a return), execution is not in
          an epilogue, return.
       b) Stop scanning if you find a return instruction or reach the
          end of the function or reach the hard limit for the size of
          an epilogue.
   2) scan backward from the point of execution:
        a) If you find an instruction that modifies the stack pointer,
            execution *is* in an epilogue, return.
        b) Stop scanning if you reach an instruction that transfers
           control or the beginning of the function or reach the hard
           limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	{
	  /* An indirect branch through the link register with no
	     offset is the return instruction.  */
	  if (immed == 0 && ra == SPU_LR_REGNUM)
	    break;

	  return 0;
	}

      /* Instructions writing the raw SP disqualify an epilogue.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 0;
	}
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 1;
	}
    }

  return 0;
}
917
918
/* Normal stack frames.  */

/* Per-frame unwind cache, built by spu_frame_unwind_cache.  */
struct spu_unwind_cache
{
  /* Start address of the frame's function (code half of the frame ID).  */
  CORE_ADDR func;
  /* CFA of this frame; remains 0 if no frame could be identified.  */
  CORE_ADDR frame_base;
  /* Bottom of the frame (CFA minus frame size).  */
  CORE_ADDR local_base;

  /* Locations of saved registers, indexed by register number.  */
  struct trad_frame_saved_reg *saved_regs;
};
929
/* Build (or return the cached) unwind info for THIS_FRAME.  Prologue
   analysis is used when the function start is known; otherwise we fall
   back to following the backchain word stored at the stack pointer.
   INFO->frame_base remains 0 if no frame could be identified.  */
static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
			void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
			  &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
	if (i == SPU_LR_REGNUM
	    || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
	  if (data.reg_offset[i] != -1)
	    info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      ULONGEST lslr;
      int status;

      /* Get local store limit.  */
      lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
      if (!lslr)
	lslr = (ULONGEST) -1;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
					 &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain <= lslr)
	{
	  /* Assume the link register is saved into its slot.  */
	  if (backchain + 16 <= lslr)
	    info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
							    backchain + 16);

          /* Frame bases.  */
	  info->frame_base = SPUADDR (id, backchain);
	  info->local_base = SPUADDR (id, reg);
	}
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
			SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf, 4, byte_order));

  return info;
}
1049
1050 static void
1051 spu_frame_this_id (struct frame_info *this_frame,
1052 void **this_prologue_cache, struct frame_id *this_id)
1053 {
1054 struct spu_unwind_cache *info =
1055 spu_frame_unwind_cache (this_frame, this_prologue_cache);
1056
1057 if (info->frame_base == 0)
1058 return;
1059
1060 *this_id = frame_id_build (info->frame_base, info->func);
1061 }
1062
1063 static struct value *
1064 spu_frame_prev_register (struct frame_info *this_frame,
1065 void **this_prologue_cache, int regnum)
1066 {
1067 struct spu_unwind_cache *info
1068 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
1069
1070 /* Special-case the stack pointer. */
1071 if (regnum == SPU_RAW_SP_REGNUM)
1072 regnum = SPU_SP_REGNUM;
1073
1074 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
1075 }
1076
/* Unwinder for normal SPU stack frames; accepted unconditionally via
   the default sniffer.  */
static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};
1084
1085 static CORE_ADDR
1086 spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
1087 {
1088 struct spu_unwind_cache *info
1089 = spu_frame_unwind_cache (this_frame, this_cache);
1090 return info->local_base;
1091 }
1092
/* Frame base definitions: frame, locals, and arguments all use the
   same base address.  */
static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};
1099
/* gdbarch unwind_pc handler: unwound PC, qualified with the SPU ID.  */
static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit (the low bits of the PC register).  */
  return SPUADDR (tdep->id, pc & -4);
}
1108
1109 static CORE_ADDR
1110 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1111 {
1112 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1113 CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1114 return SPUADDR (tdep->id, sp);
1115 }
1116
/* gdbarch read_pc handler: current PC, qualified with the SPU ID.  */
static CORE_ADDR
spu_read_pc (struct regcache *regcache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
  ULONGEST pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
  /* Mask off interrupt enable bit (the low bits of the PC register).  */
  return SPUADDR (tdep->id, pc & -4);
}
1126
1127 static void
1128 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1129 {
1130 /* Keep interrupt enabled state unchanged. */
1131 ULONGEST old_pc;
1132 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1133 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1134 (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
1135 }
1136
1137
1138 /* Cell/B.E. cross-architecture unwinder support. */
1139
/* Unwind cache for the SPU-to-PPU cross-architecture unwinder: the ID
   of the PPU-side frame and a private copy of its register state.
   The regcache is owned by this cache and freed by
   spu2ppu_dealloc_cache.  */
struct spu2ppu_cache
{
  struct frame_id frame_id;	/* ID of the PPU-side frame.  */
  struct regcache *regcache;	/* Saved PPU register state.  */
};
1145
1146 static struct gdbarch *
1147 spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
1148 {
1149 struct spu2ppu_cache *cache = *this_cache;
1150 return get_regcache_arch (cache->regcache);
1151 }
1152
1153 static void
1154 spu2ppu_this_id (struct frame_info *this_frame,
1155 void **this_cache, struct frame_id *this_id)
1156 {
1157 struct spu2ppu_cache *cache = *this_cache;
1158 *this_id = cache->frame_id;
1159 }
1160
1161 static struct value *
1162 spu2ppu_prev_register (struct frame_info *this_frame,
1163 void **this_cache, int regnum)
1164 {
1165 struct spu2ppu_cache *cache = *this_cache;
1166 struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
1167 gdb_byte *buf;
1168
1169 buf = alloca (register_size (gdbarch, regnum));
1170 regcache_cooked_read (cache->regcache, regnum, buf);
1171 return frame_unwind_got_bytes (this_frame, regnum, buf);
1172 }
1173
/* Sniffer for the SPU-to-PPU cross-architecture unwinder.  Matches
   the outermost SPU frame (recognized by a zero back chain) when the
   main target is not itself an SPU, and captures the PPU register
   state to unwind into.  Returns non-zero and fills in
   THIS_PROLOGUE_CACHE on a match.  */
static int
spu2ppu_sniffer (const struct frame_unwind *self,
		 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  /* Only applicable in combined debugging, i.e. when the main target
     architecture is not SPU.  */
  if (gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  /* A zero back chain marks the outermost SPU frame.  */
  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
	= FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      /* Find the innermost non-SPU (i.e. PPU) frame below us, if
	 any.  */
      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
	if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
	  break;

      if (fi)
	{
	  /* Capture the register state of that PPU frame.  */
	  cache->regcache = frame_save_as_regcache (fi);
	  *this_prologue_cache = cache;
	  return 1;
	}
      else
	{
	  /* No PPU frame in the existing frame chain; fall back to a
	     copy of the thread's live register state.  */
	  struct regcache *regcache;
	  regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
	  cache->regcache = regcache_dup (regcache);
	  *this_prologue_cache = cache;
	  return 1;
	}
    }

  return 0;
}
1223
1224 static void
1225 spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
1226 {
1227 struct spu2ppu_cache *cache = this_cache;
1228 regcache_xfree (cache->regcache);
1229 }
1230
/* Cross-architecture unwinder from the outermost SPU frame into the
   PPU frame that entered the SPU context.  */
static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,				/* unwind_data */
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};
1240
1241
1242 /* Function calling convention. */
1243
1244 static CORE_ADDR
1245 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1246 {
1247 return sp & ~15;
1248 }
1249
1250 static CORE_ADDR
1251 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1252 struct value **args, int nargs, struct type *value_type,
1253 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1254 struct regcache *regcache)
1255 {
1256 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1257 sp = (sp - 4) & ~15;
1258 /* Store the address of that breakpoint */
1259 *bp_addr = sp;
1260 /* The call starts at the callee's entry point. */
1261 *real_pc = funaddr;
1262
1263 return sp;
1264 }
1265
1266 static int
1267 spu_scalar_value_p (struct type *type)
1268 {
1269 switch (TYPE_CODE (type))
1270 {
1271 case TYPE_CODE_INT:
1272 case TYPE_CODE_ENUM:
1273 case TYPE_CODE_RANGE:
1274 case TYPE_CODE_CHAR:
1275 case TYPE_CODE_BOOL:
1276 case TYPE_CODE_PTR:
1277 case TYPE_CODE_REF:
1278 return TYPE_LENGTH (type) <= 16;
1279
1280 default:
1281 return 0;
1282 }
1283 }
1284
1285 static void
1286 spu_value_to_regcache (struct regcache *regcache, int regnum,
1287 struct type *type, const gdb_byte *in)
1288 {
1289 int len = TYPE_LENGTH (type);
1290
1291 if (spu_scalar_value_p (type))
1292 {
1293 int preferred_slot = len < 4 ? 4 - len : 0;
1294 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1295 }
1296 else
1297 {
1298 while (len >= 16)
1299 {
1300 regcache_cooked_write (regcache, regnum++, in);
1301 in += 16;
1302 len -= 16;
1303 }
1304
1305 if (len > 0)
1306 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1307 }
1308 }
1309
1310 static void
1311 spu_regcache_to_value (struct regcache *regcache, int regnum,
1312 struct type *type, gdb_byte *out)
1313 {
1314 int len = TYPE_LENGTH (type);
1315
1316 if (spu_scalar_value_p (type))
1317 {
1318 int preferred_slot = len < 4 ? 4 - len : 0;
1319 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1320 }
1321 else
1322 {
1323 while (len >= 16)
1324 {
1325 regcache_cooked_read (regcache, regnum++, out);
1326 out += 16;
1327 len -= 16;
1328 }
1329
1330 if (len > 0)
1331 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1332 }
1333 }
1334
/* Implement the push_dummy_call gdbarch method: set up registers and
   stack for an inferior function call.  Arguments are passed in the
   argument registers where they fit; overflow arguments, a stack
   frame header, and the back chain go on the stack.  Returns the new
   stack pointer.  */
static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr,
		     int nargs, struct value **args, CORE_ADDR sp,
		     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;		/* Index of first stack-passed arg.  */
  gdb_byte buf[16];

  /* Set the return address (to the dummy breakpoint).  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
	 all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
	{
	  stack_arg = i;
	  break;
	}

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
	{
	  struct type *type = check_typedef (value_type (args[i]));
	  sp -= align_up (TYPE_LENGTH (type), 16);
	}

      /* Fill in stack arguments.  Each argument occupies a 16-byte
	 aligned slot; scalars sit right-justified within the first
	 4 bytes of their slot, mirroring register passing.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
	{
	  struct value *arg = args[i];
	  struct type *type = check_typedef (value_type (arg));
	  int len = TYPE_LENGTH (type);
	  int preferred_slot;

	  if (spu_scalar_value_p (type))
	    preferred_slot = len < 4 ? 4 - len : 0;
	  else
	    preferred_slot = 0;

	  target_write_memory (ap + preferred_slot, value_contents (arg), len);
	  ap += align_up (TYPE_LENGTH (type), 16);
	}
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  The raw SP is a
     vector register; adjust each of its four 4-byte slots by the same
     delta so any extra values kept there stay consistent.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}
1433
1434 static struct frame_id
1435 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1436 {
1437 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1438 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1439 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1440 return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
1441 }
1442
1443 /* Function return value access. */
1444
1445 static enum return_value_convention
1446 spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
1447 struct type *type, struct regcache *regcache,
1448 gdb_byte *out, const gdb_byte *in)
1449 {
1450 enum return_value_convention rvc;
1451
1452 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1453 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1454 else
1455 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1456
1457 if (in)
1458 {
1459 switch (rvc)
1460 {
1461 case RETURN_VALUE_REGISTER_CONVENTION:
1462 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1463 break;
1464
1465 case RETURN_VALUE_STRUCT_CONVENTION:
1466 error (_("Cannot set function return value."));
1467 break;
1468 }
1469 }
1470 else if (out)
1471 {
1472 switch (rvc)
1473 {
1474 case RETURN_VALUE_REGISTER_CONVENTION:
1475 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1476 break;
1477
1478 case RETURN_VALUE_STRUCT_CONVENTION:
1479 error (_("Function return value unknown."));
1480 break;
1481 }
1482 }
1483
1484 return rvc;
1485 }
1486
1487
1488 /* Breakpoints. */
1489
1490 static const gdb_byte *
1491 spu_breakpoint_from_pc (struct gdbarch *gdbarch,
1492 CORE_ADDR * pcptr, int *lenptr)
1493 {
1494 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1495
1496 *lenptr = sizeof breakpoint;
1497 return breakpoint;
1498 }
1499
/* Implement the memory_remove_breakpoint gdbarch method, with a
   workaround for combined Cell/B.E. debugging across fork.  */
static int
spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
			      struct bp_target_info *bp_tgt)
{
  /* We work around a problem in combined Cell/B.E. debugging here.  Consider
     that in a combined application, we have some breakpoints inserted in SPU
     code, and now the application forks (on the PPU side).  GDB common code
     will assume that the fork system call copied all breakpoints into the new
     process' address space, and that all those copies now need to be removed
     (see breakpoint.c:detach_breakpoints).

     While this is certainly true for PPU side breakpoints, it is not true
     for SPU side breakpoints.  fork will clone the SPU context file
     descriptors, so that all the existing SPU contexts are accessible
     in the new process.  However, the contents of the SPU contexts themselves
     are *not* cloned.  Therefore the effect of detach_breakpoints is to
     remove SPU breakpoints from the *original* SPU context's local store
     -- this is not the correct behaviour.

     The workaround is to check whether the PID we are asked to remove this
     breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
     PID of the current inferior (i.e. current_inferior ()->pid).  This is only
     true in the context of detach_breakpoints.  If so, we simply do nothing.
     [ Note that for the fork child process, it does not matter if breakpoints
     remain inserted, because those SPU contexts are not runnable anyway --
     the Linux kernel allows only the original process to invoke spu_run.  ] */

  if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
    return 0;

  return default_memory_remove_breakpoint (gdbarch, bp_tgt);
}
1532
1533
1534 /* Software single-stepping support. */
1535
/* Implement software single-step: insert single-step breakpoints on
   every possible successor of the instruction at the current PC (the
   sequential successor, plus the branch target if the instruction is
   a branch).  Always returns 1.  */
static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];
  ULONGEST lslr;

  pc = get_frame_pc (frame);

  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Get local store limit.  A zero LSLR register is treated as
     "no limit" (all-ones mask).  */
  lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
  if (!lslr)
    lslr = (ULONGEST) -1;

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;

  insert_single_step_breakpoint (gdbarch,
				 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));

  if (is_branch (insn, &offset, &reg))
    {
      /* Compute the branch target: OFFSET is relative to the PC for
	 PC-relative branches, added to a register value for indirect
	 branches, and already absolute when REG is -1.  */
      CORE_ADDR target = offset;

      if (reg == SPU_PC_REGNUM)
	target += SPUADDR_ADDR (pc);
      else if (reg != -1)
	{
	  get_frame_register_bytes (frame, reg, 0, 4, buf);
	  target += extract_unsigned_integer (buf, 4, byte_order) & -4;
	}

      /* Insert a second breakpoint at the branch target, unless it
	 coincides with the sequential successor.  */
      target = target & lslr;
      if (target != next_pc)
	insert_single_step_breakpoint (gdbarch, aspace,
				       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}
1590
1591
1592 /* Longjmp support. */
1593
1594 static int
1595 spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1596 {
1597 struct gdbarch *gdbarch = get_frame_arch (frame);
1598 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1599 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1600 gdb_byte buf[4];
1601 CORE_ADDR jb_addr;
1602
1603 /* Jump buffer is pointed to by the argument register $r3. */
1604 get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf);
1605 jb_addr = extract_unsigned_integer (buf, 4, byte_order);
1606 if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
1607 return 0;
1608
1609 *pc = extract_unsigned_integer (buf, 4, byte_order);
1610 *pc = SPUADDR (tdep->id, *pc);
1611 return 1;
1612 }
1613
1614
1615 /* Disassembler. */
1616
/* Private data threaded through the opcodes disassembler so that
   spu_dis_asm_print_address can reconstruct combined SPU
   addresses.  */
struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;			/* SPU context ID of the code being
				   disassembled.  */
};
1622
1623 static void
1624 spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
1625 {
1626 struct spu_dis_asm_data *data = info->application_data;
1627 print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
1628 }
1629
/* Disassemble one instruction at MEMADDR, wrapping the opcodes
   disassembler so that printed addresses carry the SPU ID.  */
static int
gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
{
  /* The opcodes disassembler does 18-bit address arithmetic.  Make
     sure the SPU ID encoded in the high bits is added back when we
     call print_address.  */
  struct disassemble_info spu_info = *info;
  struct spu_dis_asm_data data;
  /* NOTE(review): this assumes the caller stored the gdbarch pointer
     in INFO's application_data -- confirm against GDB's common
     disassembly setup.  */
  data.gdbarch = info->application_data;
  data.id = SPUADDR_SPU (memaddr);

  spu_info.application_data = &data;
  spu_info.print_address_func = spu_dis_asm_print_address;
  return print_insn_spu (memaddr, &spu_info);
}
1645
1646
1647 /* Target overlays for the SPU overlay manager.
1648
1649 See the documentation of simple_overlay_update for how the
1650 interface is supposed to work.
1651
1652 Data structures used by the overlay manager:
1653
1654 struct ovly_table
1655 {
1656 u32 vma;
1657 u32 size;
1658 u32 pos;
1659 u32 buf;
1660 } _ovly_table[]; -- one entry per overlay section
1661
1662 struct ovly_buf_table
1663 {
1664 u32 mapped;
1665 } _ovly_buf_table[]; -- one entry per overlay buffer
1666
1667 _ovly_table should never change.
1668
1669 Both tables are aligned to a 16-byte boundary, the symbols
1670 _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
1671 size set to the size of the respective array. buf in _ovly_table is
1672 an index into _ovly_buf_table.
1673
1674 mapped is an index into _ovly_table. Both the mapped and buf indices start
1675 from one to reference the first entry in their respective tables. */
1676
1677 /* Using the per-objfile private data mechanism, we store for each
1678 objfile an array of "struct spu_overlay_table" structures, one
1679 for each obj_section of the objfile. This structure holds two
1680 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1681 is *not* an overlay section. If it is non-zero, it represents
1682 a target address. The overlay section is mapped iff the target
1683 integer at this location equals MAPPED_VAL. */
1684
/* Per-objfile registry key for the cached overlay table; see the
   comment above.  */
static const struct objfile_data *spu_overlay_data;

/* One entry per obj_section.  MAPPED_PTR is the target address of the
   _ovly_buf_table word to test, or zero for a non-overlay section;
   the section is mapped iff that word equals MAPPED_VAL.  */
struct spu_overlay_table
  {
    CORE_ADDR mapped_ptr;
    CORE_ADDR mapped_val;
  };
1692
/* Retrieve the overlay table for OBJFILE.  If not already cached, read
   the _ovly_table data structure from the target and initialize the
   spu_overlay_table data structure from it.  Returns NULL if OBJFILE
   has no overlay tables.  */
static struct spu_overlay_table *
spu_get_overlay_table (struct objfile *objfile)
{
  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
  unsigned ovly_table_size, ovly_buf_table_size;
  struct spu_overlay_table *tbl;
  struct obj_section *osect;
  char *ovly_table;
  int i;

  /* Return the cached table if we have built it before.  */
  tbl = objfile_data (objfile, spu_overlay_data);
  if (tbl)
    return tbl;

  /* Without the overlay-manager symbols there is nothing to do.  */
  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
  if (!ovly_table_msym)
    return NULL;

  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
					       NULL, objfile);
  if (!ovly_buf_table_msym)
    return NULL;

  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);

  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);

  /* Read the raw _ovly_table contents from the target.  */
  ovly_table = xmalloc (ovly_table_size);
  read_memory (ovly_table_base, ovly_table, ovly_table_size);

  /* One (zero-initialized) entry per obj_section, allocated on the
     objfile obstack so it lives as long as the objfile.  */
  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
			objfile->sections_end - objfile->sections,
			struct spu_overlay_table);

  /* Each _ovly_table row is 16 bytes: vma, size, pos, buf.  */
  for (i = 0; i < ovly_table_size / 16; i++)
    {
      CORE_ADDR vma  = extract_unsigned_integer (ovly_table + 16*i + 0,
						 4, byte_order);
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
						 4, byte_order);
      CORE_ADDR pos  = extract_unsigned_integer (ovly_table + 16*i + 8,
						 4, byte_order);
      CORE_ADDR buf  = extract_unsigned_integer (ovly_table + 16*i + 12,
						 4, byte_order);

      /* Skip rows with an out-of-range buffer index (buf is
	 1-based; 0 means no buffer).  */
      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
	continue;

      /* Match the row to its obj_section by VMA and file position,
	 and record where to test for "mapped" (mapped indices are
	 1-based as well).  */
      ALL_OBJFILE_OSECTIONS (objfile, osect)
	if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
	    && pos == osect->the_bfd_section->filepos)
	  {
	    int ndx = osect - objfile->sections;
	    tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
	    tbl[ndx].mapped_val = i + 1;
	    break;
	  }
    }

  xfree (ovly_table);
  set_objfile_data (objfile, spu_overlay_data, tbl);
  return tbl;
}
1764
/* Read the _ovly_buf_table entry from the target to determine whether
   OSECT is currently mapped, and update its mapped state.  */
static void
spu_overlay_update_osect (struct obj_section *osect)
{
  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct spu_overlay_table *ovly_table;
  CORE_ADDR id, val;

  ovly_table = spu_get_overlay_table (osect->objfile);
  if (!ovly_table)
    return;

  /* Index to this section's entry; mapped_ptr == 0 marks a
     non-overlay section.  */
  ovly_table += osect - osect->objfile->sections;
  if (ovly_table->mapped_ptr == 0)
    return;

  /* The section is mapped iff the target word at mapped_ptr (within
     this section's SPU context) equals mapped_val.  */
  id = SPUADDR_SPU (obj_section_addr (osect));
  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
				      4, byte_order);
  osect->ovly_mapped = (val == ovly_table->mapped_val);
}
1788
1789 /* If OSECT is NULL, then update all sections' mapped state.
1790 If OSECT is non-NULL, then update only OSECT's mapped state. */
1791 static void
1792 spu_overlay_update (struct obj_section *osect)
1793 {
1794 /* Just one section. */
1795 if (osect)
1796 spu_overlay_update_osect (osect);
1797
1798 /* All sections. */
1799 else
1800 {
1801 struct objfile *objfile;
1802
1803 ALL_OBJSECTIONS (objfile, osect)
1804 if (section_is_overlay (osect))
1805 spu_overlay_update_osect (osect);
1806 }
1807 }
1808
/* Whenever a new objfile is loaded, read the target's _ovly_table.
   If there is one, go through all sections and make sure for non-
   overlay sections LMA equals VMA, while for overlay sections LMA
   is larger than SPU_OVERLAY_LMA.  */
static void
spu_overlay_new_objfile (struct objfile *objfile)
{
  struct spu_overlay_table *ovly_table;
  struct obj_section *osect;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
    return;

  /* Consider only SPU objfiles.  */
  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* Check if this objfile has overlays.  */
  ovly_table = spu_get_overlay_table (objfile);
  if (!ovly_table)
    return;

  /* Now go and fiddle with all the LMAs.  Non-overlay sections get
     LMA == VMA; overlay sections get an LMA above SPU_OVERLAY_LMA,
     keyed off the section's file position so GDB's generic overlay
     code can tell them apart.  */
  ALL_OBJFILE_OSECTIONS (objfile, osect)
    {
      bfd *obfd = objfile->obfd;
      asection *bsect = osect->the_bfd_section;
      int ndx = osect - objfile->sections;

      if (ovly_table[ndx].mapped_ptr == 0)
	bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
      else
	bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
    }
}
1845
1846
/* Insert temporary breakpoint on "main" function of newly loaded
   SPE context OBJFILE.  */
static void
spu_catch_start (struct objfile *objfile)
{
  struct minimal_symbol *minsym;
  struct symtab *symtab;
  CORE_ADDR pc;
  char buf[32];

  /* Do this only if requested by "set spu stop-on-load on".  */
  if (!spu_stop_on_load_p)
    return;

  /* Consider only SPU objfiles.  */
  if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* The main objfile is handled differently.  */
  if (objfile == symfile_objfile)
    return;

  /* There can be multiple symbols named "main".  Search for the
     "main" in *this* objfile.  */
  minsym = lookup_minimal_symbol ("main", NULL, objfile);
  if (!minsym)
    return;

  /* If we have debugging information, try to use it -- this
     will allow us to properly skip the prologue.  */
  pc = SYMBOL_VALUE_ADDRESS (minsym);
  symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (minsym));
  if (symtab != NULL)
    {
      struct blockvector *bv = BLOCKVECTOR (symtab);
      struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
      struct symbol *sym;
      struct symtab_and_line sal;

      sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
      if (sym)
	{
	  fixup_symbol_section (sym, objfile);
	  /* Use the post-prologue start address of "main".  */
	  sal = find_function_start_sal (sym, 1);
	  pc = sal.pc;
	}
    }

  /* Use a numerical address for the set_breakpoint command to avoid having
     the breakpoint re-set incorrectly.  */
  xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
  create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
		     NULL /* cond_string */, -1 /* thread */,
		     0 /* parse_condition_and_thread */, 1 /* tempflag */,
		     bp_breakpoint /* type_wanted */,
		     0 /* ignore_count */,
		     AUTO_BOOLEAN_FALSE /* pending_break_support */,
		     NULL /* ops */, 0 /* from_tty */, 1 /* enabled */,
		     0 /* internal  */);
}
1907
1908
1909 /* Look up OBJFILE loaded into FRAME's SPU context. */
1910 static struct objfile *
1911 spu_objfile_from_frame (struct frame_info *frame)
1912 {
1913 struct gdbarch *gdbarch = get_frame_arch (frame);
1914 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1915 struct objfile *obj;
1916
1917 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1918 return NULL;
1919
1920 ALL_OBJFILES (obj)
1921 {
1922 if (obj->sections != obj->sections_end
1923 && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
1924 return obj;
1925 }
1926
1927 return NULL;
1928 }
1929
/* Flush cache for ea pointer access if available: call the inferior
   function __cache_flush in the SPU context of the current frame, if
   that context's objfile provides one.  */
static void
flush_ea_cache (void)
{
  struct minimal_symbol *msymbol;
  struct objfile *obj;

  /* We need a valid frame to locate the SPU context and to make the
     inferior call.  */
  if (!has_stack_frames ())
    return;

  obj = spu_objfile_from_frame (get_current_frame ());
  if (obj == NULL)
    return;

  /* Lookup inferior function __cache_flush.  */
  msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
  if (msymbol != NULL)
    {
      struct type *type;
      CORE_ADDR addr;

      /* Build a "void (*) (void)" pointer value for the call.  */
      type = objfile_type (obj)->builtin_void;
      type = lookup_function_type (type);
      type = lookup_pointer_type (type);
      addr = SYMBOL_VALUE_ADDRESS (msymbol);

      call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
    }
}
1959
/* This handler is called when the inferior has stopped.  If it is stopped in
   SPU architecture then flush the ea cache if used.  */
static void
spu_attach_normal_stop (struct bpstats *bs, int print_frame)
{
  if (!spu_auto_flush_cache_p)
    return;

  /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
     re-entering this function when __cache_flush stops.  */
  spu_auto_flush_cache_p = 0;
  flush_ea_cache ();
  spu_auto_flush_cache_p = 1;
}
1974
1975
1976 /* "info spu" commands. */
1977
1978 static void
1979 info_spu_event_command (char *args, int from_tty)
1980 {
1981 struct frame_info *frame = get_selected_frame (NULL);
1982 ULONGEST event_status = 0;
1983 ULONGEST event_mask = 0;
1984 struct cleanup *chain;
1985 gdb_byte buf[100];
1986 char annex[32];
1987 LONGEST len;
1988 int rc, id;
1989
1990 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1991 error (_("\"info spu\" is only supported on the SPU architecture."));
1992
1993 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1994
1995 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1996 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1997 buf, 0, (sizeof (buf) - 1));
1998 if (len <= 0)
1999 error (_("Could not read event_status."));
2000 buf[len] = '\0';
2001 event_status = strtoulst (buf, NULL, 16);
2002
2003 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
2004 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2005 buf, 0, (sizeof (buf) - 1));
2006 if (len <= 0)
2007 error (_("Could not read event_mask."));
2008 buf[len] = '\0';
2009 event_mask = strtoulst (buf, NULL, 16);
2010
2011 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
2012
2013 if (ui_out_is_mi_like_p (uiout))
2014 {
2015 ui_out_field_fmt (uiout, "event_status",
2016 "0x%s", phex_nz (event_status, 4));
2017 ui_out_field_fmt (uiout, "event_mask",
2018 "0x%s", phex_nz (event_mask, 4));
2019 }
2020 else
2021 {
2022 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
2023 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
2024 }
2025
2026 do_cleanups (chain);
2027 }
2028
2029 static void
2030 info_spu_signal_command (char *args, int from_tty)
2031 {
2032 struct frame_info *frame = get_selected_frame (NULL);
2033 struct gdbarch *gdbarch = get_frame_arch (frame);
2034 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2035 ULONGEST signal1 = 0;
2036 ULONGEST signal1_type = 0;
2037 int signal1_pending = 0;
2038 ULONGEST signal2 = 0;
2039 ULONGEST signal2_type = 0;
2040 int signal2_pending = 0;
2041 struct cleanup *chain;
2042 char annex[32];
2043 gdb_byte buf[100];
2044 LONGEST len;
2045 int rc, id;
2046
2047 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2048 error (_("\"info spu\" is only supported on the SPU architecture."));
2049
2050 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2051
2052 xsnprintf (annex, sizeof annex, "%d/signal1", id);
2053 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2054 if (len < 0)
2055 error (_("Could not read signal1."));
2056 else if (len == 4)
2057 {
2058 signal1 = extract_unsigned_integer (buf, 4, byte_order);
2059 signal1_pending = 1;
2060 }
2061
2062 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
2063 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2064 buf, 0, (sizeof (buf) - 1));
2065 if (len <= 0)
2066 error (_("Could not read signal1_type."));
2067 buf[len] = '\0';
2068 signal1_type = strtoulst (buf, NULL, 16);
2069
2070 xsnprintf (annex, sizeof annex, "%d/signal2", id);
2071 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2072 if (len < 0)
2073 error (_("Could not read signal2."));
2074 else if (len == 4)
2075 {
2076 signal2 = extract_unsigned_integer (buf, 4, byte_order);
2077 signal2_pending = 1;
2078 }
2079
2080 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
2081 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2082 buf, 0, (sizeof (buf) - 1));
2083 if (len <= 0)
2084 error (_("Could not read signal2_type."));
2085 buf[len] = '\0';
2086 signal2_type = strtoulst (buf, NULL, 16);
2087
2088 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
2089
2090 if (ui_out_is_mi_like_p (uiout))
2091 {
2092 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
2093 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
2094 ui_out_field_int (uiout, "signal1_type", signal1_type);
2095 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
2096 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
2097 ui_out_field_int (uiout, "signal2_type", signal2_type);
2098 }
2099 else
2100 {
2101 if (signal1_pending)
2102 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
2103 else
2104 printf_filtered (_("Signal 1 not pending "));
2105
2106 if (signal1_type)
2107 printf_filtered (_("(Type Or)\n"));
2108 else
2109 printf_filtered (_("(Type Overwrite)\n"));
2110
2111 if (signal2_pending)
2112 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
2113 else
2114 printf_filtered (_("Signal 2 not pending "));
2115
2116 if (signal2_type)
2117 printf_filtered (_("(Type Or)\n"));
2118 else
2119 printf_filtered (_("(Type Overwrite)\n"));
2120 }
2121
2122 do_cleanups (chain);
2123 }
2124
/* Print the contents of one mailbox (NR 4-byte entries in BUF,
   decoded with BYTE_ORDER) as a single-column UI table.  FIELD is the
   MI field name and MSG the human-readable column heading.  An empty
   mailbox prints nothing.  */
static void
info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
		       const char *field, const char *msg)
{
  struct cleanup *chain;
  int i;

  if (nr <= 0)
    return;

  chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");

  ui_out_table_header (uiout, 32, ui_left, field, msg);
  ui_out_table_body (uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *val_chain;
      ULONGEST val;
      val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
      val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
      do_cleanups (val_chain);

      /* In CLI mode, put each entry on its own line.  */
      if (!ui_out_is_mi_like_p (uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
2155
2156 static void
2157 info_spu_mailbox_command (char *args, int from_tty)
2158 {
2159 struct frame_info *frame = get_selected_frame (NULL);
2160 struct gdbarch *gdbarch = get_frame_arch (frame);
2161 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2162 struct cleanup *chain;
2163 char annex[32];
2164 gdb_byte buf[1024];
2165 LONGEST len;
2166 int i, id;
2167
2168 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2169 error (_("\"info spu\" is only supported on the SPU architecture."));
2170
2171 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2172
2173 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
2174
2175 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2176 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2177 buf, 0, sizeof buf);
2178 if (len < 0)
2179 error (_("Could not read mbox_info."));
2180
2181 info_spu_mailbox_list (buf, len / 4, byte_order,
2182 "mbox", "SPU Outbound Mailbox");
2183
2184 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2185 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2186 buf, 0, sizeof buf);
2187 if (len < 0)
2188 error (_("Could not read ibox_info."));
2189
2190 info_spu_mailbox_list (buf, len / 4, byte_order,
2191 "ibox", "SPU Outbound Interrupt Mailbox");
2192
2193 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2194 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2195 buf, 0, sizeof buf);
2196 if (len < 0)
2197 error (_("Could not read wbox_info."));
2198
2199 info_spu_mailbox_list (buf, len / 4, byte_order,
2200 "wbox", "SPU Inbound Mailbox");
2201
2202 do_cleanups (chain);
2203 }
2204
2205 static ULONGEST
2206 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2207 {
2208 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2209 return (word >> (63 - last)) & mask;
2210 }
2211
/* Display the contents of an MFC DMA command queue.  BUF holds NR
   32-byte queue entries in the layout of the MFC Command Queue
   Context Save/Restore Registers; BYTE_ORDER is the byte order used
   to decode them.  Output goes to the current ui_out.  */
static void
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
{
  /* Mnemonic for each MFC command opcode, indexed by opcode value.
     NULL marks a reserved/unknown opcode, which is printed
     numerically instead.  */
  static char *spu_mfc_opcode[256] =
  {
  /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
	   "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
  /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
	   "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
  /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
  /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
	   "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
  /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	   NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  };

  /* SEQ[i] will hold the index of the i-th entry to display.  */
  int *seq = alloca (nr * sizeof (int));
  /* Bitmask of entries already placed into SEQ; entry J corresponds
     to bit (NR - 1 - J).  */
  int done = 0;
  struct cleanup *chain;
  int i, j;


  /* Determine sequence in which to display (valid) entries.  */
  for (i = 0; i < nr; i++)
    {
      /* Search for the first valid entry all of whose
	 dependencies are met.  */
      for (j = 0; j < nr; j++)
	{
	  ULONGEST mfc_cq_dw3;
	  ULONGEST dependencies;

	  /* Skip entries already sequenced.  */
	  if (done & (1 << (nr - 1 - j)))
	    continue;

	  mfc_cq_dw3
	    = extract_unsigned_integer (buf + 32*j + 24,8, byte_order);
	  /* Bit 16 of the fourth doubleword flags a valid entry.  */
	  if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
	    continue;

	  /* Bits 0 .. NR-1 name the entries this one depends on;
	     display it only after all of them have been displayed.  */
	  dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
	  if ((dependencies & done) != dependencies)
	    continue;

	  seq[i] = j;
	  done |= 1 << (nr - 1 - j);
	  break;
	}

      /* No remaining entry is both valid and has all its
	 dependencies satisfied; stop sequencing here.  */
      if (j == nr)
	break;
    }

  /* Only the I entries actually sequenced get displayed.  */
  nr = i;


  chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");

  ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
  ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
  ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
  ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
  ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
  ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
  ui_out_table_header (uiout, 7, ui_left, "size", "Size");
  ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
  ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
  ui_out_table_header (uiout, 1, ui_left, "error_p", "E");

  ui_out_table_body (uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *cmd_chain;
      ULONGEST mfc_cq_dw0;
      ULONGEST mfc_cq_dw1;
      ULONGEST mfc_cq_dw2;
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
      int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
      ULONGEST mfc_ea;
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;

      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
	 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */

      mfc_cq_dw0
	= extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
      mfc_cq_dw1
	= extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
      mfc_cq_dw2
	= extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);

      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);

      /* The effective address is split: high 52 bits in the second
	 doubleword, low 12 bits in the third.  */
      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
	       | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);

      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);

      cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");

      /* Reserved opcodes have no mnemonic; show them numerically.  */
      if (spu_mfc_opcode[mfc_cmd_opcode])
	ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
      else
	ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);

      ui_out_field_int (uiout, "tag", mfc_cmd_tag);
      ui_out_field_int (uiout, "tid", tclass_id);
      ui_out_field_int (uiout, "rid", rclass_id);

      if (ea_valid_p)
	ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
      else
	ui_out_field_skip (uiout, "ea");

      ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
      if (qw_valid_p)
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
      else
	ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);

      if (list_valid_p)
	{
	  ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
	  ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
	}
      else
	{
	  ui_out_field_skip (uiout, "lstaddr");
	  ui_out_field_skip (uiout, "lstsize");
	}

      if (cmd_error_p)
	ui_out_field_string (uiout, "error_p", "*");
      else
	ui_out_field_skip (uiout, "error_p");

      do_cleanups (cmd_chain);

      if (!ui_out_is_mi_like_p (uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
2391
2392 static void
2393 info_spu_dma_command (char *args, int from_tty)
2394 {
2395 struct frame_info *frame = get_selected_frame (NULL);
2396 struct gdbarch *gdbarch = get_frame_arch (frame);
2397 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2398 ULONGEST dma_info_type;
2399 ULONGEST dma_info_mask;
2400 ULONGEST dma_info_status;
2401 ULONGEST dma_info_stall_and_notify;
2402 ULONGEST dma_info_atomic_command_status;
2403 struct cleanup *chain;
2404 char annex[32];
2405 gdb_byte buf[1024];
2406 LONGEST len;
2407 int i, id;
2408
2409 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2410 error (_("\"info spu\" is only supported on the SPU architecture."));
2411
2412 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2413
2414 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2415 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2416 buf, 0, 40 + 16 * 32);
2417 if (len <= 0)
2418 error (_("Could not read dma_info."));
2419
2420 dma_info_type
2421 = extract_unsigned_integer (buf, 8, byte_order);
2422 dma_info_mask
2423 = extract_unsigned_integer (buf + 8, 8, byte_order);
2424 dma_info_status
2425 = extract_unsigned_integer (buf + 16, 8, byte_order);
2426 dma_info_stall_and_notify
2427 = extract_unsigned_integer (buf + 24, 8, byte_order);
2428 dma_info_atomic_command_status
2429 = extract_unsigned_integer (buf + 32, 8, byte_order);
2430
2431 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
2432
2433 if (ui_out_is_mi_like_p (uiout))
2434 {
2435 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
2436 phex_nz (dma_info_type, 4));
2437 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
2438 phex_nz (dma_info_mask, 4));
2439 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
2440 phex_nz (dma_info_status, 4));
2441 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
2442 phex_nz (dma_info_stall_and_notify, 4));
2443 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
2444 phex_nz (dma_info_atomic_command_status, 4));
2445 }
2446 else
2447 {
2448 const char *query_msg = _("no query pending");
2449
2450 if (dma_info_type & 4)
2451 switch (dma_info_type & 3)
2452 {
2453 case 1: query_msg = _("'any' query pending"); break;
2454 case 2: query_msg = _("'all' query pending"); break;
2455 default: query_msg = _("undefined query type"); break;
2456 }
2457
2458 printf_filtered (_("Tag-Group Status 0x%s\n"),
2459 phex (dma_info_status, 4));
2460 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2461 phex (dma_info_mask, 4), query_msg);
2462 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2463 phex (dma_info_stall_and_notify, 4));
2464 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2465 phex (dma_info_atomic_command_status, 4));
2466 printf_filtered ("\n");
2467 }
2468
2469 info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2470 do_cleanups (chain);
2471 }
2472
2473 static void
2474 info_spu_proxydma_command (char *args, int from_tty)
2475 {
2476 struct frame_info *frame = get_selected_frame (NULL);
2477 struct gdbarch *gdbarch = get_frame_arch (frame);
2478 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2479 ULONGEST dma_info_type;
2480 ULONGEST dma_info_mask;
2481 ULONGEST dma_info_status;
2482 struct cleanup *chain;
2483 char annex[32];
2484 gdb_byte buf[1024];
2485 LONGEST len;
2486 int i, id;
2487
2488 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2489 error (_("\"info spu\" is only supported on the SPU architecture."));
2490
2491 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2492
2493 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2494 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2495 buf, 0, 24 + 8 * 32);
2496 if (len <= 0)
2497 error (_("Could not read proxydma_info."));
2498
2499 dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2500 dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2501 dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2502
2503 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2504
2505 if (ui_out_is_mi_like_p (uiout))
2506 {
2507 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2508 phex_nz (dma_info_type, 4));
2509 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2510 phex_nz (dma_info_mask, 4));
2511 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2512 phex_nz (dma_info_status, 4));
2513 }
2514 else
2515 {
2516 const char *query_msg;
2517
2518 switch (dma_info_type & 3)
2519 {
2520 case 0: query_msg = _("no query pending"); break;
2521 case 1: query_msg = _("'any' query pending"); break;
2522 case 2: query_msg = _("'all' query pending"); break;
2523 default: query_msg = _("undefined query type"); break;
2524 }
2525
2526 printf_filtered (_("Tag-Group Status 0x%s\n"),
2527 phex (dma_info_status, 4));
2528 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2529 phex (dma_info_mask, 4), query_msg);
2530 printf_filtered ("\n");
2531 }
2532
2533 info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2534 do_cleanups (chain);
2535 }
2536
/* Root "info spu" command: invoked without a subcommand, it just
   lists the available "info spu" facilities.  */
static void
info_spu_command (char *args, int from_tty)
{
  printf_unfiltered (_("\"info spu\" must be followed by "
		       "the name of an SPU facility.\n"));
  help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
}
2544
2545
2546 /* Root of all "set spu "/"show spu " commands. */
2547
/* Root "show spu" command: list all "show spu " subcommands.  */
static void
show_spu_command (char *args, int from_tty)
{
  help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
}
2553
/* Root "set spu" command: list all "set spu " subcommands.  */
static void
set_spu_command (char *args, int from_tty)
{
  help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
}
2559
/* "show spu stop-on-load" callback: report whether GDB stops for
   newly created SPE threads (see spu_stop_on_load_p).  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
		    value);
}
2567
/* "show spu auto-flush-cache" callback: report whether the
   software-managed cache is flushed automatically (see
   spu_auto_flush_cache_p).  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
		    value);
}
2575
2576
2577 /* Set up gdbarch struct. */
2578
/* gdbarch initialization callback for bfd_arch_spu: return an
   existing gdbarch for the requested spufs context ID, or allocate
   and configure a new one.  */
static struct gdbarch *
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;
  struct gdbarch_tdep *tdep;
  /* spufs context ID this architecture variant is for; -1 if none
     could be determined.  */
  int id = -1;

  /* Which spufs ID was requested as address space?  */
  if (info.tdep_info)
    id = *(int *)info.tdep_info;
  /* For objfile architectures of SPU solibs, decode the ID from the name.
     This assumes the filename convention employed by solib-spu.c.  */
  else if (info.abfd)
    {
      char *name = strrchr (info.abfd->filename, '@');
      if (name)
	sscanf (name, "@0x%*x <%d>", &id);
    }

  /* Find a candidate among extant architectures: one per spufs ID.  */
  for (arches = gdbarch_list_lookup_by_info (arches, &info);
       arches != NULL;
       arches = gdbarch_list_lookup_by_info (arches->next, &info))
    {
      tdep = gdbarch_tdep (arches->gdbarch);
      if (tdep && tdep->id == id)
	return arches->gdbarch;
    }

  /* None found, so create a new architecture.  */
  tdep = XCALLOC (1, struct gdbarch_tdep);
  tdep->id = id;
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Disassembler.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);

  /* Registers.  */
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
  set_gdbarch_register_name (gdbarch, spu_register_name);
  set_gdbarch_register_type (gdbarch, spu_register_type);
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);

  /* Data types.  */
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_ptr_bit (gdbarch, 32);
  set_gdbarch_addr_bit (gdbarch, 32);
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 64);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);

  /* Address handling.  */
  set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
  set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
  set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
  set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
  set_gdbarch_address_class_type_flags_to_name
    (gdbarch, spu_address_class_type_flags_to_name);
  set_gdbarch_address_class_name_to_type_flags
    (gdbarch, spu_address_class_name_to_type_flags);


  /* Inferior function calls.  */
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 2000);
  set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
  set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
  set_gdbarch_return_value (gdbarch, spu_return_value);

  /* Frame handling.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
  frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
  frame_base_set_default (gdbarch, &spu_frame_base);
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
  set_gdbarch_frame_args_skip (gdbarch, 0);
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);

  /* Cell/B.E. cross-architecture unwinder support.  */
  frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);

  /* Breakpoints.  */
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
  set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
  set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);

  /* Overlays.  */
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);

  return gdbarch;
}
2692
2693 /* Provide a prototype to silence -Wmissing-prototypes. */
2694 extern initialize_file_ftype _initialize_spu_tdep;
2695
/* Module initialization: register the SPU gdbarch initializer, the
   objfile/normal-stop observers, and all "set spu", "show spu", and
   "info spu" commands.  */
void
_initialize_spu_tdep (void)
{
  register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);

  /* Add ourselves to objfile event chain.  */
  observer_attach_new_objfile (spu_overlay_new_objfile);
  spu_overlay_data = register_objfile_data ();

  /* Install spu stop-on-load handler.  */
  observer_attach_new_objfile (spu_catch_start);

  /* Add ourselves to normal_stop event chain.  */
  observer_attach_normal_stop (spu_attach_normal_stop);

  /* Add root prefix command for all "set spu"/"show spu" commands.
     The prefixes must exist before the subcommands below are added.  */
  add_prefix_cmd ("spu", no_class, set_spu_command,
		  _("Various SPU specific commands."),
		  &setspucmdlist, "set spu ", 0, &setlist);
  add_prefix_cmd ("spu", no_class, show_spu_command,
		  _("Various SPU specific commands."),
		  &showspucmdlist, "show spu ", 0, &showlist);

  /* Toggle whether or not to add a temporary breakpoint at the "main"
     function of new SPE contexts.  */
  add_setshow_boolean_cmd ("stop-on-load", class_support,
			   &spu_stop_on_load_p, _("\
Set whether to stop for new SPE threads."),
			   _("\
Show whether to stop for new SPE threads."),
			   _("\
Use \"on\" to give control to the user when a new SPE thread\n\
enters its \"main\" function.\n\
Use \"off\" to disable stopping for new SPE threads."),
			   NULL,
			   show_spu_stop_on_load,
			   &setspucmdlist, &showspucmdlist);

  /* Toggle whether or not to automatically flush the software-managed
     cache whenever SPE execution stops.  */
  add_setshow_boolean_cmd ("auto-flush-cache", class_support,
			   &spu_auto_flush_cache_p, _("\
Set whether to automatically flush the software-managed cache."),
			   _("\
Show whether to automatically flush the software-managed cache."),
			   _("\
Use \"on\" to automatically flush the software-managed cache\n\
whenever SPE execution stops.\n\
Use \"off\" to never automatically flush the software-managed cache."),
			   NULL,
			   show_spu_auto_flush_cache,
			   &setspucmdlist, &showspucmdlist);

  /* Add root prefix command for all "info spu" commands.  */
  add_prefix_cmd ("spu", class_info, info_spu_command,
		  _("Various SPU specific commands."),
		  &infospucmdlist, "info spu ", 0, &infolist);

  /* Add various "info spu" commands.  */
  add_cmd ("event", class_info, info_spu_event_command,
	   _("Display SPU event facility status.\n"),
	   &infospucmdlist);
  add_cmd ("signal", class_info, info_spu_signal_command,
	   _("Display SPU signal notification facility status.\n"),
	   &infospucmdlist);
  add_cmd ("mailbox", class_info, info_spu_mailbox_command,
	   _("Display SPU mailbox facility status.\n"),
	   &infospucmdlist);
  add_cmd ("dma", class_info, info_spu_dma_command,
	   _("Display MFC DMA status.\n"),
	   &infospucmdlist);
  add_cmd ("proxydma", class_info, info_spu_proxydma_command,
	   _("Display MFC Proxy-DMA status.\n"),
	   &infospucmdlist);
}