gdb/spu-tdep.c

1/* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "arch-utils.h"
24#include "gdbtypes.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
27#include <string.h>
28#include "gdb_assert.h"
29#include "frame.h"
30#include "frame-unwind.h"
31#include "frame-base.h"
32#include "trad-frame.h"
33#include "symtab.h"
34#include "symfile.h"
35#include "value.h"
36#include "inferior.h"
37#include "dis-asm.h"
38#include "objfiles.h"
39#include "language.h"
40#include "regcache.h"
41#include "reggroups.h"
42#include "floatformat.h"
43#include "block.h"
44#include "observer.h"
45#include "infcall.h"
46#include "dwarf2.h"
47#include "dwarf2-frame.h"
48#include "ax.h"
49#include "exceptions.h"
50#include "spu-tdep.h"
51
52
53/* The list of available "set spu " and "show spu " commands. */
54static struct cmd_list_element *setspucmdlist = NULL;
55static struct cmd_list_element *showspucmdlist = NULL;
56
57/* Whether to stop for new SPE contexts. */
58static int spu_stop_on_load_p = 0;
59/* Whether to automatically flush the SW-managed cache. */
60static int spu_auto_flush_cache_p = 1;
61
62
63/* The tdep structure. */
64struct gdbarch_tdep
65{
66 /* The spufs ID identifying our address space. */
67 int id;
68
69 /* SPU-specific vector type. */
70 struct type *spu_builtin_type_vec128;
71};
72
73
74/* SPU-specific vector type. */
75static struct type *
76spu_builtin_type_vec128 (struct gdbarch *gdbarch)
77{
78 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
79
80 if (!tdep->spu_builtin_type_vec128)
81 {
82 const struct builtin_type *bt = builtin_type (gdbarch);
83 struct type *t;
84
85 t = arch_composite_type (gdbarch,
86 "__spu_builtin_type_vec128", TYPE_CODE_UNION);
87 append_composite_type_field (t, "uint128", bt->builtin_int128);
88 append_composite_type_field (t, "v2_int64",
89 init_vector_type (bt->builtin_int64, 2));
90 append_composite_type_field (t, "v4_int32",
91 init_vector_type (bt->builtin_int32, 4));
92 append_composite_type_field (t, "v8_int16",
93 init_vector_type (bt->builtin_int16, 8));
94 append_composite_type_field (t, "v16_int8",
95 init_vector_type (bt->builtin_int8, 16));
96 append_composite_type_field (t, "v2_double",
97 init_vector_type (bt->builtin_double, 2));
98 append_composite_type_field (t, "v4_float",
99 init_vector_type (bt->builtin_float, 4));
100
101 TYPE_VECTOR (t) = 1;
102 TYPE_NAME (t) = "spu_builtin_type_vec128";
103
104 tdep->spu_builtin_type_vec128 = t;
105 }
106
107 return tdep->spu_builtin_type_vec128;
108}
109
110
111/* The list of available "info spu " commands. */
112static struct cmd_list_element *infospucmdlist = NULL;
113
114/* Registers. */
115
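/* Return the name of register REG_NR, or NULL if REG_NR is not a valid
   SPU register number.  */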
116static const char *
117spu_register_name (struct gdbarch *gdbarch, int reg_nr)
118{
119 static char *register_names[] =
120 {
121 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
122 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
123 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
124 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
125 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
126 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
127 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
128 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
129 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
130 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
131 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
132 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
133 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
134 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
135 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
136 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
23d964e7 137 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
138 };
139
140 if (reg_nr < 0)
141 return NULL;
142 if (reg_nr >= sizeof register_names / sizeof *register_names)
143 return NULL;
144
145 return register_names[reg_nr];
146}
147
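/* Return the GDB type of register REG_NR.  The general-purpose registers
   use the 128-bit vector union type; the special registers are mostly
   32-bit integers, except FPSCR which is 128 bits wide.  */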
148static struct type *
149spu_register_type (struct gdbarch *gdbarch, int reg_nr)
150{
151 if (reg_nr < SPU_NUM_GPRS)
152 return spu_builtin_type_vec128 (gdbarch);
153
154 switch (reg_nr)
155 {
156 case SPU_ID_REGNUM:
157 return builtin_type (gdbarch)->builtin_uint32;
158
159 case SPU_PC_REGNUM:
160 return builtin_type (gdbarch)->builtin_func_ptr;
161
162 case SPU_SP_REGNUM:
163 return builtin_type (gdbarch)->builtin_data_ptr;
164
165 case SPU_FPSCR_REGNUM:
166 return builtin_type (gdbarch)->builtin_uint128;
167
168 case SPU_SRR0_REGNUM:
169 return builtin_type (gdbarch)->builtin_uint32;
170
171 case SPU_LSLR_REGNUM:
172 return builtin_type (gdbarch)->builtin_uint32;
173
174 case SPU_DECR_REGNUM:
175 return builtin_type (gdbarch)->builtin_uint32;
176
177 case SPU_DECR_STATUS_REGNUM:
178 return builtin_type (gdbarch)->builtin_uint32;
179
180 default:
181 internal_error (__FILE__, __LINE__, _("invalid regnum"));
182 }
183}
184
185/* Pseudo registers for preferred slots - stack pointer. */
186
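/* Read the spufs-backed pseudo register REGNAME into BUF by reading the
   corresponding file of the current SPU context through the
   TARGET_OBJECT_SPU target object.  */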
187static enum register_status
188spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
189 gdb_byte *buf)
190{
191 struct gdbarch *gdbarch = get_regcache_arch (regcache);
192 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
193 enum register_status status;
194 gdb_byte reg[32];
195 char annex[32];
196 ULONGEST id;
197 ULONGEST ul;
198
199 status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
200 if (status != REG_VALID)
201 return status;
202 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
203 memset (reg, 0, sizeof reg);
204 target_read (&current_target, TARGET_OBJECT_SPU, annex,
205 reg, 0, sizeof reg);
206
207 ul = strtoulst ((char *) reg, NULL, 16);
208 store_unsigned_integer (buf, 4, byte_order, ul);
05d1431c 209 return REG_VALID;
210}
211
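/* gdbarch pseudo_register_read handler.  The cooked SP comes from the
   preferred slot of the raw SP register; FPSCR and the remaining pseudo
   registers are read through the SPU target object.  */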
212static enum register_status
213spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
214 int regnum, gdb_byte *buf)
215{
216 gdb_byte reg[16];
217 char annex[32];
218 ULONGEST id;
219 enum register_status status;
220
221 switch (regnum)
222 {
223 case SPU_SP_REGNUM:
224 status = regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
225 if (status != REG_VALID)
226 return status;
227 memcpy (buf, reg, 4);
228 return status;
229
230 case SPU_FPSCR_REGNUM:
231 status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
232 if (status != REG_VALID)
233 return status;
234 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
235 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
236 return status;
237
238 case SPU_SRR0_REGNUM:
239 return spu_pseudo_register_read_spu (regcache, "srr0", buf);
240
241 case SPU_LSLR_REGNUM:
242 return spu_pseudo_register_read_spu (regcache, "lslr", buf);
243
244 case SPU_DECR_REGNUM:
245 return spu_pseudo_register_read_spu (regcache, "decr", buf);
246
247 case SPU_DECR_STATUS_REGNUM:
248 return spu_pseudo_register_read_spu (regcache, "decr_status", buf);
249
250 default:
251 internal_error (__FILE__, __LINE__, _("invalid regnum"));
252 }
253}
254
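/* Write BUF to the spufs-backed pseudo register REGNAME by writing the
   corresponding file of the current SPU context through the
   TARGET_OBJECT_SPU target object.  */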
255static void
256spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
257 const gdb_byte *buf)
258{
259 struct gdbarch *gdbarch = get_regcache_arch (regcache);
260 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
261 char reg[32];
262 char annex[32];
263 ULONGEST id;
264
265 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
266 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
267 xsnprintf (reg, sizeof reg, "0x%s",
268 phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
269 target_write (&current_target, TARGET_OBJECT_SPU, annex,
270 (gdb_byte *) reg, 0, strlen (reg));
271}
272
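/* gdbarch pseudo_register_write handler, the counterpart of
   spu_pseudo_register_read above.  */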
273static void
274spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
275 int regnum, const gdb_byte *buf)
276{
277 gdb_byte reg[16];
278 char annex[32];
279 ULONGEST id;
280
281 switch (regnum)
282 {
283 case SPU_SP_REGNUM:
284 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
285 memcpy (reg, buf, 4);
286 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
287 break;
288
289 case SPU_FPSCR_REGNUM:
290 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
291 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
292 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
293 break;
294
295 case SPU_SRR0_REGNUM:
296 spu_pseudo_register_write_spu (regcache, "srr0", buf);
297 break;
298
299 case SPU_LSLR_REGNUM:
300 spu_pseudo_register_write_spu (regcache, "lslr", buf);
301 break;
302
303 case SPU_DECR_REGNUM:
304 spu_pseudo_register_write_spu (regcache, "decr", buf);
305 break;
306
307 case SPU_DECR_STATUS_REGNUM:
308 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
309 break;
310
311 default:
312 internal_error (__FILE__, __LINE__, _("invalid regnum"));
313 }
314}
315
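/* Agent expression support for the pseudo registers.  Only the stack
   pointer can be handled, by referring to the raw SP register; the
   spufs-backed registers are not collectible here.  */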
316static int
317spu_ax_pseudo_register_collect (struct gdbarch *gdbarch,
318 struct agent_expr *ax, int regnum)
319{
320 switch (regnum)
321 {
322 case SPU_SP_REGNUM:
323 ax_reg_mask (ax, SPU_RAW_SP_REGNUM);
324 return 0;
325
326 case SPU_FPSCR_REGNUM:
327 case SPU_SRR0_REGNUM:
328 case SPU_LSLR_REGNUM:
329 case SPU_DECR_REGNUM:
330 case SPU_DECR_STATUS_REGNUM:
331 return -1;
332
333 default:
334 internal_error (__FILE__, __LINE__, _("invalid regnum"));
335 }
336}
337
338static int
339spu_ax_pseudo_register_push_stack (struct gdbarch *gdbarch,
340 struct agent_expr *ax, int regnum)
341{
342 switch (regnum)
343 {
344 case SPU_SP_REGNUM:
345 ax_reg (ax, SPU_RAW_SP_REGNUM);
346 return 0;
347
348 case SPU_FPSCR_REGNUM:
349 case SPU_SRR0_REGNUM:
350 case SPU_LSLR_REGNUM:
351 case SPU_DECR_REGNUM:
352 case SPU_DECR_STATUS_REGNUM:
353 return -1;
354
355 default:
356 internal_error (__FILE__, __LINE__, _("invalid regnum"));
357 }
358}
359
360
361/* Value conversion -- access scalar values at the preferred slot. */
362
363static struct value *
364spu_value_from_register (struct gdbarch *gdbarch, struct type *type,
365 int regnum, struct frame_id frame_id)
366{
367 struct value *value = default_value_from_register (gdbarch, type,
368 regnum, frame_id);
369 int len = TYPE_LENGTH (type);
370
371 if (regnum < SPU_NUM_GPRS && len < 16)
372 {
373 int preferred_slot = len < 4 ? 4 - len : 0;
374 set_value_offset (value, preferred_slot);
375 }
376
377 return value;
378}
379
380/* Register groups. */
381
382static int
383spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
384 struct reggroup *group)
385{
386 /* Registers displayed via 'info regs'. */
387 if (group == general_reggroup)
388 return 1;
389
390 /* Registers displayed via 'info float'. */
391 if (group == float_reggroup)
392 return 0;
393
394 /* Registers that need to be saved/restored in order to
395 push or pop frames. */
396 if (group == save_reggroup || group == restore_reggroup)
397 return 1;
398
399 return default_register_reggroup_p (gdbarch, regnum, group);
400}
401
402/* DWARF-2 register numbers. */
403
404static int
405spu_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
406{
407 /* Use cooked instead of raw SP. */
408 return (reg == SPU_RAW_SP_REGNUM)? SPU_SP_REGNUM : reg;
409}
410
411
412/* Address handling. */
413
414static int
415spu_gdbarch_id (struct gdbarch *gdbarch)
416{
417 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
418 int id = tdep->id;
419
420 /* The objfile architecture of a standalone SPU executable does not
421 provide an SPU ID. Retrieve it from the objfile's relocated
422 address range in this special case. */
423 if (id == -1
424 && symfile_objfile && symfile_objfile->obfd
425 && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
426 && symfile_objfile->sections != symfile_objfile->sections_end)
427 id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));
428
429 return id;
430}
431
432static int
433spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
434{
435 if (dwarf2_addr_class == 1)
436 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
437 else
438 return 0;
439}
440
441static const char *
442spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
443{
444 if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
445 return "__ea";
446 else
447 return NULL;
448}
449
450static int
451spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
452 const char *name, int *type_flags_ptr)
453{
454 if (strcmp (name, "__ea") == 0)
455 {
456 *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
457 return 1;
458 }
459 else
460 return 0;
461}
462
463static void
464spu_address_to_pointer (struct gdbarch *gdbarch,
465 struct type *type, gdb_byte *buf, CORE_ADDR addr)
466{
467 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
468 store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
469 SPUADDR_ADDR (addr));
470}
471
472static CORE_ADDR
473spu_pointer_to_address (struct gdbarch *gdbarch,
474 struct type *type, const gdb_byte *buf)
475{
476 int id = spu_gdbarch_id (gdbarch);
477 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
478 ULONGEST addr
479 = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);
480
481 /* Do not convert __ea pointers. */
482 if (TYPE_ADDRESS_CLASS_1 (type))
483 return addr;
484
485 return addr? SPUADDR (id, addr) : 0;
486}
487
488static CORE_ADDR
489spu_integer_to_address (struct gdbarch *gdbarch,
490 struct type *type, const gdb_byte *buf)
491{
492 int id = spu_gdbarch_id (gdbarch);
493 ULONGEST addr = unpack_long (type, buf);
494
495 return SPUADDR (id, addr);
496}
497
498
499/* Decoding SPU instructions. */
500
501enum
502 {
503 op_lqd = 0x34,
504 op_lqx = 0x3c4,
505 op_lqa = 0x61,
506 op_lqr = 0x67,
507 op_stqd = 0x24,
508 op_stqx = 0x144,
509 op_stqa = 0x41,
510 op_stqr = 0x47,
511
512 op_il = 0x081,
513 op_ila = 0x21,
514 op_a = 0x0c0,
515 op_ai = 0x1c,
516
517 op_selb = 0x8,
518
519 op_br = 0x64,
520 op_bra = 0x60,
521 op_brsl = 0x66,
522 op_brasl = 0x62,
523 op_brnz = 0x42,
524 op_brz = 0x40,
525 op_brhnz = 0x46,
526 op_brhz = 0x44,
527 op_bi = 0x1a8,
528 op_bisl = 0x1a9,
529 op_biz = 0x128,
530 op_binz = 0x129,
531 op_bihz = 0x12a,
532 op_bihnz = 0x12b,
533 };
534
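/* Instruction format decoders.  Each helper returns nonzero if INSN
   matches opcode OP in the named encoding and extracts the register and
   immediate operand fields through the output arguments.  */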
535static int
536is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
537{
538 if ((insn >> 21) == op)
539 {
540 *rt = insn & 127;
541 *ra = (insn >> 7) & 127;
542 *rb = (insn >> 14) & 127;
543 return 1;
544 }
545
546 return 0;
547}
548
549static int
550is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
551{
552 if ((insn >> 28) == op)
553 {
554 *rt = (insn >> 21) & 127;
555 *ra = (insn >> 7) & 127;
556 *rb = (insn >> 14) & 127;
557 *rc = insn & 127;
558 return 1;
559 }
560
561 return 0;
562}
563
564static int
565is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
566{
567 if ((insn >> 21) == op)
568 {
569 *rt = insn & 127;
570 *ra = (insn >> 7) & 127;
571 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
572 return 1;
573 }
574
575 return 0;
576}
577
578static int
579is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
580{
581 if ((insn >> 24) == op)
582 {
583 *rt = insn & 127;
584 *ra = (insn >> 7) & 127;
585 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
586 return 1;
587 }
588
589 return 0;
590}
591
592static int
593is_ri16 (unsigned int insn, int op, int *rt, int *i16)
594{
595 if ((insn >> 23) == op)
596 {
597 *rt = insn & 127;
598 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
599 return 1;
600 }
601
602 return 0;
603}
604
605static int
606is_ri18 (unsigned int insn, int op, int *rt, int *i18)
607{
608 if ((insn >> 25) == op)
609 {
610 *rt = insn & 127;
611 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
612 return 1;
613 }
614
615 return 0;
616}
617
618static int
619is_branch (unsigned int insn, int *offset, int *reg)
620{
621 int rt, i7, i16;
622
623 if (is_ri16 (insn, op_br, &rt, &i16)
624 || is_ri16 (insn, op_brsl, &rt, &i16)
625 || is_ri16 (insn, op_brnz, &rt, &i16)
626 || is_ri16 (insn, op_brz, &rt, &i16)
627 || is_ri16 (insn, op_brhnz, &rt, &i16)
628 || is_ri16 (insn, op_brhz, &rt, &i16))
629 {
630 *reg = SPU_PC_REGNUM;
631 *offset = i16 << 2;
632 return 1;
633 }
634
635 if (is_ri16 (insn, op_bra, &rt, &i16)
636 || is_ri16 (insn, op_brasl, &rt, &i16))
637 {
638 *reg = -1;
639 *offset = i16 << 2;
640 return 1;
641 }
642
643 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
644 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
645 || is_ri7 (insn, op_biz, &rt, reg, &i7)
646 || is_ri7 (insn, op_binz, &rt, reg, &i7)
647 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
648 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
649 {
650 *offset = 0;
651 return 1;
652 }
653
654 return 0;
655}
656
657
658/* Prolog parsing. */
659
660struct spu_prologue_data
661 {
662 /* Stack frame size. -1 if analysis was unsuccessful. */
663 int size;
664
665 /* How to find the CFA. The CFA is equal to SP at function entry. */
666 int cfa_reg;
667 int cfa_offset;
668
669 /* Offset relative to CFA where a register is saved. -1 if invalid. */
670 int reg_offset[SPU_NUM_GPRS];
671 };
672
673static CORE_ADDR
674spu_analyze_prologue (struct gdbarch *gdbarch,
675 CORE_ADDR start_pc, CORE_ADDR end_pc,
676 struct spu_prologue_data *data)
677{
678 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
679 int found_sp = 0;
680 int found_fp = 0;
681 int found_lr = 0;
682 int found_bc = 0;
683 int reg_immed[SPU_NUM_GPRS];
684 gdb_byte buf[16];
685 CORE_ADDR prolog_pc = start_pc;
686 CORE_ADDR pc;
687 int i;
688
689
690 /* Initialize DATA to default values. */
691 data->size = -1;
692
693 data->cfa_reg = SPU_RAW_SP_REGNUM;
694 data->cfa_offset = 0;
695
696 for (i = 0; i < SPU_NUM_GPRS; i++)
697 data->reg_offset[i] = -1;
698
699 /* Set up REG_IMMED array. This is non-zero for a register if we know its
700 preferred slot currently holds this immediate value. */
701 for (i = 0; i < SPU_NUM_GPRS; i++)
702 reg_immed[i] = 0;
703
704 /* Scan instructions until the first branch.
705
706 The following instructions are important prolog components:
707
708 - The first instruction to set up the stack pointer.
709 - The first instruction to set up the frame pointer.
710 - The first instruction to save the link register.
711 - The first instruction to save the backchain.
712
713 We return the instruction after the latest of these four,
714 or the incoming PC if none is found. The first instruction
715 to set up the stack pointer also defines the frame size.
716
717 Note that instructions saving incoming arguments to their stack
718 slots are not counted as important, because they are hard to
719 identify with certainty. This should not matter much, because
720 arguments are relevant only in code compiled with debug data,
721 and in such code the GDB core will advance until the first source
722 line anyway, using SAL data.
723
724 For purposes of stack unwinding, we analyze the following types
725 of instructions in addition:
726
727 - Any instruction adding to the current frame pointer.
728 - Any instruction loading an immediate constant into a register.
729 - Any instruction storing a register onto the stack.
730
731 These are used to compute the CFA and REG_OFFSET output. */
732
733 for (pc = start_pc; pc < end_pc; pc += 4)
734 {
735 unsigned int insn;
736 int rt, ra, rb, rc, immed;
737
738 if (target_read_memory (pc, buf, 4))
739 break;
740 insn = extract_unsigned_integer (buf, 4, byte_order);
741
742 /* AI is the typical instruction to set up a stack frame.
743 It is also used to initialize the frame pointer. */
744 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
745 {
746 if (rt == data->cfa_reg && ra == data->cfa_reg)
747 data->cfa_offset -= immed;
748
749 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
750 && !found_sp)
751 {
752 found_sp = 1;
753 prolog_pc = pc + 4;
754
755 data->size = -immed;
756 }
757 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
758 && !found_fp)
759 {
760 found_fp = 1;
761 prolog_pc = pc + 4;
762
763 data->cfa_reg = SPU_FP_REGNUM;
764 data->cfa_offset -= immed;
765 }
766 }
767
768 /* A is used to set up stack frames of size >= 512 bytes.
769 If we have tracked the contents of the addend register,
770 we can handle this as well. */
771 else if (is_rr (insn, op_a, &rt, &ra, &rb))
772 {
773 if (rt == data->cfa_reg && ra == data->cfa_reg)
774 {
775 if (reg_immed[rb] != 0)
776 data->cfa_offset -= reg_immed[rb];
777 else
778 data->cfa_reg = -1; /* We don't know the CFA any more. */
779 }
780
781 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
782 && !found_sp)
783 {
784 found_sp = 1;
785 prolog_pc = pc + 4;
786
787 if (reg_immed[rb] != 0)
788 data->size = -reg_immed[rb];
789 }
790 }
791
792 /* We need to track IL and ILA used to load immediate constants
793 in case they are later used as input to an A instruction. */
794 else if (is_ri16 (insn, op_il, &rt, &immed))
795 {
796 reg_immed[rt] = immed;
797
798 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
799 found_sp = 1;
800 }
801
802 else if (is_ri18 (insn, op_ila, &rt, &immed))
803 {
804 reg_immed[rt] = immed & 0x3ffff;
805
806 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
807 found_sp = 1;
808 }
809
810 /* STQD is used to save registers to the stack. */
811 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
812 {
813 if (ra == data->cfa_reg)
814 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
815
816 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
817 && !found_lr)
818 {
819 found_lr = 1;
820 prolog_pc = pc + 4;
821 }
822
823 if (ra == SPU_RAW_SP_REGNUM
824 && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
825 && !found_bc)
826 {
827 found_bc = 1;
828 prolog_pc = pc + 4;
829 }
830 }
831
832 /* _start uses SELB to set up the stack pointer. */
833 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
834 {
835 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
836 found_sp = 1;
837 }
838
839 /* We terminate if we find a branch. */
840 else if (is_branch (insn, &immed, &ra))
841 break;
842 }
843
844
845 /* If we successfully parsed until here, and didn't find any instruction
846 modifying SP, we assume we have a frameless function. */
847 if (!found_sp)
848 data->size = 0;
849
850 /* Return cooked instead of raw SP. */
851 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
852 data->cfa_reg = SPU_SP_REGNUM;
853
854 return prolog_pc;
855}
856
857/* Return the first instruction after the prologue starting at PC. */
858static CORE_ADDR
859spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
860{
861 struct spu_prologue_data data;
862 return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
863}
864
865/* Return the frame pointer in use at address PC. */
866static void
867spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
868 int *reg, LONGEST *offset)
869{
870 struct spu_prologue_data data;
871 spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
872
873 if (data.size != -1 && data.cfa_reg != -1)
874 {
875 /* The 'frame pointer' address is CFA minus frame size. */
876 *reg = data.cfa_reg;
877 *offset = data.cfa_offset - data.size;
878 }
879 else
880 {
881 /* ??? We don't really know ... */
882 *reg = SPU_SP_REGNUM;
883 *offset = 0;
884 }
885}
886
887/* Return true if we are in the function's epilogue, i.e. after the
888 instruction that destroyed the function's stack frame.
889
890 1) scan forward from the point of execution:
891 a) If you find an instruction that modifies the stack pointer
892 or transfers control (except a return), execution is not in
893 an epilogue, return.
894 b) Stop scanning if you find a return instruction or reach the
895 end of the function or reach the hard limit for the size of
896 an epilogue.
897 2) scan backward from the point of execution:
898 a) If you find an instruction that modifies the stack pointer,
899 execution *is* in an epilogue, return.
900 b) Stop scanning if you reach an instruction that transfers
901 control or the beginning of the function or reach the hard
902 limit for the size of an epilogue. */
903
904static int
905spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
906{
907 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
908 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
909 bfd_byte buf[4];
910 unsigned int insn;
911 int rt, ra, rb, immed;
912
913 /* Find the search limits based on function boundaries and hard limit.
914 We assume the epilogue can be up to 64 instructions long. */
915
916 const int spu_max_epilogue_size = 64 * 4;
917
918 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
919 return 0;
920
921 if (pc - func_start < spu_max_epilogue_size)
922 epilogue_start = func_start;
923 else
924 epilogue_start = pc - spu_max_epilogue_size;
925
926 if (func_end - pc < spu_max_epilogue_size)
927 epilogue_end = func_end;
928 else
929 epilogue_end = pc + spu_max_epilogue_size;
930
931 /* Scan forward until next 'bi $0'. */
932
933 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
934 {
935 if (target_read_memory (scan_pc, buf, 4))
936 return 0;
937 insn = extract_unsigned_integer (buf, 4, byte_order);
938
939 if (is_branch (insn, &immed, &ra))
940 {
941 if (immed == 0 && ra == SPU_LR_REGNUM)
942 break;
943
944 return 0;
945 }
946
947 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
948 || is_rr (insn, op_a, &rt, &ra, &rb)
949 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
950 {
951 if (rt == SPU_RAW_SP_REGNUM)
952 return 0;
953 }
954 }
955
956 if (scan_pc >= epilogue_end)
957 return 0;
958
959 /* Scan backward until adjustment to stack pointer (R1). */
960
961 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
962 {
963 if (target_read_memory (scan_pc, buf, 4))
964 return 0;
965 insn = extract_unsigned_integer (buf, 4, byte_order);
966
967 if (is_branch (insn, &immed, &ra))
968 return 0;
969
970 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
971 || is_rr (insn, op_a, &rt, &ra, &rb)
972 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
973 {
974 if (rt == SPU_RAW_SP_REGNUM)
975 return 1;
976 }
977 }
978
979 return 0;
980}
981
982
983/* Normal stack frames. */
984
985struct spu_unwind_cache
986{
987 CORE_ADDR func;
988 CORE_ADDR frame_base;
989 CORE_ADDR local_base;
990
991 struct trad_frame_saved_reg *saved_regs;
992};
993
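/* Build (or return the already cached) unwind cache for THIS_FRAME,
   using prologue analysis where possible and falling back to the stack
   back chain otherwise.  */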
994static struct spu_unwind_cache *
995spu_frame_unwind_cache (struct frame_info *this_frame,
996 void **this_prologue_cache)
997{
998 struct gdbarch *gdbarch = get_frame_arch (this_frame);
999 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1000 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1001 struct spu_unwind_cache *info;
1002 struct spu_prologue_data data;
1003 CORE_ADDR id = tdep->id;
1004 gdb_byte buf[16];
1005
1006 if (*this_prologue_cache)
1007 return *this_prologue_cache;
1008
1009 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
1010 *this_prologue_cache = info;
1011 info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1012 info->frame_base = 0;
1013 info->local_base = 0;
1014
1015 /* Find the start of the current function, and analyze its prologue. */
1016 info->func = get_frame_func (this_frame);
1017 if (info->func == 0)
1018 {
1019 /* Fall back to using the current PC as frame ID. */
1020 info->func = get_frame_pc (this_frame);
1021 data.size = -1;
1022 }
1023 else
1024 spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
1025 &data);
1026
1027 /* If successful, use prologue analysis data. */
1028 if (data.size != -1 && data.cfa_reg != -1)
1029 {
1030 CORE_ADDR cfa;
1031 int i;
1032
1033 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
1034 get_frame_register (this_frame, data.cfa_reg, buf);
1035 cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
1036 cfa = SPUADDR (id, cfa);
1037
1038 /* Call-saved register slots. */
1039 for (i = 0; i < SPU_NUM_GPRS; i++)
1040 if (i == SPU_LR_REGNUM
1041 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
1042 if (data.reg_offset[i] != -1)
1043 info->saved_regs[i].addr = cfa - data.reg_offset[i];
1044
1045 /* Frame bases. */
1046 info->frame_base = cfa;
1047 info->local_base = cfa - data.size;
1048 }
1049
1050 /* Otherwise, fall back to reading the backchain link. */
1051 else
1052 {
1053 CORE_ADDR reg;
1054 LONGEST backchain;
1055 ULONGEST lslr;
1056 int status;
1057
1058 /* Get local store limit. */
1059 lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
1060 if (!lslr)
1061 lslr = (ULONGEST) -1;
1062
1063 /* Get the backchain. */
1064 reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1065 status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
1066 &backchain);
771b4502
UW
1067
1068 /* A zero backchain terminates the frame chain. Also, sanity
1069 check against the local store size limit. */
1070 if (status && backchain > 0 && backchain <= lslr)
1071 {
1072 /* Assume the link register is saved into its slot. */
1073 if (backchain + 16 <= lslr)
1074 info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
1075 backchain + 16);
1076
1077 /* Frame bases. */
1078 info->frame_base = SPUADDR (id, backchain);
1079 info->local_base = SPUADDR (id, reg);
1080 }
1081 }
1082
1083 /* If we didn't find a frame, we cannot determine SP / return address. */
1084 if (info->frame_base == 0)
1085 return info;
1086
1087 /* The previous SP is equal to the CFA. */
1088 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
1089 SPUADDR_ADDR (info->frame_base));
1090
1091 /* Read full contents of the unwound link register in order to
1092 be able to determine the return address. */
1093 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
1094 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
1095 else
1096 get_frame_register (this_frame, SPU_LR_REGNUM, buf);
1097
1098 /* Normally, the return address is contained in the slot 0 of the
1099 link register, and slots 1-3 are zero. For an overlay return,
1100 slot 0 contains the address of the overlay manager return stub,
1101 slot 1 contains the partition number of the overlay section to
1102 be returned to, and slot 2 contains the return address within
1103 that section. Return the latter address in that case. */
1104 if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
1105 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
1106 extract_unsigned_integer (buf + 8, 4, byte_order));
1107 else
1108 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
1109 extract_unsigned_integer (buf, 4, byte_order));
1110
1111 return info;
1112}
1113
1114static void
1115spu_frame_this_id (struct frame_info *this_frame,
1116 void **this_prologue_cache, struct frame_id *this_id)
1117{
1118 struct spu_unwind_cache *info =
1119 spu_frame_unwind_cache (this_frame, this_prologue_cache);
1120
1121 if (info->frame_base == 0)
1122 return;
1123
1124 *this_id = frame_id_build (info->frame_base, info->func);
1125}
1126
1127static struct value *
1128spu_frame_prev_register (struct frame_info *this_frame,
1129 void **this_prologue_cache, int regnum)
1130{
1131 struct spu_unwind_cache *info
1132 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
1133
1134 /* Special-case the stack pointer. */
1135 if (regnum == SPU_RAW_SP_REGNUM)
1136 regnum = SPU_SP_REGNUM;
1137
1138 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
1139}
1140
1141static const struct frame_unwind spu_frame_unwind = {
1142 NORMAL_FRAME,
1143 default_frame_unwind_stop_reason,
1144 spu_frame_this_id,
1145 spu_frame_prev_register,
1146 NULL,
1147 default_frame_sniffer
1148};
1149
1150static CORE_ADDR
1151spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
1152{
1153 struct spu_unwind_cache *info
1154 = spu_frame_unwind_cache (this_frame, this_cache);
1155 return info->local_base;
1156}
1157
1158static const struct frame_base spu_frame_base = {
1159 &spu_frame_unwind,
1160 spu_frame_base_address,
1161 spu_frame_base_address,
1162 spu_frame_base_address
1163};
1164
1165static CORE_ADDR
1166spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
1167{
1168 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1169 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
1170 /* Mask off interrupt enable bit. */
1171 return SPUADDR (tdep->id, pc & -4);
1172}
1173
1174static CORE_ADDR
1175spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1176{
1177 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1178 CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1179 return SPUADDR (tdep->id, sp);
1180}
1181
1182static CORE_ADDR
1183spu_read_pc (struct regcache *regcache)
1184{
1185 struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
1186 ULONGEST pc;
1187 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1188 /* Mask off interrupt enable bit. */
1189 return SPUADDR (tdep->id, pc & -4);
1190}
1191
1192static void
1193spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1194{
1195 /* Keep interrupt enabled state unchanged. */
1196 ULONGEST old_pc;
1197
1198 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1199 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1200 (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
1201}
1202
1203
1204/* Cell/B.E. cross-architecture unwinder support. */
1205
1206struct spu2ppu_cache
1207{
1208 struct frame_id frame_id;
1209 struct regcache *regcache;
1210};
1211
1212static struct gdbarch *
1213spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
1214{
1215 struct spu2ppu_cache *cache = *this_cache;
1216 return get_regcache_arch (cache->regcache);
1217}
1218
1219static void
1220spu2ppu_this_id (struct frame_info *this_frame,
1221 void **this_cache, struct frame_id *this_id)
1222{
1223 struct spu2ppu_cache *cache = *this_cache;
1224 *this_id = cache->frame_id;
1225}
1226
1227static struct value *
1228spu2ppu_prev_register (struct frame_info *this_frame,
1229 void **this_cache, int regnum)
1230{
1231 struct spu2ppu_cache *cache = *this_cache;
1232 struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
1233 gdb_byte *buf;
1234
1235 buf = alloca (register_size (gdbarch, regnum));
1236 regcache_cooked_read (cache->regcache, regnum, buf);
1237 return frame_unwind_got_bytes (this_frame, regnum, buf);
1238}
1239
1240static int
1241spu2ppu_sniffer (const struct frame_unwind *self,
1242 struct frame_info *this_frame, void **this_prologue_cache)
1243{
1244 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1245 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1246 CORE_ADDR base, func, backchain;
1247 gdb_byte buf[4];
1248
1249 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_spu)
1250 return 0;
1251
1252 base = get_frame_sp (this_frame);
1253 func = get_frame_pc (this_frame);
1254 if (target_read_memory (base, buf, 4))
1255 return 0;
1256 backchain = extract_unsigned_integer (buf, 4, byte_order);
1257
1258 if (!backchain)
1259 {
1260 struct frame_info *fi;
1261
1262 struct spu2ppu_cache *cache
1263 = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);
1264
1265 cache->frame_id = frame_id_build (base + 16, func);
1266
1267 for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
1268 if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
1269 break;
1270
1271 if (fi)
1272 {
1273 cache->regcache = frame_save_as_regcache (fi);
1274 *this_prologue_cache = cache;
1275 return 1;
1276 }
1277 else
1278 {
1279 struct regcache *regcache;
1280 regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
1281 cache->regcache = regcache_dup (regcache);
1282 *this_prologue_cache = cache;
1283 return 1;
1284 }
1285 }
1286
1287 return 0;
1288}
1289
1290static void
1291spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
1292{
1293 struct spu2ppu_cache *cache = this_cache;
1294 regcache_xfree (cache->regcache);
1295}
1296
1297static const struct frame_unwind spu2ppu_unwind = {
1298 ARCH_FRAME,
1299 default_frame_unwind_stop_reason,
1300 spu2ppu_this_id,
1301 spu2ppu_prev_register,
1302 NULL,
1303 spu2ppu_sniffer,
1304 spu2ppu_dealloc_cache,
1305 spu2ppu_prev_arch,
1306};
1307
1308
1309/* Function calling convention. */
1310
1311static CORE_ADDR
1312spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1313{
1314 return sp & ~15;
1315}
1316
1317static CORE_ADDR
1318spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1319 struct value **args, int nargs, struct type *value_type,
1320 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1321 struct regcache *regcache)
1322{
1323 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1324 sp = (sp - 4) & ~15;
1325 /* Store the address of that breakpoint */
1326 *bp_addr = sp;
1327 /* The call starts at the callee's entry point. */
1328 *real_pc = funaddr;
1329
1330 return sp;
1331}
1332
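/* Return nonzero if TYPE is a scalar of at most 16 bytes, i.e. a value
   that is passed and returned in the preferred slot of a single
   register.  */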
1333static int
1334spu_scalar_value_p (struct type *type)
1335{
1336 switch (TYPE_CODE (type))
1337 {
1338 case TYPE_CODE_INT:
1339 case TYPE_CODE_ENUM:
1340 case TYPE_CODE_RANGE:
1341 case TYPE_CODE_CHAR:
1342 case TYPE_CODE_BOOL:
1343 case TYPE_CODE_PTR:
1344 case TYPE_CODE_REF:
1345 return TYPE_LENGTH (type) <= 16;
1346
1347 default:
1348 return 0;
1349 }
1350}
1351
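/* Copy the value IN of type TYPE into consecutive registers starting at
   REGNUM, placing scalars in the preferred slot.  spu_regcache_to_value
   below performs the inverse operation.  */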
1352static void
1353spu_value_to_regcache (struct regcache *regcache, int regnum,
1354 struct type *type, const gdb_byte *in)
1355{
1356 int len = TYPE_LENGTH (type);
1357
1358 if (spu_scalar_value_p (type))
1359 {
1360 int preferred_slot = len < 4 ? 4 - len : 0;
1361 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1362 }
1363 else
1364 {
1365 while (len >= 16)
1366 {
1367 regcache_cooked_write (regcache, regnum++, in);
1368 in += 16;
1369 len -= 16;
1370 }
1371
1372 if (len > 0)
1373 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1374 }
1375}
1376
1377static void
1378spu_regcache_to_value (struct regcache *regcache, int regnum,
1379 struct type *type, gdb_byte *out)
1380{
1381 int len = TYPE_LENGTH (type);
1382
1383 if (spu_scalar_value_p (type))
1384 {
1385 int preferred_slot = len < 4 ? 4 - len : 0;
1386 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1387 }
1388 else
1389 {
1390 while (len >= 16)
1391 {
1392 regcache_cooked_read (regcache, regnum++, out);
1393 out += 16;
1394 len -= 16;
1395 }
1396
1397 if (len > 0)
1398 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1399 }
1400}
1401
1402static CORE_ADDR
1403spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1404 struct regcache *regcache, CORE_ADDR bp_addr,
1405 int nargs, struct value **args, CORE_ADDR sp,
1406 int struct_return, CORE_ADDR struct_addr)
1407{
1408 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1409 CORE_ADDR sp_delta;
1410 int i;
1411 int regnum = SPU_ARG1_REGNUM;
1412 int stack_arg = -1;
1413 gdb_byte buf[16];
1414
1415 /* Set the return address. */
1416 memset (buf, 0, sizeof buf);
1417 store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
1418 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1419
1420 /* If STRUCT_RETURN is true, then the struct return address (in
1421 STRUCT_ADDR) will consume the first argument-passing register.
1422 Both adjust the register count and store that value. */
1423 if (struct_return)
1424 {
1425 memset (buf, 0, sizeof buf);
1426 store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
1427 regcache_cooked_write (regcache, regnum++, buf);
1428 }
1429
1430 /* Fill in argument registers. */
1431 for (i = 0; i < nargs; i++)
1432 {
1433 struct value *arg = args[i];
1434 struct type *type = check_typedef (value_type (arg));
1435 const gdb_byte *contents = value_contents (arg);
1436 int n_regs = align_up (TYPE_LENGTH (type), 16) / 16;
1437
1438 /* If the argument doesn't wholly fit into registers, it and
1439 all subsequent arguments go to the stack. */
1440 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1441 {
1442 stack_arg = i;
1443 break;
1444 }
1445
1446 spu_value_to_regcache (regcache, regnum, type, contents);
1447 regnum += n_regs;
1448 }
1449
1450 /* Overflow arguments go to the stack. */
1451 if (stack_arg != -1)
1452 {
1453 CORE_ADDR ap;
1454
1455 /* Allocate all required stack size. */
1456 for (i = stack_arg; i < nargs; i++)
1457 {
1458 struct type *type = check_typedef (value_type (args[i]));
1459 sp -= align_up (TYPE_LENGTH (type), 16);
1460 }
1461
1462 /* Fill in stack arguments. */
1463 ap = sp;
1464 for (i = stack_arg; i < nargs; i++)
1465 {
1466 struct value *arg = args[i];
1467 struct type *type = check_typedef (value_type (arg));
1468 int len = TYPE_LENGTH (type);
1469 int preferred_slot;
1470
1471 if (spu_scalar_value_p (type))
1472 preferred_slot = len < 4 ? 4 - len : 0;
1473 else
1474 preferred_slot = 0;
1475
1476 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1477 ap += align_up (TYPE_LENGTH (type), 16);
1478 }
1479 }
1480
1481 /* Allocate stack frame header. */
1482 sp -= 32;
1483
1484 /* Store stack back chain. */
1485 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1486 target_write_memory (sp, buf, 16);
1487
1488 /* Finally, update all slots of the SP register. */
1489 sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
1490 for (i = 0; i < 4; i++)
1491 {
1492 CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
1493 store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
1494 }
1495 regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);
1496
1497 return sp;
1498}
1499
1500static struct frame_id
1501spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1502{
1503 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1504 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1505 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1506 return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
1507}
1508
1509/* Function return value access. */
1510
1511static enum return_value_convention
1512spu_return_value (struct gdbarch *gdbarch, struct value *function,
1513 struct type *type, struct regcache *regcache,
1514 gdb_byte *out, const gdb_byte *in)
1515{
1516 struct type *func_type = function ? value_type (function) : NULL;
1517 enum return_value_convention rvc;
1518 int opencl_vector = 0;
1519
1520 if (func_type)
1521 {
1522 func_type = check_typedef (func_type);
1523
1524 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1525 func_type = check_typedef (TYPE_TARGET_TYPE (func_type));
1526
1527 if (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1528 && TYPE_CALLING_CONVENTION (func_type) == DW_CC_GDB_IBM_OpenCL
1529 && TYPE_CODE (type) == TYPE_CODE_ARRAY
1530 && TYPE_VECTOR (type))
1531 opencl_vector = 1;
1532 }
1533
1534 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1535 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1536 else
1537 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1538
1539 if (in)
1540 {
1541 switch (rvc)
1542 {
1543 case RETURN_VALUE_REGISTER_CONVENTION:
1544 if (opencl_vector && TYPE_LENGTH (type) == 2)
1545 regcache_cooked_write_part (regcache, SPU_ARG1_REGNUM, 2, 2, in);
1546 else
1547 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1548 break;
1549
1550 case RETURN_VALUE_STRUCT_CONVENTION:
1551 error (_("Cannot set function return value."));
1552 break;
1553 }
1554 }
1555 else if (out)
1556 {
1557 switch (rvc)
1558 {
1559 case RETURN_VALUE_REGISTER_CONVENTION:
1560 if (opencl_vector && TYPE_LENGTH (type) == 2)
1561 regcache_cooked_read_part (regcache, SPU_ARG1_REGNUM, 2, 2, out);
1562 else
1563 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1564 break;
1565
1566 case RETURN_VALUE_STRUCT_CONVENTION:
1567 error (_("Function return value unknown."));
1568 break;
1569 }
1570 }
1571
1572 return rvc;
1573}
1574
1575
1576/* Breakpoints. */
1577
1578static const gdb_byte *
1579spu_breakpoint_from_pc (struct gdbarch *gdbarch,
1580 CORE_ADDR * pcptr, int *lenptr)
1581{
1582 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1583
1584 *lenptr = sizeof breakpoint;
1585 return breakpoint;
1586}
1587
1588static int
1589spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
1590 struct bp_target_info *bp_tgt)
1591{
1592 /* We work around a problem in combined Cell/B.E. debugging here. Consider
1593 that in a combined application, we have some breakpoints inserted in SPU
1594 code, and now the application forks (on the PPU side). GDB common code
1595 will assume that the fork system call copied all breakpoints into the new
1596 process' address space, and that all those copies now need to be removed
1597 (see breakpoint.c:detach_breakpoints).
1598
1599 While this is certainly true for PPU side breakpoints, it is not true
1600 for SPU side breakpoints. fork will clone the SPU context file
1601 descriptors, so that all the existing SPU contexts are accessible
1602 in the new process. However, the contents of the SPU contexts themselves
1603 are *not* cloned. Therefore the effect of detach_breakpoints is to
1604 remove SPU breakpoints from the *original* SPU context's local store
1605 -- this is not the correct behaviour.
1606
1607 The workaround is to check whether the PID we are asked to remove this
1608 breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
1609 PID of the current inferior (i.e. current_inferior ()->pid). This is only
1610 true in the context of detach_breakpoints. If so, we simply do nothing.
1611 [ Note that for the fork child process, it does not matter if breakpoints
1612 remain inserted, because those SPU contexts are not runnable anyway --
1613 the Linux kernel allows only the original process to invoke spu_run. */
1614
1615 if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
1616 return 0;
1617
1618 return default_memory_remove_breakpoint (gdbarch, bp_tgt);
1619}
1620
1621
1622/* Software single-stepping support. */
1623
1624static int
1625spu_software_single_step (struct frame_info *frame)
1626{
1627 struct gdbarch *gdbarch = get_frame_arch (frame);
1628 struct address_space *aspace = get_frame_address_space (frame);
1629 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1630 CORE_ADDR pc, next_pc;
1631 unsigned int insn;
1632 int offset, reg;
1633 gdb_byte buf[4];
1634 ULONGEST lslr;
1635
1636 pc = get_frame_pc (frame);
1637
1638 if (target_read_memory (pc, buf, 4))
1639 return 1;
1640 insn = extract_unsigned_integer (buf, 4, byte_order);
1641
1642 /* Get local store limit. */
1643 lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
1644 if (!lslr)
1645 lslr = (ULONGEST) -1;
1646
1647 /* Next sequential instruction is at PC + 4, except if the current
1648 instruction is a PPE-assisted call, in which case it is at PC + 8.
1649 Wrap around LS limit to be on the safe side. */
1650 if ((insn & 0xffffff00) == 0x00002100)
1651 next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
1652 else
1653 next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;
1654
1655 insert_single_step_breakpoint (gdbarch,
1656 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));
1657
1658 if (is_branch (insn, &offset, &reg))
1659 {
1660 CORE_ADDR target = offset;
1661
1662 if (reg == SPU_PC_REGNUM)
1663 target += SPUADDR_ADDR (pc);
1664 else if (reg != -1)
1665 {
1666 int optim, unavail;
1667
1668 if (get_frame_register_bytes (frame, reg, 0, 4, buf,
1669 &optim, &unavail))
1670 target += extract_unsigned_integer (buf, 4, byte_order) & -4;
1671 else
1672 {
1673 if (optim)
1674 throw_error (OPTIMIZED_OUT_ERROR,
1675 _("Could not determine address of "
1676 "single-step breakpoint."));
1677 if (unavail)
1678 throw_error (NOT_AVAILABLE_ERROR,
1679 _("Could not determine address of "
1680 "single-step breakpoint."));
1681 }
1682 }
1683
1684 target = target & lslr;
1685 if (target != next_pc)
1686 insert_single_step_breakpoint (gdbarch, aspace,
1687 SPUADDR (SPUADDR_SPU (pc), target));
1688 }
1689
1690 return 1;
1691}
1692
1693
1694/* Longjmp support. */
1695
1696static int
1697spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1698{
1699 struct gdbarch *gdbarch = get_frame_arch (frame);
1700 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1701 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1702 gdb_byte buf[4];
1703 CORE_ADDR jb_addr;
1704 int optim, unavail;
1705
1706 /* Jump buffer is pointed to by the argument register $r3. */
1707 if (!get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf,
1708 &optim, &unavail))
1709 return 0;
1710
1711 jb_addr = extract_unsigned_integer (buf, 4, byte_order);
1712 if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
1713 return 0;
1714
1715 *pc = extract_unsigned_integer (buf, 4, byte_order);
1716 *pc = SPUADDR (tdep->id, *pc);
1717 return 1;
1718}
1719
1720
1721/* Disassembler. */
1722
1723struct spu_dis_asm_data
1724{
1725 struct gdbarch *gdbarch;
1726 int id;
1727};
1728
1729static void
1730spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
1731{
1732 struct spu_dis_asm_data *data = info->application_data;
1733 print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
1734}
1735
1736static int
1737gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
1738{
1739 /* The opcodes disassembler does 18-bit address arithmetic. Make
1740 sure the SPU ID encoded in the high bits is added back when we
1741 call print_address. */
1742 struct disassemble_info spu_info = *info;
1743 struct spu_dis_asm_data data;
1744 data.gdbarch = info->application_data;
1745 data.id = SPUADDR_SPU (memaddr);
1746
1747 spu_info.application_data = &data;
1748 spu_info.print_address_func = spu_dis_asm_print_address;
1749 return print_insn_spu (memaddr, &spu_info);
1750}
1751
1752
1753/* Target overlays for the SPU overlay manager.
1754
1755 See the documentation of simple_overlay_update for how the
1756 interface is supposed to work.
1757
1758 Data structures used by the overlay manager:
1759
1760 struct ovly_table
1761 {
1762 u32 vma;
1763 u32 size;
1764 u32 pos;
1765 u32 buf;
1766 } _ovly_table[]; -- one entry per overlay section
1767
1768 struct ovly_buf_table
1769 {
1770 u32 mapped;
1771 } _ovly_buf_table[]; -- one entry per overlay buffer
1772
1773 _ovly_table should never change.
1774
1775 Both tables are aligned to a 16-byte boundary, the symbols
1776 _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
1777 size set to the size of the respective array. buf in _ovly_table is
1778 an index into _ovly_buf_table.
1779
1780 mapped is an index into _ovly_table. Both the mapped and buf indices start
1781 from one to reference the first entry in their respective tables. */
1782
1783/* Using the per-objfile private data mechanism, we store for each
1784 objfile an array of "struct spu_overlay_table" structures, one
1785 for each obj_section of the objfile. This structure holds two
1786 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1787 is *not* an overlay section. If it is non-zero, it represents
1788 a target address. The overlay section is mapped iff the target
1789 integer at this location equals MAPPED_VAL. */
1790
1791static const struct objfile_data *spu_overlay_data;
1792
1793struct spu_overlay_table
1794 {
1795 CORE_ADDR mapped_ptr;
1796 CORE_ADDR mapped_val;
1797 };
1798
1799/* Retrieve the overlay table for OBJFILE. If not already cached, read
1800 the _ovly_table data structure from the target and initialize the
1801 spu_overlay_table data structure from it. */
1802static struct spu_overlay_table *
1803spu_get_overlay_table (struct objfile *objfile)
1804{
1805 enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
1806 BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1807 struct bound_minimal_symbol ovly_table_msym, ovly_buf_table_msym;
1808 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1809 unsigned ovly_table_size, ovly_buf_table_size;
1810 struct spu_overlay_table *tbl;
1811 struct obj_section *osect;
948f8e3d 1812 gdb_byte *ovly_table;
dcf52cd8
UW
1813 int i;
1814
1815 tbl = objfile_data (objfile, spu_overlay_data);
1816 if (tbl)
1817 return tbl;
1818
1819 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
3b7344d5 1820 if (!ovly_table_msym.minsym)
dcf52cd8
UW
1821 return NULL;
1822
c378eb4e
MS
1823 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
1824 NULL, objfile);
3b7344d5 1825 if (!ovly_buf_table_msym.minsym)
dcf52cd8
UW
1826 return NULL;
1827
77e371c0 1828 ovly_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_table_msym);
3b7344d5 1829 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym.minsym);
dcf52cd8 1830
77e371c0 1831 ovly_buf_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
3b7344d5 1832 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym.minsym);
dcf52cd8
UW
1833
1834 ovly_table = xmalloc (ovly_table_size);
1835 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1836
1837 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1838 objfile->sections_end - objfile->sections,
1839 struct spu_overlay_table);
1840
1841 for (i = 0; i < ovly_table_size / 16; i++)
1842 {
e17a4113
UW
1843 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0,
1844 4, byte_order);
1845 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
1846 4, byte_order);
1847 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8,
1848 4, byte_order);
1849 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12,
1850 4, byte_order);
dcf52cd8
UW
1851
1852 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1853 continue;
1854
1855 ALL_OBJFILE_OSECTIONS (objfile, osect)
1856 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1857 && pos == osect->the_bfd_section->filepos)
1858 {
1859 int ndx = osect - objfile->sections;
1860 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1861 tbl[ndx].mapped_val = i + 1;
1862 break;
1863 }
1864 }
1865
1866 xfree (ovly_table);
1867 set_objfile_data (objfile, spu_overlay_data, tbl);
1868 return tbl;
1869}
1870
 1871/* Read _ovly_buf_table entry from the target to determine whether
1872 OSECT is currently mapped, and update the mapped state. */
1873static void
1874spu_overlay_update_osect (struct obj_section *osect)
1875{
e17a4113
UW
1876 enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
1877 BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
dcf52cd8 1878 struct spu_overlay_table *ovly_table;
85e747d2 1879 CORE_ADDR id, val;
dcf52cd8
UW
1880
1881 ovly_table = spu_get_overlay_table (osect->objfile);
1882 if (!ovly_table)
1883 return;
1884
1885 ovly_table += osect - osect->objfile->sections;
1886 if (ovly_table->mapped_ptr == 0)
1887 return;
1888
85e747d2
UW
1889 id = SPUADDR_SPU (obj_section_addr (osect));
1890 val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
1891 4, byte_order);
dcf52cd8
UW
1892 osect->ovly_mapped = (val == ovly_table->mapped_val);
1893}
1894
1895/* If OSECT is NULL, then update all sections' mapped state.
1896 If OSECT is non-NULL, then update only OSECT's mapped state. */
1897static void
1898spu_overlay_update (struct obj_section *osect)
1899{
1900 /* Just one section. */
1901 if (osect)
1902 spu_overlay_update_osect (osect);
1903
1904 /* All sections. */
1905 else
1906 {
1907 struct objfile *objfile;
1908
1909 ALL_OBJSECTIONS (objfile, osect)
714835d5 1910 if (section_is_overlay (osect))
dcf52cd8
UW
1911 spu_overlay_update_osect (osect);
1912 }
1913}
1914
1915/* Whenever a new objfile is loaded, read the target's _ovly_table.
1916 If there is one, go through all sections and make sure for non-
1917 overlay sections LMA equals VMA, while for overlay sections LMA
d2ed6730 1918 is larger than SPU_OVERLAY_LMA. */
dcf52cd8
UW
1919static void
1920spu_overlay_new_objfile (struct objfile *objfile)
1921{
1922 struct spu_overlay_table *ovly_table;
1923 struct obj_section *osect;
1924
1925 /* If we've already touched this file, do nothing. */
1926 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1927 return;
1928
0391f248
UW
1929 /* Consider only SPU objfiles. */
1930 if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1931 return;
1932
dcf52cd8
UW
1933 /* Check if this objfile has overlays. */
1934 ovly_table = spu_get_overlay_table (objfile);
1935 if (!ovly_table)
1936 return;
1937
1938 /* Now go and fiddle with all the LMAs. */
1939 ALL_OBJFILE_OSECTIONS (objfile, osect)
1940 {
1941 bfd *obfd = objfile->obfd;
1942 asection *bsect = osect->the_bfd_section;
1943 int ndx = osect - objfile->sections;
1944
1945 if (ovly_table[ndx].mapped_ptr == 0)
1946 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1947 else
d2ed6730 1948 bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
dcf52cd8
UW
1949 }
1950}
1951
771b4502 1952
3285f3fe
UW
1953/* Insert temporary breakpoint on "main" function of newly loaded
1954 SPE context OBJFILE. */
1955static void
1956spu_catch_start (struct objfile *objfile)
1957{
3b7344d5 1958 struct bound_minimal_symbol minsym;
3285f3fe
UW
1959 struct symtab *symtab;
1960 CORE_ADDR pc;
1961 char buf[32];
1962
1963 /* Do this only if requested by "set spu stop-on-load on". */
1964 if (!spu_stop_on_load_p)
1965 return;
1966
1967 /* Consider only SPU objfiles. */
1968 if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1969 return;
1970
1971 /* The main objfile is handled differently. */
1972 if (objfile == symfile_objfile)
1973 return;
1974
1975 /* There can be multiple symbols named "main". Search for the
1976 "main" in *this* objfile. */
1977 minsym = lookup_minimal_symbol ("main", NULL, objfile);
3b7344d5 1978 if (!minsym.minsym)
3285f3fe
UW
1979 return;
1980
1981 /* If we have debugging information, try to use it -- this
1982 will allow us to properly skip the prologue. */
77e371c0 1983 pc = BMSYMBOL_VALUE_ADDRESS (minsym);
3b7344d5
TT
1984 symtab = find_pc_sect_symtab (pc, MSYMBOL_OBJ_SECTION (minsym.objfile,
1985 minsym.minsym));
3285f3fe
UW
1986 if (symtab != NULL)
1987 {
346d1dfe 1988 const struct blockvector *bv = BLOCKVECTOR (symtab);
3285f3fe
UW
1989 struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
1990 struct symbol *sym;
1991 struct symtab_and_line sal;
1992
94af9270 1993 sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
3285f3fe
UW
1994 if (sym)
1995 {
1996 fixup_symbol_section (sym, objfile);
1997 sal = find_function_start_sal (sym, 1);
1998 pc = sal.pc;
1999 }
2000 }
2001
2002 /* Use a numerical address for the set_breakpoint command to avoid having
2003 the breakpoint re-set incorrectly. */
2004 xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
d8c09fb5
JK
2005 create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
2006 NULL /* cond_string */, -1 /* thread */,
6a609e58 2007 NULL /* extra_string */,
d8c09fb5 2008 0 /* parse_condition_and_thread */, 1 /* tempflag */,
bddaafad 2009 bp_breakpoint /* type_wanted */,
d8c09fb5
JK
2010 0 /* ignore_count */,
2011 AUTO_BOOLEAN_FALSE /* pending_break_support */,
931bb47f 2012 &bkpt_breakpoint_ops /* ops */, 0 /* from_tty */,
44f238bb 2013 1 /* enabled */, 0 /* internal */, 0);
3285f3fe
UW
2014}
2015
2016
ff1a52c6
UW
2017/* Look up OBJFILE loaded into FRAME's SPU context. */
2018static struct objfile *
2019spu_objfile_from_frame (struct frame_info *frame)
2020{
2021 struct gdbarch *gdbarch = get_frame_arch (frame);
2022 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2023 struct objfile *obj;
2024
2025 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2026 return NULL;
2027
2028 ALL_OBJFILES (obj)
2029 {
2030 if (obj->sections != obj->sections_end
2031 && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
2032 return obj;
2033 }
2034
2035 return NULL;
2036}
2037
 2038/* Flush the software-managed cache used for __ea pointer access, if available. */
2039static void
2040flush_ea_cache (void)
2041{
3b7344d5 2042 struct bound_minimal_symbol msymbol;
ff1a52c6
UW
2043 struct objfile *obj;
2044
2045 if (!has_stack_frames ())
2046 return;
2047
2048 obj = spu_objfile_from_frame (get_current_frame ());
2049 if (obj == NULL)
2050 return;
2051
2052 /* Lookup inferior function __cache_flush. */
2053 msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
3b7344d5 2054 if (msymbol.minsym != NULL)
ff1a52c6
UW
2055 {
2056 struct type *type;
2057 CORE_ADDR addr;
2058
2059 type = objfile_type (obj)->builtin_void;
2060 type = lookup_function_type (type);
2061 type = lookup_pointer_type (type);
77e371c0 2062 addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
ff1a52c6
UW
2063
2064 call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
2065 }
2066}
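/* Note on the call above: a pointer-to-function value is synthesized
   for __cache_flush and invoked in the inferior with no arguments;
   __cache_flush is assumed to be the flush routine exported by the
   SPE software-managed cache runtime that backs __ea pointer access.  */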
2067
 2068/* This handler is called when the inferior has stopped.  If it stopped in
 2069 SPU code, flush the software-managed __ea cache if it is in use. */
2070static void
2071spu_attach_normal_stop (struct bpstats *bs, int print_frame)
2072{
2073 if (!spu_auto_flush_cache_p)
2074 return;
2075
2076 /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
2077 re-entering this function when __cache_flush stops. */
2078 spu_auto_flush_cache_p = 0;
2079 flush_ea_cache ();
2080 spu_auto_flush_cache_p = 1;
2081}
2082
2083
23d964e7
UW
2084/* "info spu" commands. */
2085
2086static void
2087info_spu_event_command (char *args, int from_tty)
2088{
2089 struct frame_info *frame = get_selected_frame (NULL);
2090 ULONGEST event_status = 0;
2091 ULONGEST event_mask = 0;
2092 struct cleanup *chain;
2093 gdb_byte buf[100];
2094 char annex[32];
2095 LONGEST len;
22e048c9 2096 int id;
23d964e7 2097
0391f248
UW
2098 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2099 error (_("\"info spu\" is only supported on the SPU architecture."));
2100
23d964e7
UW
2101 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2102
2103 xsnprintf (annex, sizeof annex, "%d/event_status", id);
2104 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
9971ac47 2105 buf, 0, (sizeof (buf) - 1));
23d964e7
UW
2106 if (len <= 0)
2107 error (_("Could not read event_status."));
9971ac47 2108 buf[len] = '\0';
001f13d8 2109 event_status = strtoulst ((char *) buf, NULL, 16);
23d964e7
UW
2110
2111 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
2112 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
9971ac47 2113 buf, 0, (sizeof (buf) - 1));
23d964e7
UW
2114 if (len <= 0)
2115 error (_("Could not read event_mask."));
9971ac47 2116 buf[len] = '\0';
001f13d8 2117 event_mask = strtoulst ((char *) buf, NULL, 16);
23d964e7 2118
31a0ae49 2119 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoEvent");
23d964e7 2120
31a0ae49 2121 if (ui_out_is_mi_like_p (current_uiout))
23d964e7 2122 {
31a0ae49 2123 ui_out_field_fmt (current_uiout, "event_status",
23d964e7 2124 "0x%s", phex_nz (event_status, 4));
31a0ae49 2125 ui_out_field_fmt (current_uiout, "event_mask",
23d964e7
UW
2126 "0x%s", phex_nz (event_mask, 4));
2127 }
2128 else
2129 {
2130 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
2131 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
2132 }
2133
2134 do_cleanups (chain);
2135}
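/* The TARGET_OBJECT_SPU annexes used here and in the commands below
   ("<id>/event_status", "<id>/event_mask", "<id>/signal1", ...) are
   assumed to name files within the SPU context's spufs directory; the
   decimal prefix is the context id taken from SPU_ID_REGNUM.  */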
2136
2137static void
2138info_spu_signal_command (char *args, int from_tty)
2139{
2140 struct frame_info *frame = get_selected_frame (NULL);
e17a4113
UW
2141 struct gdbarch *gdbarch = get_frame_arch (frame);
2142 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
23d964e7
UW
2143 ULONGEST signal1 = 0;
2144 ULONGEST signal1_type = 0;
2145 int signal1_pending = 0;
2146 ULONGEST signal2 = 0;
2147 ULONGEST signal2_type = 0;
2148 int signal2_pending = 0;
2149 struct cleanup *chain;
2150 char annex[32];
2151 gdb_byte buf[100];
2152 LONGEST len;
22e048c9 2153 int id;
23d964e7 2154
e17a4113 2155 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
0391f248
UW
2156 error (_("\"info spu\" is only supported on the SPU architecture."));
2157
23d964e7
UW
2158 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2159
2160 xsnprintf (annex, sizeof annex, "%d/signal1", id);
2161 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2162 if (len < 0)
2163 error (_("Could not read signal1."));
2164 else if (len == 4)
2165 {
e17a4113 2166 signal1 = extract_unsigned_integer (buf, 4, byte_order);
23d964e7
UW
2167 signal1_pending = 1;
2168 }
2169
2170 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
2171 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
9971ac47 2172 buf, 0, (sizeof (buf) - 1));
23d964e7
UW
2173 if (len <= 0)
2174 error (_("Could not read signal1_type."));
9971ac47 2175 buf[len] = '\0';
001f13d8 2176 signal1_type = strtoulst ((char *) buf, NULL, 16);
23d964e7
UW
2177
2178 xsnprintf (annex, sizeof annex, "%d/signal2", id);
2179 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2180 if (len < 0)
2181 error (_("Could not read signal2."));
2182 else if (len == 4)
2183 {
e17a4113 2184 signal2 = extract_unsigned_integer (buf, 4, byte_order);
23d964e7
UW
2185 signal2_pending = 1;
2186 }
2187
2188 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
2189 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
9971ac47 2190 buf, 0, (sizeof (buf) - 1));
23d964e7
UW
2191 if (len <= 0)
2192 error (_("Could not read signal2_type."));
9971ac47 2193 buf[len] = '\0';
001f13d8 2194 signal2_type = strtoulst ((char *) buf, NULL, 16);
23d964e7 2195
31a0ae49 2196 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoSignal");
23d964e7 2197
31a0ae49 2198 if (ui_out_is_mi_like_p (current_uiout))
23d964e7 2199 {
31a0ae49
JK
2200 ui_out_field_int (current_uiout, "signal1_pending", signal1_pending);
2201 ui_out_field_fmt (current_uiout, "signal1", "0x%s", phex_nz (signal1, 4));
2202 ui_out_field_int (current_uiout, "signal1_type", signal1_type);
2203 ui_out_field_int (current_uiout, "signal2_pending", signal2_pending);
2204 ui_out_field_fmt (current_uiout, "signal2", "0x%s", phex_nz (signal2, 4));
2205 ui_out_field_int (current_uiout, "signal2_type", signal2_type);
23d964e7
UW
2206 }
2207 else
2208 {
2209 if (signal1_pending)
2210 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
2211 else
2212 printf_filtered (_("Signal 1 not pending "));
2213
2214 if (signal1_type)
23d964e7 2215 printf_filtered (_("(Type Or)\n"));
b94c4f7d
UW
2216 else
2217 printf_filtered (_("(Type Overwrite)\n"));
23d964e7
UW
2218
2219 if (signal2_pending)
2220 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
2221 else
2222 printf_filtered (_("Signal 2 not pending "));
2223
2224 if (signal2_type)
23d964e7 2225 printf_filtered (_("(Type Or)\n"));
b94c4f7d
UW
2226 else
2227 printf_filtered (_("(Type Overwrite)\n"));
23d964e7
UW
2228 }
2229
2230 do_cleanups (chain);
2231}
2232
2233static void
e17a4113 2234info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
23d964e7
UW
2235 const char *field, const char *msg)
2236{
2237 struct cleanup *chain;
2238 int i;
2239
2240 if (nr <= 0)
2241 return;
2242
31a0ae49 2243 chain = make_cleanup_ui_out_table_begin_end (current_uiout, 1, nr, "mbox");
23d964e7 2244
31a0ae49
JK
2245 ui_out_table_header (current_uiout, 32, ui_left, field, msg);
2246 ui_out_table_body (current_uiout);
23d964e7
UW
2247
2248 for (i = 0; i < nr; i++)
2249 {
2250 struct cleanup *val_chain;
2251 ULONGEST val;
31a0ae49 2252 val_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "mbox");
e17a4113 2253 val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
31a0ae49 2254 ui_out_field_fmt (current_uiout, field, "0x%s", phex (val, 4));
23d964e7
UW
2255 do_cleanups (val_chain);
2256
31a0ae49 2257 if (!ui_out_is_mi_like_p (current_uiout))
23d964e7
UW
2258 printf_filtered ("\n");
2259 }
2260
2261 do_cleanups (chain);
2262}
2263
2264static void
2265info_spu_mailbox_command (char *args, int from_tty)
2266{
2267 struct frame_info *frame = get_selected_frame (NULL);
e17a4113
UW
2268 struct gdbarch *gdbarch = get_frame_arch (frame);
2269 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
23d964e7
UW
2270 struct cleanup *chain;
2271 char annex[32];
2272 gdb_byte buf[1024];
2273 LONGEST len;
22e048c9 2274 int id;
23d964e7 2275
e17a4113 2276 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
0391f248
UW
2277 error (_("\"info spu\" is only supported on the SPU architecture."));
2278
23d964e7
UW
2279 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2280
31a0ae49 2281 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoMailbox");
23d964e7
UW
2282
2283 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2284 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2285 buf, 0, sizeof buf);
2286 if (len < 0)
2287 error (_("Could not read mbox_info."));
2288
e17a4113
UW
2289 info_spu_mailbox_list (buf, len / 4, byte_order,
2290 "mbox", "SPU Outbound Mailbox");
23d964e7
UW
2291
2292 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2293 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2294 buf, 0, sizeof buf);
2295 if (len < 0)
2296 error (_("Could not read ibox_info."));
2297
e17a4113
UW
2298 info_spu_mailbox_list (buf, len / 4, byte_order,
2299 "ibox", "SPU Outbound Interrupt Mailbox");
23d964e7
UW
2300
2301 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2302 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2303 buf, 0, sizeof buf);
2304 if (len < 0)
2305 error (_("Could not read wbox_info."));
2306
e17a4113
UW
2307 info_spu_mailbox_list (buf, len / 4, byte_order,
2308 "wbox", "SPU Inbound Mailbox");
23d964e7
UW
2309
2310 do_cleanups (chain);
2311}
2312
2313static ULONGEST
2314spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2315{
2316 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2317 return (word >> (63 - last)) & mask;
2318}
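/* Worked example of the IBM bit numbering used above (bit 0 is the
   most significant bit of the 64-bit word): with
   word == 0x0123456789abcdefULL,
   spu_mfc_get_bitfield (word, 0, 3) yields 0x0 and
   spu_mfc_get_bitfield (word, 60, 63) yields 0xf.  */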
2319
2320static void
e17a4113 2321info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
23d964e7
UW
2322{
2323 static char *spu_mfc_opcode[256] =
2324 {
2325 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2326 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2327 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2328 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2329 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
2330 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
2331 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
2332 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2333 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
2334 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
2335 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2336 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2337 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2338 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2339 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2340 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2341 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
2342 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
2343 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2344 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2345 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
2346 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2347 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
2348 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2349 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2350 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
2351 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2352 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2353 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2354 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2355 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2356 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2357 };
2358
12ab8a60
UW
2359 int *seq = alloca (nr * sizeof (int));
2360 int done = 0;
23d964e7 2361 struct cleanup *chain;
12ab8a60
UW
2362 int i, j;
2363
2364
2365 /* Determine sequence in which to display (valid) entries. */
2366 for (i = 0; i < nr; i++)
2367 {
2368 /* Search for the first valid entry all of whose
2369 dependencies are met. */
2370 for (j = 0; j < nr; j++)
2371 {
2372 ULONGEST mfc_cq_dw3;
2373 ULONGEST dependencies;
2374
2375 if (done & (1 << (nr - 1 - j)))
2376 continue;
2377
e17a4113
UW
2378 mfc_cq_dw3
 2379 = extract_unsigned_integer (buf + 32*j + 24, 8, byte_order);
12ab8a60
UW
2380 if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
2381 continue;
2382
2383 dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
2384 if ((dependencies & done) != dependencies)
2385 continue;
2386
2387 seq[i] = j;
2388 done |= 1 << (nr - 1 - j);
2389 break;
2390 }
2391
2392 if (j == nr)
2393 break;
2394 }
2395
2396 nr = i;
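/* At this point SEQ[0 .. NR) lists the valid queue entries in an order
   where each entry appears only after every entry it depends on; the
   DONE bitmask records entry J in bit (nr - 1 - j), matching the
   dependency bitfield extracted from the fourth doubleword above.  */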
2397
23d964e7 2398
31a0ae49
JK
2399 chain = make_cleanup_ui_out_table_begin_end (current_uiout, 10, nr,
2400 "dma_cmd");
23d964e7 2401
31a0ae49
JK
2402 ui_out_table_header (current_uiout, 7, ui_left, "opcode", "Opcode");
2403 ui_out_table_header (current_uiout, 3, ui_left, "tag", "Tag");
2404 ui_out_table_header (current_uiout, 3, ui_left, "tid", "TId");
2405 ui_out_table_header (current_uiout, 3, ui_left, "rid", "RId");
2406 ui_out_table_header (current_uiout, 18, ui_left, "ea", "EA");
2407 ui_out_table_header (current_uiout, 7, ui_left, "lsa", "LSA");
2408 ui_out_table_header (current_uiout, 7, ui_left, "size", "Size");
2409 ui_out_table_header (current_uiout, 7, ui_left, "lstaddr", "LstAddr");
2410 ui_out_table_header (current_uiout, 7, ui_left, "lstsize", "LstSize");
2411 ui_out_table_header (current_uiout, 1, ui_left, "error_p", "E");
23d964e7 2412
31a0ae49 2413 ui_out_table_body (current_uiout);
23d964e7
UW
2414
2415 for (i = 0; i < nr; i++)
2416 {
2417 struct cleanup *cmd_chain;
2418 ULONGEST mfc_cq_dw0;
2419 ULONGEST mfc_cq_dw1;
2420 ULONGEST mfc_cq_dw2;
23d964e7 2421 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
22e048c9 2422 int list_lsa, list_size, mfc_lsa, mfc_size;
23d964e7
UW
2423 ULONGEST mfc_ea;
2424 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
2425
2426 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
2427 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
2428
e17a4113
UW
2429 mfc_cq_dw0
2430 = extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
2431 mfc_cq_dw1
2432 = extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
2433 mfc_cq_dw2
2434 = extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);
23d964e7
UW
2435
2436 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
2437 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
2438 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
2439 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
2440 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
2441 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
2442 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
2443
2444 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
2445 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
2446
2447 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
2448 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
2449 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
2450 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
2451 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
2452 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
2453
31a0ae49 2454 cmd_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "cmd");
23d964e7
UW
2455
2456 if (spu_mfc_opcode[mfc_cmd_opcode])
31a0ae49 2457 ui_out_field_string (current_uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
23d964e7 2458 else
31a0ae49 2459 ui_out_field_int (current_uiout, "opcode", mfc_cmd_opcode);
23d964e7 2460
31a0ae49
JK
2461 ui_out_field_int (current_uiout, "tag", mfc_cmd_tag);
2462 ui_out_field_int (current_uiout, "tid", tclass_id);
2463 ui_out_field_int (current_uiout, "rid", rclass_id);
23d964e7
UW
2464
2465 if (ea_valid_p)
31a0ae49 2466 ui_out_field_fmt (current_uiout, "ea", "0x%s", phex (mfc_ea, 8));
23d964e7 2467 else
31a0ae49 2468 ui_out_field_skip (current_uiout, "ea");
23d964e7 2469
31a0ae49 2470 ui_out_field_fmt (current_uiout, "lsa", "0x%05x", mfc_lsa << 4);
23d964e7 2471 if (qw_valid_p)
31a0ae49 2472 ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size << 4);
23d964e7 2473 else
31a0ae49 2474 ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size);
23d964e7
UW
2475
2476 if (list_valid_p)
2477 {
31a0ae49
JK
2478 ui_out_field_fmt (current_uiout, "lstaddr", "0x%05x", list_lsa << 3);
2479 ui_out_field_fmt (current_uiout, "lstsize", "0x%05x", list_size << 3);
23d964e7
UW
2480 }
2481 else
2482 {
31a0ae49
JK
2483 ui_out_field_skip (current_uiout, "lstaddr");
2484 ui_out_field_skip (current_uiout, "lstsize");
23d964e7
UW
2485 }
2486
2487 if (cmd_error_p)
31a0ae49 2488 ui_out_field_string (current_uiout, "error_p", "*");
23d964e7 2489 else
31a0ae49 2490 ui_out_field_skip (current_uiout, "error_p");
23d964e7
UW
2491
2492 do_cleanups (cmd_chain);
2493
31a0ae49 2494 if (!ui_out_is_mi_like_p (current_uiout))
23d964e7
UW
2495 printf_filtered ("\n");
2496 }
2497
2498 do_cleanups (chain);
2499}
2500
2501static void
2502info_spu_dma_command (char *args, int from_tty)
2503{
2504 struct frame_info *frame = get_selected_frame (NULL);
e17a4113
UW
2505 struct gdbarch *gdbarch = get_frame_arch (frame);
2506 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
23d964e7
UW
2507 ULONGEST dma_info_type;
2508 ULONGEST dma_info_mask;
2509 ULONGEST dma_info_status;
2510 ULONGEST dma_info_stall_and_notify;
2511 ULONGEST dma_info_atomic_command_status;
2512 struct cleanup *chain;
2513 char annex[32];
2514 gdb_byte buf[1024];
2515 LONGEST len;
22e048c9 2516 int id;
23d964e7 2517
0391f248
UW
2518 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2519 error (_("\"info spu\" is only supported on the SPU architecture."));
2520
23d964e7
UW
2521 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2522
2523 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2524 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2525 buf, 0, 40 + 16 * 32);
2526 if (len <= 0)
2527 error (_("Could not read dma_info."));
2528
e17a4113
UW
2529 dma_info_type
2530 = extract_unsigned_integer (buf, 8, byte_order);
2531 dma_info_mask
2532 = extract_unsigned_integer (buf + 8, 8, byte_order);
2533 dma_info_status
2534 = extract_unsigned_integer (buf + 16, 8, byte_order);
2535 dma_info_stall_and_notify
2536 = extract_unsigned_integer (buf + 24, 8, byte_order);
2537 dma_info_atomic_command_status
2538 = extract_unsigned_integer (buf + 32, 8, byte_order);
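/* Layout assumed by the reads above and by the target_read length: the
   dma_info buffer begins with five 8-byte fields (type, mask, status,
   stall-and-notify, atomic command status) followed by 16 MFC command
   queue entries of 32 bytes each, decoded by info_spu_dma_cmdlist
   below.  */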
23d964e7 2539
31a0ae49 2540 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoDMA");
23d964e7 2541
31a0ae49 2542 if (ui_out_is_mi_like_p (current_uiout))
23d964e7 2543 {
31a0ae49 2544 ui_out_field_fmt (current_uiout, "dma_info_type", "0x%s",
23d964e7 2545 phex_nz (dma_info_type, 4));
31a0ae49 2546 ui_out_field_fmt (current_uiout, "dma_info_mask", "0x%s",
23d964e7 2547 phex_nz (dma_info_mask, 4));
31a0ae49 2548 ui_out_field_fmt (current_uiout, "dma_info_status", "0x%s",
23d964e7 2549 phex_nz (dma_info_status, 4));
31a0ae49 2550 ui_out_field_fmt (current_uiout, "dma_info_stall_and_notify", "0x%s",
23d964e7 2551 phex_nz (dma_info_stall_and_notify, 4));
31a0ae49 2552 ui_out_field_fmt (current_uiout, "dma_info_atomic_command_status", "0x%s",
23d964e7
UW
2553 phex_nz (dma_info_atomic_command_status, 4));
2554 }
2555 else
2556 {
8fbde58b 2557 const char *query_msg = _("no query pending");
23d964e7 2558
8fbde58b
UW
2559 if (dma_info_type & 4)
2560 switch (dma_info_type & 3)
2561 {
2562 case 1: query_msg = _("'any' query pending"); break;
2563 case 2: query_msg = _("'all' query pending"); break;
2564 default: query_msg = _("undefined query type"); break;
2565 }
23d964e7
UW
2566
2567 printf_filtered (_("Tag-Group Status 0x%s\n"),
2568 phex (dma_info_status, 4));
2569 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2570 phex (dma_info_mask, 4), query_msg);
2571 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2572 phex (dma_info_stall_and_notify, 4));
2573 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2574 phex (dma_info_atomic_command_status, 4));
2575 printf_filtered ("\n");
2576 }
2577
e17a4113 2578 info_spu_dma_cmdlist (buf + 40, 16, byte_order);
23d964e7
UW
2579 do_cleanups (chain);
2580}
2581
2582static void
2583info_spu_proxydma_command (char *args, int from_tty)
2584{
2585 struct frame_info *frame = get_selected_frame (NULL);
e17a4113
UW
2586 struct gdbarch *gdbarch = get_frame_arch (frame);
2587 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
23d964e7
UW
2588 ULONGEST dma_info_type;
2589 ULONGEST dma_info_mask;
2590 ULONGEST dma_info_status;
2591 struct cleanup *chain;
2592 char annex[32];
2593 gdb_byte buf[1024];
2594 LONGEST len;
22e048c9 2595 int id;
23d964e7 2596
e17a4113 2597 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
0391f248
UW
2598 error (_("\"info spu\" is only supported on the SPU architecture."));
2599
23d964e7
UW
2600 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2601
2602 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2603 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2604 buf, 0, 24 + 8 * 32);
2605 if (len <= 0)
2606 error (_("Could not read proxydma_info."));
2607
e17a4113
UW
2608 dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2609 dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2610 dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
23d964e7 2611
31a0ae49
JK
2612 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout,
2613 "SPUInfoProxyDMA");
23d964e7 2614
31a0ae49 2615 if (ui_out_is_mi_like_p (current_uiout))
23d964e7 2616 {
31a0ae49 2617 ui_out_field_fmt (current_uiout, "proxydma_info_type", "0x%s",
23d964e7 2618 phex_nz (dma_info_type, 4));
31a0ae49 2619 ui_out_field_fmt (current_uiout, "proxydma_info_mask", "0x%s",
23d964e7 2620 phex_nz (dma_info_mask, 4));
31a0ae49 2621 ui_out_field_fmt (current_uiout, "proxydma_info_status", "0x%s",
23d964e7
UW
2622 phex_nz (dma_info_status, 4));
2623 }
2624 else
2625 {
2626 const char *query_msg;
2627
8fbde58b 2628 switch (dma_info_type & 3)
23d964e7
UW
2629 {
2630 case 0: query_msg = _("no query pending"); break;
2631 case 1: query_msg = _("'any' query pending"); break;
2632 case 2: query_msg = _("'all' query pending"); break;
2633 default: query_msg = _("undefined query type"); break;
2634 }
2635
2636 printf_filtered (_("Tag-Group Status 0x%s\n"),
2637 phex (dma_info_status, 4));
2638 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2639 phex (dma_info_mask, 4), query_msg);
2640 printf_filtered ("\n");
2641 }
2642
e17a4113 2643 info_spu_dma_cmdlist (buf + 24, 8, byte_order);
23d964e7
UW
2644 do_cleanups (chain);
2645}
2646
2647static void
2648info_spu_command (char *args, int from_tty)
2649{
c378eb4e
MS
2650 printf_unfiltered (_("\"info spu\" must be followed by "
2651 "the name of an SPU facility.\n"));
635c7e8a 2652 help_list (infospucmdlist, "info spu ", all_commands, gdb_stdout);
23d964e7
UW
2653}
2654
2655
3285f3fe
UW
2656/* Root of all "set spu "/"show spu " commands. */
2657
2658static void
2659show_spu_command (char *args, int from_tty)
2660{
2661 help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
2662}
2663
2664static void
2665set_spu_command (char *args, int from_tty)
2666{
2667 help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
2668}
2669
2670static void
2671show_spu_stop_on_load (struct ui_file *file, int from_tty,
2672 struct cmd_list_element *c, const char *value)
2673{
2674 fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
2675 value);
2676}
2677
ff1a52c6
UW
2678static void
2679show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
2680 struct cmd_list_element *c, const char *value)
2681{
2682 fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
2683 value);
2684}
2685
3285f3fe 2686
771b4502
UW
2687/* Set up gdbarch struct. */
2688
2689static struct gdbarch *
2690spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2691{
2692 struct gdbarch *gdbarch;
794ac428 2693 struct gdbarch_tdep *tdep;
85e747d2
UW
2694 int id = -1;
2695
2696 /* Which spufs ID was requested as address space? */
2697 if (info.tdep_info)
2698 id = *(int *)info.tdep_info;
2699 /* For objfile architectures of SPU solibs, decode the ID from the name.
2700 This assumes the filename convention employed by solib-spu.c. */
2701 else if (info.abfd)
2702 {
2703 char *name = strrchr (info.abfd->filename, '@');
2704 if (name)
2705 sscanf (name, "@0x%*x <%d>", &id);
2706 }
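/* For example, an SPU solib BFD named "foo.spu@0x12345 <7>" (a
   hypothetical name following the solib-spu.c convention mentioned
   above) yields id == 7 here; the "%*x" conversion skips the
   hexadecimal field after the '@'.  */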
771b4502 2707
85e747d2
UW
2708 /* Find a candidate among extant architectures. */
2709 for (arches = gdbarch_list_lookup_by_info (arches, &info);
2710 arches != NULL;
2711 arches = gdbarch_list_lookup_by_info (arches->next, &info))
2712 {
2713 tdep = gdbarch_tdep (arches->gdbarch);
2714 if (tdep && tdep->id == id)
2715 return arches->gdbarch;
2716 }
771b4502 2717
85e747d2 2718 /* None found, so create a new architecture. */
fc270c35 2719 tdep = XCNEW (struct gdbarch_tdep);
85e747d2 2720 tdep->id = id;
794ac428 2721 gdbarch = gdbarch_alloc (&info, tdep);
771b4502
UW
2722
2723 /* Disassembler. */
85e747d2 2724 set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);
771b4502
UW
2725
2726 /* Registers. */
2727 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2728 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2729 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2730 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
118dfbaf
UW
2731 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2732 set_gdbarch_write_pc (gdbarch, spu_write_pc);
771b4502
UW
2733 set_gdbarch_register_name (gdbarch, spu_register_name);
2734 set_gdbarch_register_type (gdbarch, spu_register_type);
2735 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2736 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
9acbedc0 2737 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
771b4502 2738 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
7ce16bd4
UW
2739 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, spu_dwarf_reg_to_regnum);
2740 set_gdbarch_ax_pseudo_register_collect
2741 (gdbarch, spu_ax_pseudo_register_collect);
2742 set_gdbarch_ax_pseudo_register_push_stack
2743 (gdbarch, spu_ax_pseudo_register_push_stack);
771b4502
UW
2744
2745 /* Data types. */
2746 set_gdbarch_char_signed (gdbarch, 0);
2747 set_gdbarch_ptr_bit (gdbarch, 32);
2748 set_gdbarch_addr_bit (gdbarch, 32);
2749 set_gdbarch_short_bit (gdbarch, 16);
2750 set_gdbarch_int_bit (gdbarch, 32);
2751 set_gdbarch_long_bit (gdbarch, 32);
2752 set_gdbarch_long_long_bit (gdbarch, 64);
2753 set_gdbarch_float_bit (gdbarch, 32);
2754 set_gdbarch_double_bit (gdbarch, 64);
2755 set_gdbarch_long_double_bit (gdbarch, 64);
8da61cc4
DJ
2756 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2757 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2758 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
771b4502 2759
ff1a52c6 2760 /* Address handling. */
85e747d2 2761 set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
36acd84e
UW
2762 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2763 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
ff1a52c6
UW
2764 set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
2765 set_gdbarch_address_class_type_flags_to_name
2766 (gdbarch, spu_address_class_type_flags_to_name);
2767 set_gdbarch_address_class_name_to_type_flags
2768 (gdbarch, spu_address_class_name_to_type_flags);
2769
36acd84e 2770
771b4502 2771 /* Inferior function calls. */
7b3dc0b7
UW
2772 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2773 set_gdbarch_frame_align (gdbarch, spu_frame_align);
5141027d 2774 set_gdbarch_frame_red_zone_size (gdbarch, 2000);
87805e63 2775 set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
771b4502 2776 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
8d998b8f 2777 set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
771b4502
UW
2778 set_gdbarch_return_value (gdbarch, spu_return_value);
2779
2780 /* Frame handling. */
2781 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
7ce16bd4 2782 dwarf2_append_unwinders (gdbarch);
8d998b8f 2783 frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
771b4502
UW
2784 frame_base_set_default (gdbarch, &spu_frame_base);
2785 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2786 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2787 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2788 set_gdbarch_frame_args_skip (gdbarch, 0);
2789 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
fe5febed 2790 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
771b4502 2791
cc5f0d61
UW
2792 /* Cell/B.E. cross-architecture unwinder support. */
2793 frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);
2794
771b4502
UW
2795 /* Breakpoints. */
2796 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2797 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
d03285ec 2798 set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
771b4502
UW
2799 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2800 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
6e3f70d7 2801 set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);
771b4502 2802
dcf52cd8
UW
2803 /* Overlays. */
2804 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2805
771b4502
UW
2806 return gdbarch;
2807}
2808
63807e1d
PA
2809/* Provide a prototype to silence -Wmissing-prototypes. */
2810extern initialize_file_ftype _initialize_spu_tdep;
2811
771b4502
UW
2812void
2813_initialize_spu_tdep (void)
2814{
2815 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
f2d43c2c 2816
dcf52cd8
UW
2817 /* Add ourselves to objfile event chain. */
2818 observer_attach_new_objfile (spu_overlay_new_objfile);
2819 spu_overlay_data = register_objfile_data ();
23d964e7 2820
3285f3fe
UW
2821 /* Install spu stop-on-load handler. */
2822 observer_attach_new_objfile (spu_catch_start);
2823
ff1a52c6
UW
2824 /* Add ourselves to normal_stop event chain. */
2825 observer_attach_normal_stop (spu_attach_normal_stop);
2826
3285f3fe
UW
2827 /* Add root prefix command for all "set spu"/"show spu" commands. */
2828 add_prefix_cmd ("spu", no_class, set_spu_command,
2829 _("Various SPU specific commands."),
2830 &setspucmdlist, "set spu ", 0, &setlist);
2831 add_prefix_cmd ("spu", no_class, show_spu_command,
2832 _("Various SPU specific commands."),
2833 &showspucmdlist, "show spu ", 0, &showlist);
2834
2835 /* Toggle whether or not to add a temporary breakpoint at the "main"
2836 function of new SPE contexts. */
2837 add_setshow_boolean_cmd ("stop-on-load", class_support,
2838 &spu_stop_on_load_p, _("\
2839Set whether to stop for new SPE threads."),
2840 _("\
2841Show whether to stop for new SPE threads."),
2842 _("\
2843Use \"on\" to give control to the user when a new SPE thread\n\
2844enters its \"main\" function.\n\
2845Use \"off\" to disable stopping for new SPE threads."),
2846 NULL,
2847 show_spu_stop_on_load,
2848 &setspucmdlist, &showspucmdlist);
2849
ff1a52c6
UW
2850 /* Toggle whether or not to automatically flush the software-managed
2851 cache whenever SPE execution stops. */
2852 add_setshow_boolean_cmd ("auto-flush-cache", class_support,
2853 &spu_auto_flush_cache_p, _("\
2854Set whether to automatically flush the software-managed cache."),
2855 _("\
2856Show whether to automatically flush the software-managed cache."),
2857 _("\
2858Use \"on\" to automatically flush the software-managed cache\n\
2859whenever SPE execution stops.\n\
2860Use \"off\" to never automatically flush the software-managed cache."),
2861 NULL,
2862 show_spu_auto_flush_cache,
2863 &setspucmdlist, &showspucmdlist);
2864
23d964e7
UW
2865 /* Add root prefix command for all "info spu" commands. */
2866 add_prefix_cmd ("spu", class_info, info_spu_command,
2867 _("Various SPU specific commands."),
2868 &infospucmdlist, "info spu ", 0, &infolist);
2869
2870 /* Add various "info spu" commands. */
2871 add_cmd ("event", class_info, info_spu_event_command,
2872 _("Display SPU event facility status.\n"),
2873 &infospucmdlist);
2874 add_cmd ("signal", class_info, info_spu_signal_command,
2875 _("Display SPU signal notification facility status.\n"),
2876 &infospucmdlist);
2877 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2878 _("Display SPU mailbox facility status.\n"),
2879 &infospucmdlist);
2880 add_cmd ("dma", class_info, info_spu_dma_command,
2881 _("Display MFC DMA status.\n"),
2882 &infospucmdlist);
2883 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2884 _("Display MFC Proxy-DMA status.\n"),
2885 &infospucmdlist);
771b4502 2886}