+ ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
+ NULL, objfile);
+ if (!ovly_buf_table_msym)
+ return NULL;
+
+ ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
+ ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
+
+ ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
+ ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
+
+ ovly_table = xmalloc (ovly_table_size);
+ read_memory (ovly_table_base, ovly_table, ovly_table_size);
+
+ tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
+ objfile->sections_end - objfile->sections,
+ struct spu_overlay_table);
+
+ for (i = 0; i < ovly_table_size / 16; i++)
+ {
+ CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0,
+ 4, byte_order);
+ CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
+ 4, byte_order);
+ CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8,
+ 4, byte_order);
+ CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12,
+ 4, byte_order);
+
+ if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
+ continue;
+
+ ALL_OBJFILE_OSECTIONS (objfile, osect)
+ if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
+ && pos == osect->the_bfd_section->filepos)
+ {
+ int ndx = osect - objfile->sections;
+ tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
+ tbl[ndx].mapped_val = i + 1;
+ break;
+ }
+ }
+
+ xfree (ovly_table);
+ set_objfile_data (objfile, spu_overlay_data, tbl);
+ return tbl;
+}
+
/* Read the _ovly_buf_table entry from the target to determine whether
   OSECT is currently mapped, and update the mapped state.  */
static void
spu_overlay_update_osect (struct obj_section *osect)
{
  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
		  BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct spu_overlay_table *ovly_table;
  CORE_ADDR id, val;

  /* No overlay table means this objfile has no overlays at all.  */
  ovly_table = spu_get_overlay_table (osect->objfile);
  if (!ovly_table)
    return;

  /* Step to this section's table entry; a zero mapped_ptr marks a
     non-overlay section, which needs no update.  */
  ovly_table += osect - osect->objfile->sections;
  if (ovly_table->mapped_ptr == 0)
    return;

  /* Read the current _ovly_buf_table entry in the section's SPU
     address space; the section is mapped iff the entry holds the
     value recorded for it in the overlay table.  */
  id = SPUADDR_SPU (obj_section_addr (osect));
  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
				      4, byte_order);
  osect->ovly_mapped = (val == ovly_table->mapped_val);
}
+
+/* If OSECT is NULL, then update all sections' mapped state.
+ If OSECT is non-NULL, then update only OSECT's mapped state. */
+static void
+spu_overlay_update (struct obj_section *osect)
+{
+ /* Just one section. */
+ if (osect)
+ spu_overlay_update_osect (osect);
+
+ /* All sections. */
+ else
+ {
+ struct objfile *objfile;
+
+ ALL_OBJSECTIONS (objfile, osect)
+ if (section_is_overlay (osect))
+ spu_overlay_update_osect (osect);
+ }
+}
+
+/* Whenever a new objfile is loaded, read the target's _ovly_table.
+ If there is one, go through all sections and make sure for non-
+ overlay sections LMA equals VMA, while for overlay sections LMA
+ is larger than SPU_OVERLAY_LMA. */
+static void
+spu_overlay_new_objfile (struct objfile *objfile)
+{
+ struct spu_overlay_table *ovly_table;
+ struct obj_section *osect;
+
+ /* If we've already touched this file, do nothing. */
+ if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
+ return;
+
+ /* Consider only SPU objfiles. */
+ if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
+ return;
+
+ /* Check if this objfile has overlays. */
+ ovly_table = spu_get_overlay_table (objfile);
+ if (!ovly_table)
+ return;
+
+ /* Now go and fiddle with all the LMAs. */
+ ALL_OBJFILE_OSECTIONS (objfile, osect)
+ {
+ bfd *obfd = objfile->obfd;
+ asection *bsect = osect->the_bfd_section;
+ int ndx = osect - objfile->sections;
+
+ if (ovly_table[ndx].mapped_ptr == 0)
+ bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
+ else
+ bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
+ }
+}
+
+
/* Insert temporary breakpoint on "main" function of newly loaded
   SPE context OBJFILE.  Honors the "set spu stop-on-load" setting;
   does nothing for non-SPU objfiles or the main executable.  */
static void
spu_catch_start (struct objfile *objfile)
{
  struct minimal_symbol *minsym;
  struct symtab *symtab;
  CORE_ADDR pc;
  char buf[32];

  /* Do this only if requested by "set spu stop-on-load on".  */
  if (!spu_stop_on_load_p)
    return;

  /* Consider only SPU objfiles.  */
  if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* The main objfile is handled differently.  */
  if (objfile == symfile_objfile)
    return;

  /* There can be multiple symbols named "main".  Search for the
     "main" in *this* objfile.  */
  minsym = lookup_minimal_symbol ("main", NULL, objfile);
  if (!minsym)
    return;

  /* If we have debugging information, try to use it -- this
     will allow us to properly skip the prologue.  */
  pc = SYMBOL_VALUE_ADDRESS (minsym);
  symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (objfile, minsym));
  if (symtab != NULL)
    {
      struct blockvector *bv = BLOCKVECTOR (symtab);
      struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
      struct symbol *sym;
      struct symtab_and_line sal;

      /* Prefer the full symbol for "main" from this symtab's global
	 block; its prologue-skipped start address supersedes the
	 minimal symbol's raw address.  */
      sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
      if (sym)
	{
	  fixup_symbol_section (sym, objfile);
	  sal = find_function_start_sal (sym, 1);
	  pc = sal.pc;
	}
    }

  /* Use a numerical address for the set_breakpoint command to avoid having
     the breakpoint re-set incorrectly.  */
  xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
  create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
		     NULL /* cond_string */, -1 /* thread */,
		     NULL /* extra_string */,
		     0 /* parse_condition_and_thread */, 1 /* tempflag */,
		     bp_breakpoint /* type_wanted */,
		     0 /* ignore_count */,
		     AUTO_BOOLEAN_FALSE /* pending_break_support */,
		     &bkpt_breakpoint_ops /* ops */, 0 /* from_tty */,
		     1 /* enabled */, 0 /* internal */, 0);
}
+
+
+/* Look up OBJFILE loaded into FRAME's SPU context. */
+static struct objfile *
+spu_objfile_from_frame (struct frame_info *frame)
+{
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ struct objfile *obj;
+
+ if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
+ return NULL;
+
+ ALL_OBJFILES (obj)
+ {
+ if (obj->sections != obj->sections_end
+ && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
+ return obj;
+ }
+
+ return NULL;
+}
+
+/* Flush cache for ea pointer access if available. */
+static void
+flush_ea_cache (void)
+{
+ struct minimal_symbol *msymbol;
+ struct objfile *obj;
+
+ if (!has_stack_frames ())
+ return;
+
+ obj = spu_objfile_from_frame (get_current_frame ());
+ if (obj == NULL)
+ return;
+
+ /* Lookup inferior function __cache_flush. */
+ msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
+ if (msymbol != NULL)
+ {
+ struct type *type;
+ CORE_ADDR addr;
+
+ type = objfile_type (obj)->builtin_void;
+ type = lookup_function_type (type);
+ type = lookup_pointer_type (type);
+ addr = SYMBOL_VALUE_ADDRESS (msymbol);
+
+ call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
+ }
+}
+
+/* This handler is called when the inferior has stopped. If it is stopped in
+ SPU architecture then flush the ea cache if used. */
+static void
+spu_attach_normal_stop (struct bpstats *bs, int print_frame)
+{
+ if (!spu_auto_flush_cache_p)
+ return;
+
+ /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
+ re-entering this function when __cache_flush stops. */
+ spu_auto_flush_cache_p = 0;
+ flush_ea_cache ();
+ spu_auto_flush_cache_p = 1;
+}
+
+
+/* "info spu" commands. */
+
+static void
+info_spu_event_command (char *args, int from_tty)
+{
+ struct frame_info *frame = get_selected_frame (NULL);
+ ULONGEST event_status = 0;
+ ULONGEST event_mask = 0;
+ struct cleanup *chain;
+ gdb_byte buf[100];
+ char annex[32];
+ LONGEST len;
+ int id;
+
+ if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
+ error (_("\"info spu\" is only supported on the SPU architecture."));
+
+ id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
+
+ xsnprintf (annex, sizeof annex, "%d/event_status", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, (sizeof (buf) - 1));
+ if (len <= 0)
+ error (_("Could not read event_status."));
+ buf[len] = '\0';
+ event_status = strtoulst ((char *) buf, NULL, 16);
+
+ xsnprintf (annex, sizeof annex, "%d/event_mask", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, (sizeof (buf) - 1));
+ if (len <= 0)
+ error (_("Could not read event_mask."));
+ buf[len] = '\0';
+ event_mask = strtoulst ((char *) buf, NULL, 16);
+
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoEvent");
+
+ if (ui_out_is_mi_like_p (current_uiout))
+ {
+ ui_out_field_fmt (current_uiout, "event_status",
+ "0x%s", phex_nz (event_status, 4));
+ ui_out_field_fmt (current_uiout, "event_mask",
+ "0x%s", phex_nz (event_mask, 4));
+ }
+ else
+ {
+ printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
+ printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
+ }
+
+ do_cleanups (chain);
+}
+
+static void
+info_spu_signal_command (char *args, int from_tty)
+{
+ struct frame_info *frame = get_selected_frame (NULL);
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ ULONGEST signal1 = 0;
+ ULONGEST signal1_type = 0;
+ int signal1_pending = 0;
+ ULONGEST signal2 = 0;
+ ULONGEST signal2_type = 0;
+ int signal2_pending = 0;
+ struct cleanup *chain;
+ char annex[32];
+ gdb_byte buf[100];
+ LONGEST len;
+ int id;
+
+ if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
+ error (_("\"info spu\" is only supported on the SPU architecture."));
+
+ id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
+
+ xsnprintf (annex, sizeof annex, "%d/signal1", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
+ if (len < 0)
+ error (_("Could not read signal1."));
+ else if (len == 4)
+ {
+ signal1 = extract_unsigned_integer (buf, 4, byte_order);
+ signal1_pending = 1;
+ }
+
+ xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, (sizeof (buf) - 1));
+ if (len <= 0)
+ error (_("Could not read signal1_type."));
+ buf[len] = '\0';
+ signal1_type = strtoulst ((char *) buf, NULL, 16);
+
+ xsnprintf (annex, sizeof annex, "%d/signal2", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
+ if (len < 0)
+ error (_("Could not read signal2."));
+ else if (len == 4)
+ {
+ signal2 = extract_unsigned_integer (buf, 4, byte_order);
+ signal2_pending = 1;
+ }
+
+ xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, (sizeof (buf) - 1));
+ if (len <= 0)
+ error (_("Could not read signal2_type."));
+ buf[len] = '\0';
+ signal2_type = strtoulst ((char *) buf, NULL, 16);
+
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoSignal");
+
+ if (ui_out_is_mi_like_p (current_uiout))
+ {
+ ui_out_field_int (current_uiout, "signal1_pending", signal1_pending);
+ ui_out_field_fmt (current_uiout, "signal1", "0x%s", phex_nz (signal1, 4));
+ ui_out_field_int (current_uiout, "signal1_type", signal1_type);
+ ui_out_field_int (current_uiout, "signal2_pending", signal2_pending);
+ ui_out_field_fmt (current_uiout, "signal2", "0x%s", phex_nz (signal2, 4));
+ ui_out_field_int (current_uiout, "signal2_type", signal2_type);
+ }
+ else
+ {
+ if (signal1_pending)
+ printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
+ else
+ printf_filtered (_("Signal 1 not pending "));
+
+ if (signal1_type)
+ printf_filtered (_("(Type Or)\n"));
+ else
+ printf_filtered (_("(Type Overwrite)\n"));
+
+ if (signal2_pending)
+ printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
+ else
+ printf_filtered (_("Signal 2 not pending "));
+
+ if (signal2_type)
+ printf_filtered (_("(Type Or)\n"));
+ else
+ printf_filtered (_("(Type Overwrite)\n"));
+ }
+
+ do_cleanups (chain);
+}
+
+static void
+info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
+ const char *field, const char *msg)
+{
+ struct cleanup *chain;
+ int i;
+
+ if (nr <= 0)
+ return;
+
+ chain = make_cleanup_ui_out_table_begin_end (current_uiout, 1, nr, "mbox");
+
+ ui_out_table_header (current_uiout, 32, ui_left, field, msg);
+ ui_out_table_body (current_uiout);
+
+ for (i = 0; i < nr; i++)
+ {
+ struct cleanup *val_chain;
+ ULONGEST val;
+ val_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "mbox");
+ val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
+ ui_out_field_fmt (current_uiout, field, "0x%s", phex (val, 4));
+ do_cleanups (val_chain);
+
+ if (!ui_out_is_mi_like_p (current_uiout))
+ printf_filtered ("\n");
+ }
+
+ do_cleanups (chain);
+}
+
+static void
+info_spu_mailbox_command (char *args, int from_tty)
+{
+ struct frame_info *frame = get_selected_frame (NULL);
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ struct cleanup *chain;
+ char annex[32];
+ gdb_byte buf[1024];
+ LONGEST len;
+ int id;
+
+ if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
+ error (_("\"info spu\" is only supported on the SPU architecture."));
+
+ id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
+
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoMailbox");
+
+ xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, sizeof buf);
+ if (len < 0)
+ error (_("Could not read mbox_info."));
+
+ info_spu_mailbox_list (buf, len / 4, byte_order,
+ "mbox", "SPU Outbound Mailbox");
+
+ xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, sizeof buf);
+ if (len < 0)
+ error (_("Could not read ibox_info."));
+
+ info_spu_mailbox_list (buf, len / 4, byte_order,
+ "ibox", "SPU Outbound Interrupt Mailbox");
+
+ xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, sizeof buf);
+ if (len < 0)
+ error (_("Could not read wbox_info."));
+
+ info_spu_mailbox_list (buf, len / 4, byte_order,
+ "wbox", "SPU Inbound Mailbox");
+
+ do_cleanups (chain);
+}
+
/* Extract the big-endian-numbered bitfield [FIRST, LAST] from the
   64-bit value WORD; bit 0 is the most significant bit.  Guards the
   full-width case: shifting a 64-bit value by 64 bits is undefined
   behavior in C, so build an all-ones mask explicitly then.  */
static ULONGEST
spu_mfc_get_bitfield (ULONGEST word, int first, int last)
{
  int width = last - first + 1;
  ULONGEST mask = (width >= 64
		   ? ~(ULONGEST) 0
		   : ~(~(ULONGEST) 0 << width));
  return (word >> (63 - last)) & mask;
}
+
/* Print the NR 32-byte MFC command-queue entries in BUF as a ui_out
   table.  Valid entries are emitted in dependency order: each command
   is printed only after every command it depends on has been printed.
   Field layout follows "Cell Broadband Engine Registers V1.3",
   section 3.3.2.1.  */
static void
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
{
  /* MFC command mnemonics indexed by opcode; NULL entries are
     reserved opcodes and are printed numerically instead.  */
  static char *spu_mfc_opcode[256] =
    {
    /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
	     "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
    /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
	     "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
    /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
    /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
	     "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
    /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

  int *seq = alloca (nr * sizeof (int));
  int done = 0;
  struct cleanup *chain;
  int i, j;


  /* Determine sequence in which to display (valid) entries.
     SEQ[i] receives the buffer index of the i-th entry to display;
     DONE is a bitmask where bit (nr - 1 - j) marks entry j as
     already scheduled.  */
  for (i = 0; i < nr; i++)
    {
      /* Search for the first valid entry all of whose
	 dependencies are met.  */
      for (j = 0; j < nr; j++)
	{
	  ULONGEST mfc_cq_dw3;
	  ULONGEST dependencies;

	  /* Skip entries already placed in the sequence.  */
	  if (done & (1 << (nr - 1 - j)))
	    continue;

	  /* Bit 16 of the fourth doubleword is the "valid" flag;
	     invalid entries are never displayed.  */
	  mfc_cq_dw3
	    = extract_unsigned_integer (buf + 32*j + 24,8, byte_order);
	  if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
	    continue;

	  /* Bits 0 .. nr-1 name the entries this one depends on;
	     all of them must already be in DONE.  */
	  dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
	  if ((dependencies & done) != dependencies)
	    continue;

	  seq[i] = j;
	  done |= 1 << (nr - 1 - j);
	  break;
	}

      /* No schedulable entry left: stop early.  */
      if (j == nr)
	break;
    }

  /* Only the first I entries were scheduled.  */
  nr = i;


  chain = make_cleanup_ui_out_table_begin_end (current_uiout, 10, nr,
					       "dma_cmd");

  ui_out_table_header (current_uiout, 7, ui_left, "opcode", "Opcode");
  ui_out_table_header (current_uiout, 3, ui_left, "tag", "Tag");
  ui_out_table_header (current_uiout, 3, ui_left, "tid", "TId");
  ui_out_table_header (current_uiout, 3, ui_left, "rid", "RId");
  ui_out_table_header (current_uiout, 18, ui_left, "ea", "EA");
  ui_out_table_header (current_uiout, 7, ui_left, "lsa", "LSA");
  ui_out_table_header (current_uiout, 7, ui_left, "size", "Size");
  ui_out_table_header (current_uiout, 7, ui_left, "lstaddr", "LstAddr");
  ui_out_table_header (current_uiout, 7, ui_left, "lstsize", "LstSize");
  ui_out_table_header (current_uiout, 1, ui_left, "error_p", "E");

  ui_out_table_body (current_uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *cmd_chain;
      ULONGEST mfc_cq_dw0;
      ULONGEST mfc_cq_dw1;
      ULONGEST mfc_cq_dw2;
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
      int list_lsa, list_size, mfc_lsa, mfc_size;
      ULONGEST mfc_ea;
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;

      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
	 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */

      mfc_cq_dw0
	= extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
      mfc_cq_dw1
	= extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
      mfc_cq_dw2
	= extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);

      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);

      /* The effective address is split: high 52 bits in DW1, low 12
	 bits in DW2.  */
      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
	       | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);

      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);

      cmd_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "cmd");

      /* Print the mnemonic when known, the raw opcode otherwise.  */
      if (spu_mfc_opcode[mfc_cmd_opcode])
	ui_out_field_string (current_uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
      else
	ui_out_field_int (current_uiout, "opcode", mfc_cmd_opcode);

      ui_out_field_int (current_uiout, "tag", mfc_cmd_tag);
      ui_out_field_int (current_uiout, "tid", tclass_id);
      ui_out_field_int (current_uiout, "rid", rclass_id);

      if (ea_valid_p)
	ui_out_field_fmt (current_uiout, "ea", "0x%s", phex (mfc_ea, 8));
      else
	ui_out_field_skip (current_uiout, "ea");

      /* LSA and (quadword-valid) sizes are stored in 16-byte units.  */
      ui_out_field_fmt (current_uiout, "lsa", "0x%05x", mfc_lsa << 4);
      if (qw_valid_p)
	ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size << 4);
      else
	ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size);

      /* List address/size are stored in 8-byte units.  */
      if (list_valid_p)
	{
	  ui_out_field_fmt (current_uiout, "lstaddr", "0x%05x", list_lsa << 3);
	  ui_out_field_fmt (current_uiout, "lstsize", "0x%05x", list_size << 3);
	}
      else
	{
	  ui_out_field_skip (current_uiout, "lstaddr");
	  ui_out_field_skip (current_uiout, "lstsize");
	}

      if (cmd_error_p)
	ui_out_field_string (current_uiout, "error_p", "*");
      else
	ui_out_field_skip (current_uiout, "error_p");

      do_cleanups (cmd_chain);

      if (!ui_out_is_mi_like_p (current_uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
+
+static void
+info_spu_dma_command (char *args, int from_tty)
+{
+ struct frame_info *frame = get_selected_frame (NULL);
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ ULONGEST dma_info_type;
+ ULONGEST dma_info_mask;
+ ULONGEST dma_info_status;
+ ULONGEST dma_info_stall_and_notify;
+ ULONGEST dma_info_atomic_command_status;
+ struct cleanup *chain;
+ char annex[32];
+ gdb_byte buf[1024];
+ LONGEST len;
+ int id;
+
+ if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
+ error (_("\"info spu\" is only supported on the SPU architecture."));
+
+ id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
+
+ xsnprintf (annex, sizeof annex, "%d/dma_info", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, 40 + 16 * 32);
+ if (len <= 0)
+ error (_("Could not read dma_info."));
+
+ dma_info_type
+ = extract_unsigned_integer (buf, 8, byte_order);
+ dma_info_mask
+ = extract_unsigned_integer (buf + 8, 8, byte_order);
+ dma_info_status
+ = extract_unsigned_integer (buf + 16, 8, byte_order);
+ dma_info_stall_and_notify
+ = extract_unsigned_integer (buf + 24, 8, byte_order);
+ dma_info_atomic_command_status
+ = extract_unsigned_integer (buf + 32, 8, byte_order);
+
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoDMA");
+
+ if (ui_out_is_mi_like_p (current_uiout))
+ {
+ ui_out_field_fmt (current_uiout, "dma_info_type", "0x%s",
+ phex_nz (dma_info_type, 4));
+ ui_out_field_fmt (current_uiout, "dma_info_mask", "0x%s",
+ phex_nz (dma_info_mask, 4));
+ ui_out_field_fmt (current_uiout, "dma_info_status", "0x%s",
+ phex_nz (dma_info_status, 4));
+ ui_out_field_fmt (current_uiout, "dma_info_stall_and_notify", "0x%s",
+ phex_nz (dma_info_stall_and_notify, 4));
+ ui_out_field_fmt (current_uiout, "dma_info_atomic_command_status", "0x%s",
+ phex_nz (dma_info_atomic_command_status, 4));
+ }
+ else
+ {
+ const char *query_msg = _("no query pending");
+
+ if (dma_info_type & 4)
+ switch (dma_info_type & 3)
+ {
+ case 1: query_msg = _("'any' query pending"); break;
+ case 2: query_msg = _("'all' query pending"); break;
+ default: query_msg = _("undefined query type"); break;
+ }
+
+ printf_filtered (_("Tag-Group Status 0x%s\n"),
+ phex (dma_info_status, 4));
+ printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
+ phex (dma_info_mask, 4), query_msg);
+ printf_filtered (_("Stall-and-Notify 0x%s\n"),
+ phex (dma_info_stall_and_notify, 4));
+ printf_filtered (_("Atomic Cmd Status 0x%s\n"),
+ phex (dma_info_atomic_command_status, 4));
+ printf_filtered ("\n");
+ }
+
+ info_spu_dma_cmdlist (buf + 40, 16, byte_order);
+ do_cleanups (chain);
+}
+
+static void
+info_spu_proxydma_command (char *args, int from_tty)
+{
+ struct frame_info *frame = get_selected_frame (NULL);
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ ULONGEST dma_info_type;
+ ULONGEST dma_info_mask;
+ ULONGEST dma_info_status;
+ struct cleanup *chain;
+ char annex[32];
+ gdb_byte buf[1024];
+ LONGEST len;
+ int id;
+
+ if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
+ error (_("\"info spu\" is only supported on the SPU architecture."));
+
+ id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
+
+ xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
+ len = target_read (¤t_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, 24 + 8 * 32);
+ if (len <= 0)
+ error (_("Could not read proxydma_info."));
+
+ dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
+ dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
+ dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
+
+ chain = make_cleanup_ui_out_tuple_begin_end (current_uiout,
+ "SPUInfoProxyDMA");
+
+ if (ui_out_is_mi_like_p (current_uiout))
+ {
+ ui_out_field_fmt (current_uiout, "proxydma_info_type", "0x%s",
+ phex_nz (dma_info_type, 4));
+ ui_out_field_fmt (current_uiout, "proxydma_info_mask", "0x%s",
+ phex_nz (dma_info_mask, 4));
+ ui_out_field_fmt (current_uiout, "proxydma_info_status", "0x%s",
+ phex_nz (dma_info_status, 4));
+ }
+ else
+ {
+ const char *query_msg;
+
+ switch (dma_info_type & 3)
+ {
+ case 0: query_msg = _("no query pending"); break;
+ case 1: query_msg = _("'any' query pending"); break;
+ case 2: query_msg = _("'all' query pending"); break;
+ default: query_msg = _("undefined query type"); break;
+ }
+
+ printf_filtered (_("Tag-Group Status 0x%s\n"),
+ phex (dma_info_status, 4));
+ printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
+ phex (dma_info_mask, 4), query_msg);
+ printf_filtered ("\n");
+ }
+
+ info_spu_dma_cmdlist (buf + 24, 8, byte_order);
+ do_cleanups (chain);
+}
+
/* The "info spu" command prefix: print usage and list the available
   "info spu" subcommands.  */
static void
info_spu_command (char *args, int from_tty)
{
  printf_unfiltered (_("\"info spu\" must be followed by "
		       "the name of an SPU facility.\n"));
  help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
}
+
+
+/* Root of all "set spu "/"show spu " commands. */
+
/* The "show spu" command prefix: list the available "show spu"
   subcommands.  */
static void
show_spu_command (char *args, int from_tty)
{
  help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
}
+
/* The "set spu" command prefix: list the available "set spu"
   subcommands.  */
static void
set_spu_command (char *args, int from_tty)
{
  help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
}
+
/* "show spu stop-on-load" callback: report the current setting.  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
                       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
                    value);
}
+
/* "show spu auto-flush-cache" callback: report the current setting.  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
		    value);
}
+
+
+/* Set up gdbarch struct. */
+
+static struct gdbarch *
+spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
+{
+ struct gdbarch *gdbarch;
+ struct gdbarch_tdep *tdep;
+ int id = -1;
+
+ /* Which spufs ID was requested as address space? */
+ if (info.tdep_info)
+ id = *(int *)info.tdep_info;
+ /* For objfile architectures of SPU solibs, decode the ID from the name.
+ This assumes the filename convention employed by solib-spu.c. */
+ else if (info.abfd)
+ {
+ char *name = strrchr (info.abfd->filename, '@');
+ if (name)
+ sscanf (name, "@0x%*x <%d>", &id);
+ }
+
+ /* Find a candidate among extant architectures. */
+ for (arches = gdbarch_list_lookup_by_info (arches, &info);
+ arches != NULL;
+ arches = gdbarch_list_lookup_by_info (arches->next, &info))
+ {
+ tdep = gdbarch_tdep (arches->gdbarch);
+ if (tdep && tdep->id == id)
+ return arches->gdbarch;
+ }
+
+ /* None found, so create a new architecture. */
+ tdep = XCALLOC (1, struct gdbarch_tdep);
+ tdep->id = id;
+ gdbarch = gdbarch_alloc (&info, tdep);