+ target = target & (SPU_LS_SIZE - 1);
+ if (target != next_pc)
+ insert_single_step_breakpoint (target);
+ }
+
+ return 1;
+}
+
+/* Target overlays for the SPU overlay manager.
+
+ See the documentation of simple_overlay_update for how the
+ interface is supposed to work.
+
+ Data structures used by the overlay manager:
+
+ struct ovly_table
+ {
+ u32 vma;
+ u32 size;
+ u32 pos;
+ u32 buf;
+ } _ovly_table[]; -- one entry per overlay section
+
+ struct ovly_buf_table
+ {
+ u32 mapped;
+ } _ovly_buf_table[]; -- one entry per overlay buffer
+
+ _ovly_table should never change.
+
+ Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
+ and _ovly_buf_table are of type STT_OBJECT and their size set to the size
+ of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
+
+ mapped is an index into _ovly_table. Both the mapped and buf indices start
+ from one to reference the first entry in their respective tables. */
+
+/* Using the per-objfile private data mechanism, we store for each
+ objfile an array of "struct spu_overlay_table" structures, one
+ for each obj_section of the objfile. This structure holds two
+ fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
+ is *not* an overlay section. If it is non-zero, it represents
+ a target address. The overlay section is mapped iff the target
+ integer at this location equals MAPPED_VAL. */
+
+/* Per-objfile cache of overlay mapping information, registered with
+   GDB's objfile_data / set_objfile_data mechanism.  */
+static const struct objfile_data *spu_overlay_data;
+
+/* Overlay mapping state for a single obj_section (see the comment
+   above for the overall scheme).  */
+struct spu_overlay_table
+  {
+    CORE_ADDR mapped_ptr;	/* Target address of this section's
+				   _ovly_buf_table slot; zero means the
+				   section is not an overlay section.  */
+    CORE_ADDR mapped_val;	/* 1-based _ovly_table index; the section
+				   is mapped iff the target word at
+				   MAPPED_PTR equals this value.  */
+  };
+
+/* Retrieve the overlay table for OBJFILE.  If not already cached, read
+   the _ovly_table data structure from the target and initialize the
+   spu_overlay_table data structure from it.  Returns NULL if OBJFILE
+   does not provide the overlay manager symbols.  */
+static struct spu_overlay_table *
+spu_get_overlay_table (struct objfile *objfile)
+{
+  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
+  CORE_ADDR ovly_table_base, ovly_buf_table_base;
+  unsigned ovly_table_size, ovly_buf_table_size;
+  struct spu_overlay_table *tbl;
+  struct obj_section *osect;
+  char *ovly_table;
+  int i;
+
+  /* Return the cached table if this objfile was processed before.  */
+  tbl = objfile_data (objfile, spu_overlay_data);
+  if (tbl)
+    return tbl;
+
+  /* Without both overlay manager symbols, this is not an overlay
+     binary.  */
+  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
+  if (!ovly_table_msym)
+    return NULL;
+
+  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
+  if (!ovly_buf_table_msym)
+    return NULL;
+
+  /* The symbol sizes give the total byte size of each target array
+     (see the comment above: the symbols are STT_OBJECT with sizes).  */
+  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
+  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
+
+  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
+  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
+
+  /* Read the raw _ovly_table contents from the target.  */
+  ovly_table = xmalloc (ovly_table_size);
+  read_memory (ovly_table_base, ovly_table, ovly_table_size);
+
+  /* Allocate one spu_overlay_table entry per obj_section.  The obstack
+     allocation is zeroed, so MAPPED_PTR == 0 marks non-overlay
+     sections by default.  */
+  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
+			objfile->sections_end - objfile->sections,
+			struct spu_overlay_table);
+
+  /* Each _ovly_table entry is 16 bytes: u32 vma, size, pos, buf.  */
+  for (i = 0; i < ovly_table_size / 16; i++)
+    {
+      CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
+      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
+      CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
+      CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);
+
+      /* Skip entries with an out-of-range buffer index; BUF is
+         1-based (zero means no buffer).  */
+      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
+	continue;
+
+      /* Match this overlay entry to an obj_section by VMA and file
+         position, and record the target address of its
+         _ovly_buf_table slot plus the value marking it mapped.  */
+      ALL_OBJFILE_OSECTIONS (objfile, osect)
+	if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
+	    && pos == osect->the_bfd_section->filepos)
+	  {
+	    int ndx = osect - objfile->sections;
+	    tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
+	    /* MAPPED indices into _ovly_table are 1-based as well.  */
+	    tbl[ndx].mapped_val = i + 1;
+	    break;
+	  }
+    }
+
+  xfree (ovly_table);
+  set_objfile_data (objfile, spu_overlay_data, tbl);
+  return tbl;
+}
+
+/* Read the _ovly_buf_table entry from the target to determine whether
+   OSECT is currently mapped, and update the mapped state.  */
+static void
+spu_overlay_update_osect (struct obj_section *osect)
+{
+  struct spu_overlay_table *ovly_table;
+  CORE_ADDR val;
+
+  ovly_table = spu_get_overlay_table (osect->objfile);
+  if (!ovly_table)
+    return;
+
+  /* Index the per-section table by OSECT's position in its objfile's
+     section array.  */
+  ovly_table += osect - osect->objfile->sections;
+  if (ovly_table->mapped_ptr == 0)
+    return;	/* Not an overlay section.  */
+
+  /* The section is mapped iff its buffer's "mapped" word on the
+     target currently names this section's _ovly_table index.  */
+  val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
+  osect->ovly_mapped = (val == ovly_table->mapped_val);
+}
+
+/* If OSECT is NULL, then update all sections' mapped state.
+   If OSECT is non-NULL, then update only OSECT's mapped state.  */
+static void
+spu_overlay_update (struct obj_section *osect)
+{
+  /* Just one section.  */
+  if (osect)
+    spu_overlay_update_osect (osect);
+
+  /* All sections.  Only sections the overlay machinery knows as
+     overlays need a target read; the rest keep their state.  */
+  else
+    {
+      struct objfile *objfile;
+
+      ALL_OBJSECTIONS (objfile, osect)
+	if (section_is_overlay (osect))
+	  spu_overlay_update_osect (osect);
+    }
+}
+
+/* Whenever a new objfile is loaded, read the target's _ovly_table.
+   If there is one, go through all sections and make sure for non-
+   overlay sections LMA equals VMA, while for overlay sections LMA
+   is larger than local store size.  */
+static void
+spu_overlay_new_objfile (struct objfile *objfile)
+{
+  struct spu_overlay_table *ovly_table;
+  struct obj_section *osect;
+
+  /* If we've already touched this file, do nothing.  */
+  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
+    return;
+
+  /* Consider only SPU objfiles.  */
+  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
+    return;
+
+  /* Check if this objfile has overlays.  */
+  ovly_table = spu_get_overlay_table (objfile);
+  if (!ovly_table)
+    return;
+
+  /* Now go and fiddle with all the LMAs.  */
+  ALL_OBJFILE_OSECTIONS (objfile, osect)
+    {
+      bfd *obfd = objfile->obfd;
+      asection *bsect = osect->the_bfd_section;
+      int ndx = osect - objfile->sections;
+
+      /* Non-overlay section: loaded in place, LMA == VMA.  */
+      if (ovly_table[ndx].mapped_ptr == 0)
+	bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
+      /* Overlay section: give it an LMA beyond the SPU local store,
+	 keyed off the file position so distinct overlay sections get
+	 distinct LMAs.  */
+      else
+	bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
+    }
+}
+
+
+/* "info spu" commands. */
+
+/* The "info spu event" command: show the SPU event status and event
+   mask, read from the spufs event_status and event_mask files of the
+   selected frame's SPU context.  Reports both values in MI mode, or
+   prints them in human-readable form otherwise.  */
+static void
+info_spu_event_command (char *args, int from_tty)
+{
+  struct frame_info *frame = get_selected_frame (NULL);
+  ULONGEST event_status = 0;
+  ULONGEST event_mask = 0;
+  struct cleanup *chain;
+  gdb_byte buf[100];
+  char annex[32];
+  LONGEST len;
+  int id;
+
+  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
+    error (_("\"info spu\" is only supported on the SPU architecture."));
+
+  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
+
+  /* The spufs files hold an ASCII hexadecimal number; leave room for
+     a terminating NUL when reading.  */
+  xsnprintf (annex, sizeof annex, "%d/event_status", id);
+  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
+		     buf, 0, (sizeof (buf) - 1));
+  if (len <= 0)
+    error (_("Could not read event_status."));
+  buf[len] = '\0';
+  event_status = strtoulst (buf, NULL, 16);
+
+  xsnprintf (annex, sizeof annex, "%d/event_mask", id);
+  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
+		     buf, 0, (sizeof (buf) - 1));
+  if (len <= 0)
+    error (_("Could not read event_mask."));
+  buf[len] = '\0';
+  event_mask = strtoulst (buf, NULL, 16);
+
+  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
+
+  if (ui_out_is_mi_like_p (uiout))
+    {
+      ui_out_field_fmt (uiout, "event_status",
+			"0x%s", phex_nz (event_status, 4));
+      ui_out_field_fmt (uiout, "event_mask",
+			"0x%s", phex_nz (event_mask, 4));
+    }
+  else
+    {
+      printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
+      printf_filtered (_("Event Mask   0x%s\n"), phex (event_mask, 4));
+    }
+
+  do_cleanups (chain);
+}
+
+static void
+info_spu_signal_command (char *args, int from_tty)
+{
+ struct frame_info *frame = get_selected_frame (NULL);
+ ULONGEST signal1 = 0;
+ ULONGEST signal1_type = 0;
+ int signal1_pending = 0;
+ ULONGEST signal2 = 0;
+ ULONGEST signal2_type = 0;
+ int signal2_pending = 0;
+ struct cleanup *chain;
+ char annex[32];
+ gdb_byte buf[100];
+ LONGEST len;
+ int rc, id;
+
+ if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
+ error (_("\"info spu\" is only supported on the SPU architecture."));
+
+ id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
+
+ xsnprintf (annex, sizeof annex, "%d/signal1", id);
+  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
+ if (len < 0)
+ error (_("Could not read signal1."));
+ else if (len == 4)
+ {
+ signal1 = extract_unsigned_integer (buf, 4);
+ signal1_pending = 1;
+ }
+
+ xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
+  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, (sizeof (buf) - 1));
+ if (len <= 0)
+ error (_("Could not read signal1_type."));
+ buf[len] = '\0';
+ signal1_type = strtoulst (buf, NULL, 16);
+
+ xsnprintf (annex, sizeof annex, "%d/signal2", id);
+  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
+ if (len < 0)
+ error (_("Could not read signal2."));
+ else if (len == 4)
+ {
+ signal2 = extract_unsigned_integer (buf, 4);
+ signal2_pending = 1;
+ }
+
+ xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
+  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
+ buf, 0, (sizeof (buf) - 1));
+ if (len <= 0)
+ error (_("Could not read signal2_type."));
+ buf[len] = '\0';
+ signal2_type = strtoulst (buf, NULL, 16);
+
+ chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
+
+ if (ui_out_is_mi_like_p (uiout))
+ {
+ ui_out_field_int (uiout, "signal1_pending", signal1_pending);
+ ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
+ ui_out_field_int (uiout, "signal1_type", signal1_type);
+ ui_out_field_int (uiout, "signal2_pending", signal2_pending);
+ ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
+ ui_out_field_int (uiout, "signal2_type", signal2_type);
+ }
+ else
+ {
+ if (signal1_pending)
+ printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
+ else
+ printf_filtered (_("Signal 1 not pending "));
+
+ if (signal1_type)
+ printf_filtered (_("(Type Or)\n"));
+ else
+ printf_filtered (_("(Type Overwrite)\n"));
+
+ if (signal2_pending)
+ printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
+ else
+ printf_filtered (_("Signal 2 not pending "));