/* Cell SPU GNU/Linux multi-architecture debugging support.
   Copyright (C) 2009-2018 Free Software Foundation, Inc.

   Contributed by Ulrich Weigand <uweigand@de.ibm.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "arch-utils.h"
#include "observable.h"
#include "inferior.h"
#include "regcache.h"
#include "symfile.h"
#include "objfiles.h"
#include "solib.h"
#include "solist.h"

#include "ppc-tdep.h"
#include "ppc-linux-tdep.h"
#include "spu-tdep.h"

/* The SPU multi-architecture support target.  */

static const target_info spu_multiarch_target_info = {
  "spu",
  N_("SPU multi-architecture support."),
  N_("SPU multi-architecture support.")
};

struct spu_multiarch_target final : public target_ops
{
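  /* This target sits at arch_stratum, layered above the underlying
     process-level PowerPC target.  */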
  spu_multiarch_target ()
  { to_stratum = arch_stratum; };

  const target_info &info () const override
  { return spu_multiarch_target_info; }

  void mourn_inferior () override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
                     const gdb_byte *pattern, ULONGEST pattern_len,
                     CORE_ADDR *found_addrp) override;

  int region_ok_for_hw_watchpoint (CORE_ADDR, int) override;

  struct gdbarch *thread_architecture (ptid_t) override;
};

static spu_multiarch_target spu_ops;

/* Number of SPE objects loaded into the current inferior.  */
static int spu_nr_solib;

/* Stand-alone SPE executable?  */
#define spu_standalone_p() \
  (symfile_objfile && symfile_objfile->obfd \
   && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu)

/* PPU side system calls.  INSTR_SC is the encoding of the PowerPC "sc"
   instruction; NR_spu_run is the spu_run system call number on
   powerpc GNU/Linux.  */
#define INSTR_SC 0x44000002
#define NR_spu_run 0x0116

/* If the PPU thread is currently stopped on a spu_run system call,
   return to FD and ADDR the file handle and NPC parameter address
   used with the system call.  Return non-zero if successful.  */
static int
parse_spufs_run (ptid_t ptid, int *fd, CORE_ADDR *addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  struct gdbarch_tdep *tdep;
  struct regcache *regcache;
  gdb_byte buf[4];
  ULONGEST regval;

  /* If we're not on PPU, there's nothing to detect.  */
  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_powerpc)
    return 0;

  /* If we're called too early (e.g. after fork), we cannot
     access the inferior yet.  */
  if (find_inferior_ptid (ptid) == NULL)
    return 0;

  /* Get PPU-side registers.  */
  regcache = get_thread_arch_regcache (ptid, target_gdbarch ());
  tdep = gdbarch_tdep (target_gdbarch ());

  /* Fetch instruction preceding current NIP.  */
  {
    scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
    inferior_ptid = ptid;
    regval = target_read_memory (regcache_read_pc (regcache) - 4, buf, 4);
  }
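  /* target_read_memory returns zero only if the read succeeded.  */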
  if (regval != 0)
    return 0;
  /* It should be a "sc" instruction.  */
  if (extract_unsigned_integer (buf, 4, byte_order) != INSTR_SC)
    return 0;
  /* System call number should be NR_spu_run.  */
  regcache_cooked_read_unsigned (regcache, tdep->ppc_gp0_regnum, &regval);
  if (regval != NR_spu_run)
    return 0;

  /* Register 3 contains fd, register 4 the NPC param pointer.  */
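  /* Read ORIG_R3 rather than r3 itself: it preserves the original first
     argument, which r3 may no longer hold once the system call is in
     progress.  */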
  regcache_cooked_read_unsigned (regcache, PPC_ORIG_R3_REGNUM, &regval);
  *fd = (int) regval;
  regcache_cooked_read_unsigned (regcache, tdep->ppc_gp0_regnum + 4, &regval);
  *addr = (CORE_ADDR) regval;
  return 1;
}

/* Find gdbarch for SPU context SPUFS_FD.  */
static struct gdbarch *
spu_gdbarch (int spufs_fd)
{
  struct gdbarch_info info;
  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_lookup_arch (bfd_arch_spu, bfd_mach_spu);
  info.byte_order = BFD_ENDIAN_BIG;
  info.osabi = GDB_OSABI_LINUX;
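  /* The spufs file descriptor identifies the SPU context, so each context
     gets its own gdbarch.  */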
  info.id = &spufs_fd;
  return gdbarch_find_by_info (info);
}

/* Override the to_thread_architecture routine.  */
struct gdbarch *
spu_multiarch_target::thread_architecture (ptid_t ptid)
{
  int spufs_fd;
  CORE_ADDR spufs_addr;

  if (parse_spufs_run (ptid, &spufs_fd, &spufs_addr))
    return spu_gdbarch (spufs_fd);

  return beneath ()->thread_architecture (ptid);
}

/* Override the to_region_ok_for_hw_watchpoint routine.  */

int
spu_multiarch_target::region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  /* We cannot watch SPU local store.  */
  if (SPUADDR_SPU (addr) != -1)
    return 0;

  return beneath ()->region_ok_for_hw_watchpoint (addr, len);
}

/* Override the to_fetch_registers routine.  */

void
spu_multiarch_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int spufs_fd;
  CORE_ADDR spufs_addr;

  /* Since we use functions that rely on inferior_ptid, we need to set and
     restore it.  */
  scoped_restore save_ptid
    = make_scoped_restore (&inferior_ptid, regcache->ptid ());

  /* This version applies only if we're currently in spu_run.  */
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    {
      beneath ()->fetch_registers (regcache, regno);
      return;
    }

  /* We must be stopped on a spu_run system call.  */
  if (!parse_spufs_run (inferior_ptid, &spufs_fd, &spufs_addr))
    return;

  /* The ID register holds the spufs file handle.  */
  if (regno == -1 || regno == SPU_ID_REGNUM)
    {
      gdb_byte buf[4];
      store_unsigned_integer (buf, 4, byte_order, spufs_fd);
      regcache->raw_supply (SPU_ID_REGNUM, buf);
    }

  /* The NPC register is found in PPC memory at SPUFS_ADDR.  */
  if (regno == -1 || regno == SPU_PC_REGNUM)
    {
      gdb_byte buf[4];

      if (target_read (beneath (), TARGET_OBJECT_MEMORY, NULL,
                       buf, spufs_addr, sizeof buf) == sizeof buf)
        regcache->raw_supply (SPU_PC_REGNUM, buf);
    }

  /* The GPRs are found in the "regs" spufs file.  */
  if (regno == -1 || (regno >= 0 && regno < SPU_NUM_GPRS))
    {
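      /* Each SPU general-purpose register is 16 bytes (128 bits) wide.  */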
      gdb_byte buf[16 * SPU_NUM_GPRS];
      char annex[32];
      int i;

      xsnprintf (annex, sizeof annex, "%d/regs", spufs_fd);
      if (target_read (beneath (), TARGET_OBJECT_SPU, annex,
                       buf, 0, sizeof buf) == sizeof buf)
        for (i = 0; i < SPU_NUM_GPRS; i++)
          regcache->raw_supply (i, buf + i*16);
    }
}

/* Override the to_store_registers routine.  */

void
spu_multiarch_target::store_registers (struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = regcache->arch ();
  int spufs_fd;
  CORE_ADDR spufs_addr;

  /* Since we use functions that rely on inferior_ptid, we need to set and
     restore it.  */
  scoped_restore save_ptid
    = make_scoped_restore (&inferior_ptid, regcache->ptid ());

  /* This version applies only if we're currently in spu_run.  */
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    {
      beneath ()->store_registers (regcache, regno);
      return;
    }

  /* We must be stopped on a spu_run system call.  */
  if (!parse_spufs_run (inferior_ptid, &spufs_fd, &spufs_addr))
    return;

  /* The NPC register is found in PPC memory at SPUFS_ADDR.  */
  if (regno == -1 || regno == SPU_PC_REGNUM)
    {
      gdb_byte buf[4];
      regcache->raw_collect (SPU_PC_REGNUM, buf);

      target_write (beneath (), TARGET_OBJECT_MEMORY, NULL,
                    buf, spufs_addr, sizeof buf);
    }

  /* The GPRs are found in the "regs" spufs file.  */
  if (regno == -1 || (regno >= 0 && regno < SPU_NUM_GPRS))
    {
      gdb_byte buf[16 * SPU_NUM_GPRS];
      char annex[32];
      int i;

      for (i = 0; i < SPU_NUM_GPRS; i++)
        regcache->raw_collect (i, buf + i*16);

      xsnprintf (annex, sizeof annex, "%d/regs", spufs_fd);
      target_write (beneath (), TARGET_OBJECT_SPU, annex,
                    buf, 0, sizeof buf);
    }
}

/* Override the to_xfer_partial routine.  */

enum target_xfer_status
spu_multiarch_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *ops_beneath = this->beneath ();

  /* Use the "mem" spufs file to access SPU local store.  */
  if (object == TARGET_OBJECT_MEMORY)
    {
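      /* A GDB-side SPU address encodes the spufs context file descriptor
         together with the local-store offset; SPUADDR_SPU yields -1 for
         ordinary PowerPC addresses.  */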
      int fd = SPUADDR_SPU (offset);
      CORE_ADDR addr = SPUADDR_ADDR (offset);
      char mem_annex[32], lslr_annex[32];
      gdb_byte buf[32];
      ULONGEST lslr;
      enum target_xfer_status ret;

      if (fd >= 0)
        {
          xsnprintf (mem_annex, sizeof mem_annex, "%d/mem", fd);
          ret = ops_beneath->xfer_partial (TARGET_OBJECT_SPU,
                                           mem_annex, readbuf, writebuf,
                                           addr, len, xfered_len);
          if (ret == TARGET_XFER_OK)
            return ret;

          /* SPU local store access wraps the address around at the
             local store limit.  We emulate this here.  To avoid needing
             an extra access to retrieve the LSLR, we only do that after
             trying the original address first, and getting end-of-file.  */
          xsnprintf (lslr_annex, sizeof lslr_annex, "%d/lslr", fd);
          memset (buf, 0, sizeof buf);
          if (ops_beneath->xfer_partial (TARGET_OBJECT_SPU,
                                         lslr_annex, buf, NULL,
                                         0, sizeof buf, xfered_len)
              != TARGET_XFER_OK)
            return ret;

          lslr = strtoulst ((char *) buf, NULL, 16);
          return ops_beneath->xfer_partial (TARGET_OBJECT_SPU,
                                            mem_annex, readbuf, writebuf,
                                            addr & lslr, len, xfered_len);
        }
    }

  return ops_beneath->xfer_partial (object, annex,
                                    readbuf, writebuf, offset, len,
                                    xfered_len);
}

/* Override the to_search_memory routine.  */
int
spu_multiarch_target::search_memory (CORE_ADDR start_addr,
                                     ULONGEST search_space_len,
                                     const gdb_byte *pattern,
                                     ULONGEST pattern_len,
                                     CORE_ADDR *found_addrp)
{
  /* For SPU local store, always fall back to the simple method.  */
  if (SPUADDR_SPU (start_addr) >= 0)
    return simple_search_memory (this, start_addr, search_space_len,
                                 pattern, pattern_len, found_addrp);

  return beneath ()->search_memory (start_addr, search_space_len,
                                    pattern, pattern_len, found_addrp);
}


/* Push and pop the SPU multi-architecture support target.  */

static void
spu_multiarch_activate (void)
{
  /* If GDB was configured without SPU architecture support,
     we cannot install SPU multi-architecture support either.  */
  if (spu_gdbarch (-1) == NULL)
    return;

  push_target (&spu_ops);

  /* Make sure the thread architecture is re-evaluated.  */
  registers_changed ();
}

static void
spu_multiarch_deactivate (void)
{
  unpush_target (&spu_ops);

  /* Make sure the thread architecture is re-evaluated.  */
  registers_changed ();
}

static void
spu_multiarch_inferior_created (struct target_ops *ops, int from_tty)
{
  if (spu_standalone_p ())
    spu_multiarch_activate ();
}

static void
spu_multiarch_solib_loaded (struct so_list *so)
{
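  /* Activate support when the first SPE object is loaded into a combined
     PPU/SPU inferior; the unload observer below deactivates it again when
     the last one goes away.  */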
  if (!spu_standalone_p ())
    if (so->abfd && bfd_get_arch (so->abfd) == bfd_arch_spu)
      if (spu_nr_solib++ == 0)
        spu_multiarch_activate ();
}

static void
spu_multiarch_solib_unloaded (struct so_list *so)
{
  if (!spu_standalone_p ())
    if (so->abfd && bfd_get_arch (so->abfd) == bfd_arch_spu)
      if (--spu_nr_solib == 0)
        spu_multiarch_deactivate ();
}

void
spu_multiarch_target::mourn_inferior ()
{
  beneath ()->mourn_inferior ();
  spu_multiarch_deactivate ();
}

void
_initialize_spu_multiarch (void)
{
  /* Install observers to watch for SPU objects.  */
  gdb::observers::inferior_created.attach (spu_multiarch_inferior_created);
  gdb::observers::solib_loaded.attach (spu_multiarch_solib_loaded);
  gdb::observers::solib_unloaded.attach (spu_multiarch_solib_unloaded);
}