1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "observer.h"
44
45 #include "spu-tdep.h"
46
47
48 /* The tdep structure. */
49 struct gdbarch_tdep
50 {
51 /* SPU-specific vector type. */
52 struct type *spu_builtin_type_vec128;
53 };
54
55
56 /* SPU-specific vector type. */
57 static struct type *
58 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
59 {
60 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
61
62 if (!tdep->spu_builtin_type_vec128)
63 {
64 struct type *t;
65
66 t = init_composite_type ("__spu_builtin_type_vec128", TYPE_CODE_UNION);
67 append_composite_type_field (t, "uint128", builtin_type_int128);
68 append_composite_type_field (t, "v2_int64",
69 init_vector_type (builtin_type_int64, 2));
70 append_composite_type_field (t, "v4_int32",
71 init_vector_type (builtin_type_int32, 4));
72 append_composite_type_field (t, "v8_int16",
73 init_vector_type (builtin_type_int16, 8));
74 append_composite_type_field (t, "v16_int8",
75 init_vector_type (builtin_type_int8, 16));
76 append_composite_type_field (t, "v2_double",
77 init_vector_type (builtin_type_double, 2));
78 append_composite_type_field (t, "v4_float",
79 init_vector_type (builtin_type_float, 4));
80
81 TYPE_FLAGS (t) |= TYPE_FLAG_VECTOR;
82 TYPE_NAME (t) = "spu_builtin_type_vec128";
83
84 tdep->spu_builtin_type_vec128 = t;
85 }
86
87 return tdep->spu_builtin_type_vec128;
88 }
89
90
91 /* The list of available "info spu " commands. */
92 static struct cmd_list_element *infospucmdlist = NULL;
93
94 /* Registers. */
95
96 static const char *
97 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
98 {
99 static char *register_names[] =
100 {
101 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
102 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
103 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
104 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
105 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
106 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
107 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
108 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
109 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
110 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
111 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
112 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
113 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
114 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
115 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
116 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
117 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
118 };
119
120 if (reg_nr < 0)
121 return NULL;
122 if (reg_nr >= sizeof register_names / sizeof *register_names)
123 return NULL;
124
125 return register_names[reg_nr];
126 }
127
128 static struct type *
129 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
130 {
131 if (reg_nr < SPU_NUM_GPRS)
132 return spu_builtin_type_vec128 (gdbarch);
133
134 switch (reg_nr)
135 {
136 case SPU_ID_REGNUM:
137 return builtin_type_uint32;
138
139 case SPU_PC_REGNUM:
140 return builtin_type_void_func_ptr;
141
142 case SPU_SP_REGNUM:
143 return builtin_type_void_data_ptr;
144
145 case SPU_FPSCR_REGNUM:
146 return builtin_type_uint128;
147
148 case SPU_SRR0_REGNUM:
149 return builtin_type_uint32;
150
151 case SPU_LSLR_REGNUM:
152 return builtin_type_uint32;
153
154 case SPU_DECR_REGNUM:
155 return builtin_type_uint32;
156
157 case SPU_DECR_STATUS_REGNUM:
158 return builtin_type_uint32;
159
160 default:
161 internal_error (__FILE__, __LINE__, _("invalid regnum"));
162 }
163 }
164
165 /* Pseudo registers.  The cooked stack pointer is the preferred slot of
   the raw SP register; FPSCR and the special registers SRR0, LSLR, DECR
   and DECR_STATUS are accessed through the TARGET_OBJECT_SPU interface.  */
166
167 static void
168 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
169 gdb_byte *buf)
170 {
171 gdb_byte reg[32];
172 char annex[32];
173 ULONGEST id;
174
175 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
176 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
177 memset (reg, 0, sizeof reg);
178 target_read (&current_target, TARGET_OBJECT_SPU, annex,
179 reg, 0, sizeof reg);
180
181 store_unsigned_integer (buf, 4, strtoulst (reg, NULL, 16));
182 }
183
184 static void
185 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
186 int regnum, gdb_byte *buf)
187 {
188 gdb_byte reg[16];
189 char annex[32];
190 ULONGEST id;
191
192 switch (regnum)
193 {
194 case SPU_SP_REGNUM:
195 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
196 memcpy (buf, reg, 4);
197 break;
198
199 case SPU_FPSCR_REGNUM:
200 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
201 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
202 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
203 break;
204
205 case SPU_SRR0_REGNUM:
206 spu_pseudo_register_read_spu (regcache, "srr0", buf);
207 break;
208
209 case SPU_LSLR_REGNUM:
210 spu_pseudo_register_read_spu (regcache, "lslr", buf);
211 break;
212
213 case SPU_DECR_REGNUM:
214 spu_pseudo_register_read_spu (regcache, "decr", buf);
215 break;
216
217 case SPU_DECR_STATUS_REGNUM:
218 spu_pseudo_register_read_spu (regcache, "decr_status", buf);
219 break;
220
221 default:
222 internal_error (__FILE__, __LINE__, _("invalid regnum"));
223 }
224 }
225
226 static void
227 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
228 const gdb_byte *buf)
229 {
230 gdb_byte reg[32];
231 char annex[32];
232 ULONGEST id;
233
234 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
235 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
236 xsnprintf (reg, sizeof reg, "0x%s",
237 phex_nz (extract_unsigned_integer (buf, 4), 4));
238 target_write (&current_target, TARGET_OBJECT_SPU, annex,
239 reg, 0, strlen (reg));
240 }
241
242 static void
243 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
244 int regnum, const gdb_byte *buf)
245 {
246 gdb_byte reg[16];
247 char annex[32];
248 ULONGEST id;
249
250 switch (regnum)
251 {
252 case SPU_SP_REGNUM:
253 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
254 memcpy (reg, buf, 4);
255 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
256 break;
257
258 case SPU_FPSCR_REGNUM:
259 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
260 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
261 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
262 break;
263
264 case SPU_SRR0_REGNUM:
265 spu_pseudo_register_write_spu (regcache, "srr0", buf);
266 break;
267
268 case SPU_LSLR_REGNUM:
269 spu_pseudo_register_write_spu (regcache, "lslr", buf);
270 break;
271
272 case SPU_DECR_REGNUM:
273 spu_pseudo_register_write_spu (regcache, "decr", buf);
274 break;
275
276 case SPU_DECR_STATUS_REGNUM:
277 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
278 break;
279
280 default:
281 internal_error (__FILE__, __LINE__, _("invalid regnum"));
282 }
283 }
284
285 /* Value conversion -- access scalar values at the preferred slot. */
286
287 static struct value *
288 spu_value_from_register (struct type *type, int regnum,
289 struct frame_info *frame)
290 {
291 struct value *value = default_value_from_register (type, regnum, frame);
292 int len = TYPE_LENGTH (type);
293
294 if (regnum < SPU_NUM_GPRS && len < 16)
295 {
296 int preferred_slot = len < 4 ? 4 - len : 0;
297 set_value_offset (value, preferred_slot);
298 }
299
300 return value;
301 }
302
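/* For example, with the offset rule above a 1-byte char is accessed at
   byte offset 3 of its 128-bit register and a 2-byte short at offset 2,
   so both end up right-justified in the preferred slot (the first 32-bit
   word element); values of 4 bytes or more start at offset 0.  */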
303 /* Register groups. */
304
305 static int
306 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
307 struct reggroup *group)
308 {
309 /* Registers displayed via 'info regs'. */
310 if (group == general_reggroup)
311 return 1;
312
313 /* Registers displayed via 'info float'. */
314 if (group == float_reggroup)
315 return 0;
316
317 /* Registers that need to be saved/restored in order to
318 push or pop frames. */
319 if (group == save_reggroup || group == restore_reggroup)
320 return 1;
321
322 return default_register_reggroup_p (gdbarch, regnum, group);
323 }
324
325 /* Address conversion. */
326
327 static CORE_ADDR
328 spu_pointer_to_address (struct type *type, const gdb_byte *buf)
329 {
330 ULONGEST addr = extract_unsigned_integer (buf, TYPE_LENGTH (type));
331 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
332
333 if (target_has_registers && target_has_stack && target_has_memory)
334 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
335 SPU_LSLR_REGNUM);
336
337 return addr & lslr;
338 }
339
340 static CORE_ADDR
341 spu_integer_to_address (struct gdbarch *gdbarch,
342 struct type *type, const gdb_byte *buf)
343 {
344 ULONGEST addr = unpack_long (type, buf);
345 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
346
347 if (target_has_registers && target_has_stack && target_has_memory)
348 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
349 SPU_LSLR_REGNUM);
350
351 return addr & lslr;
352 }
353
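/* Worked example (assuming the hard-wired 256 KB local store, i.e.
   SPU_LS_SIZE == 0x40000): the default mask is 0x3ffff, so a "pointer"
   value of 0x12345678 wraps to local-store offset 0x05678.  A smaller
   LSLR value read from the target narrows the mask accordingly.  */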
354
355 /* Decoding SPU instructions. */
356
357 enum
358 {
359 op_lqd = 0x34,
360 op_lqx = 0x3c4,
361 op_lqa = 0x61,
362 op_lqr = 0x67,
363 op_stqd = 0x24,
364 op_stqx = 0x144,
365 op_stqa = 0x41,
366 op_stqr = 0x47,
367
368 op_il = 0x081,
369 op_ila = 0x21,
370 op_a = 0x0c0,
371 op_ai = 0x1c,
372
373 op_selb = 0x4,
374
375 op_br = 0x64,
376 op_bra = 0x60,
377 op_brsl = 0x66,
378 op_brasl = 0x62,
379 op_brnz = 0x42,
380 op_brz = 0x40,
381 op_brhnz = 0x46,
382 op_brhz = 0x44,
383 op_bi = 0x1a8,
384 op_bisl = 0x1a9,
385 op_biz = 0x128,
386 op_binz = 0x129,
387 op_bihz = 0x12a,
388 op_bihnz = 0x12b,
389 };
390
391 static int
392 is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
393 {
394 if ((insn >> 21) == op)
395 {
396 *rt = insn & 127;
397 *ra = (insn >> 7) & 127;
398 *rb = (insn >> 14) & 127;
399 return 1;
400 }
401
402 return 0;
403 }
404
405 static int
406 is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
407 {
408 if ((insn >> 28) == op)
409 {
410 *rt = (insn >> 21) & 127;
411 *ra = (insn >> 7) & 127;
412 *rb = (insn >> 14) & 127;
413 *rc = insn & 127;
414 return 1;
415 }
416
417 return 0;
418 }
419
420 static int
421 is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
422 {
423 if ((insn >> 21) == op)
424 {
425 *rt = insn & 127;
426 *ra = (insn >> 7) & 127;
427 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
428 return 1;
429 }
430
431 return 0;
432 }
433
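/* A note on the sign extension used above and below: "(x ^ 0x40) - 0x40"
   sign-extends a 7-bit two's complement field.  E.g. x = 0x7f gives
   0x7f ^ 0x40 = 0x3f and 0x3f - 0x40 = -1, while x = 0x3f gives
   0x3f ^ 0x40 = 0x7f and 0x7f - 0x40 = 63.  The 10-, 16- and 18-bit
   decoders below apply the same idiom with 0x200, 0x8000 and 0x20000.  */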
434 static int
435 is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
436 {
437 if ((insn >> 24) == op)
438 {
439 *rt = insn & 127;
440 *ra = (insn >> 7) & 127;
441 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
442 return 1;
443 }
444
445 return 0;
446 }
447
448 static int
449 is_ri16 (unsigned int insn, int op, int *rt, int *i16)
450 {
451 if ((insn >> 23) == op)
452 {
453 *rt = insn & 127;
454 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
455 return 1;
456 }
457
458 return 0;
459 }
460
461 static int
462 is_ri18 (unsigned int insn, int op, int *rt, int *i18)
463 {
464 if ((insn >> 25) == op)
465 {
466 *rt = insn & 127;
467 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
468 return 1;
469 }
470
471 return 0;
472 }
473
474 static int
475 is_branch (unsigned int insn, int *offset, int *reg)
476 {
477 int rt, i7, i16;
478
479 if (is_ri16 (insn, op_br, &rt, &i16)
480 || is_ri16 (insn, op_brsl, &rt, &i16)
481 || is_ri16 (insn, op_brnz, &rt, &i16)
482 || is_ri16 (insn, op_brz, &rt, &i16)
483 || is_ri16 (insn, op_brhnz, &rt, &i16)
484 || is_ri16 (insn, op_brhz, &rt, &i16))
485 {
486 *reg = SPU_PC_REGNUM;
487 *offset = i16 << 2;
488 return 1;
489 }
490
491 if (is_ri16 (insn, op_bra, &rt, &i16)
492 || is_ri16 (insn, op_brasl, &rt, &i16))
493 {
494 *reg = -1;
495 *offset = i16 << 2;
496 return 1;
497 }
498
499 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
500 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
501 || is_ri7 (insn, op_biz, &rt, reg, &i7)
502 || is_ri7 (insn, op_binz, &rt, reg, &i7)
503 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
504 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
505 {
506 *offset = 0;
507 return 1;
508 }
509
510 return 0;
511 }
512
513
514 /* Prolog parsing. */
515
516 struct spu_prologue_data
517 {
518 /* Stack frame size. -1 if analysis was unsuccessful. */
519 int size;
520
521 /* How to find the CFA. The CFA is equal to SP at function entry. */
522 int cfa_reg;
523 int cfa_offset;
524
525 /* Offset relative to CFA where a register is saved. -1 if invalid. */
526 int reg_offset[SPU_NUM_GPRS];
527 };
528
529 static CORE_ADDR
530 spu_analyze_prologue (CORE_ADDR start_pc, CORE_ADDR end_pc,
531 struct spu_prologue_data *data)
532 {
533 int found_sp = 0;
534 int found_fp = 0;
535 int found_lr = 0;
536 int reg_immed[SPU_NUM_GPRS];
537 gdb_byte buf[16];
538 CORE_ADDR prolog_pc = start_pc;
539 CORE_ADDR pc;
540 int i;
541
542
543 /* Initialize DATA to default values. */
544 data->size = -1;
545
546 data->cfa_reg = SPU_RAW_SP_REGNUM;
547 data->cfa_offset = 0;
548
549 for (i = 0; i < SPU_NUM_GPRS; i++)
550 data->reg_offset[i] = -1;
551
552 /* Set up REG_IMMED array. This is non-zero for a register if we know its
553 preferred slot currently holds this immediate value. */
554 for (i = 0; i < SPU_NUM_GPRS; i++)
555 reg_immed[i] = 0;
556
557 /* Scan instructions until the first branch.
558
559 The following instructions are important prolog components:
560
561 - The first instruction to set up the stack pointer.
562 - The first instruction to set up the frame pointer.
563 - The first instruction to save the link register.
564
565 We return the instruction after the latest of these three,
566 or the incoming PC if none is found. The first instruction
567 to set up the stack pointer also defines the frame size.
568
569 Note that instructions saving incoming arguments to their stack
570 slots are not counted as important, because they are hard to
571 identify with certainty. This should not matter much, because
572 arguments are relevant only in code compiled with debug data,
573 and in such code the GDB core will advance until the first source
574 line anyway, using SAL data.
575
576 For purposes of stack unwinding, we analyze the following types
577 of instructions in addition:
578
579 - Any instruction adding to the current frame pointer.
580 - Any instruction loading an immediate constant into a register.
581 - Any instruction storing a register onto the stack.
582
583 These are used to compute the CFA and REG_OFFSET output. */
584
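      /* For instance, a compiler might emit a small-frame prologue such as
         (illustrative sketch, not tied to any particular compiler):

             stqd  $lr, 16($sp)      # save link register in caller's frame
             stqd  $sp, -48($sp)     # store back chain
             ai    $sp, $sp, -48     # allocate 48-byte frame

         Scanning it, the STQD of $lr records the link register's save slot,
         and the AI on $sp both sets data->size = 48 and marks the end of
         the interesting part of the prologue.  */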
585 for (pc = start_pc; pc < end_pc; pc += 4)
586 {
587 unsigned int insn;
588 int rt, ra, rb, rc, immed;
589
590 if (target_read_memory (pc, buf, 4))
591 break;
592 insn = extract_unsigned_integer (buf, 4);
593
594 /* AI is the typical instruction to set up a stack frame.
595 It is also used to initialize the frame pointer. */
596 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
597 {
598 if (rt == data->cfa_reg && ra == data->cfa_reg)
599 data->cfa_offset -= immed;
600
601 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
602 && !found_sp)
603 {
604 found_sp = 1;
605 prolog_pc = pc + 4;
606
607 data->size = -immed;
608 }
609 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
610 && !found_fp)
611 {
612 found_fp = 1;
613 prolog_pc = pc + 4;
614
615 data->cfa_reg = SPU_FP_REGNUM;
616 data->cfa_offset -= immed;
617 }
618 }
619
620 /* A is used to set up stack frames of size >= 512 bytes.
621 If we have tracked the contents of the addend register,
622 we can handle this as well. */
623 else if (is_rr (insn, op_a, &rt, &ra, &rb))
624 {
625 if (rt == data->cfa_reg && ra == data->cfa_reg)
626 {
627 if (reg_immed[rb] != 0)
628 data->cfa_offset -= reg_immed[rb];
629 else
630 data->cfa_reg = -1; /* We don't know the CFA any more. */
631 }
632
633 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
634 && !found_sp)
635 {
636 found_sp = 1;
637 prolog_pc = pc + 4;
638
639 if (reg_immed[rb] != 0)
640 data->size = -reg_immed[rb];
641 }
642 }
643
644 /* We need to track IL and ILA used to load immediate constants
645 in case they are later used as input to an A instruction. */
646 else if (is_ri16 (insn, op_il, &rt, &immed))
647 {
648 reg_immed[rt] = immed;
649
650 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
651 found_sp = 1;
652 }
653
654 else if (is_ri18 (insn, op_ila, &rt, &immed))
655 {
656 reg_immed[rt] = immed & 0x3ffff;
657
658 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
659 found_sp = 1;
660 }
661
662 /* STQD is used to save registers to the stack. */
663 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
664 {
665 if (ra == data->cfa_reg)
666 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
667
668 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
669 && !found_lr)
670 {
671 found_lr = 1;
672 prolog_pc = pc + 4;
673 }
674 }
675
676 /* _start uses SELB to set up the stack pointer. */
677 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
678 {
679 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
680 found_sp = 1;
681 }
682
683 /* We terminate if we find a branch. */
684 else if (is_branch (insn, &immed, &ra))
685 break;
686 }
687
688
689 /* If we successfully parsed until here, and didn't find any instruction
690 modifying SP, we assume we have a frameless function. */
691 if (!found_sp)
692 data->size = 0;
693
694 /* Return cooked instead of raw SP. */
695 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
696 data->cfa_reg = SPU_SP_REGNUM;
697
698 return prolog_pc;
699 }
700
701 /* Return the first instruction after the prologue starting at PC. */
702 static CORE_ADDR
703 spu_skip_prologue (CORE_ADDR pc)
704 {
705 struct spu_prologue_data data;
706 return spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
707 }
708
709 /* Return the frame pointer in use at address PC. */
710 static void
711 spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
712 int *reg, LONGEST *offset)
713 {
714 struct spu_prologue_data data;
715 spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
716
717 if (data.size != -1 && data.cfa_reg != -1)
718 {
719 /* The 'frame pointer' address is CFA minus frame size. */
720 *reg = data.cfa_reg;
721 *offset = data.cfa_offset - data.size;
722 }
723 else
724 {
725 /* ??? We don't really know ... */
726 *reg = SPU_SP_REGNUM;
727 *offset = 0;
728 }
729 }
730
731 /* Return true if we are in the function's epilogue, i.e. after the
732 instruction that destroyed the function's stack frame.
733
734 1) scan forward from the point of execution:
735 a) If you find an instruction that modifies the stack pointer
736 or transfers control (except a return), execution is not in
737 an epilogue, return.
738 b) Stop scanning if you find a return instruction or reach the
739 end of the function or reach the hard limit for the size of
740 an epilogue.
741 2) scan backward from the point of execution:
742 a) If you find an instruction that modifies the stack pointer,
743 execution *is* in an epilogue, return.
744 b) Stop scanning if you reach an instruction that transfers
745 control or the beginning of the function or reach the hard
746 limit for the size of an epilogue. */
747
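/* For instance (illustrative sketch), given an epilogue of the form

       ai   $sp, $sp, 48      # tear down the frame
       lqd  $lr, 16($sp)      # reload the link register
       bi   $lr               # return

   a PC located after the AI passes both scans: scanning forward reaches
   the "bi $lr" without another stack-pointer update, and scanning backward
   finds the AI that destroyed the frame.  A PC at the AI itself is still
   considered part of the function body.  */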
748 static int
749 spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
750 {
751 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
752 bfd_byte buf[4];
753 unsigned int insn;
754 int rt, ra, rb, rc, immed;
755
756 /* Find the search limits based on function boundaries and hard limit.
757 We assume the epilogue can be up to 64 instructions long. */
758
759 const int spu_max_epilogue_size = 64 * 4;
760
761 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
762 return 0;
763
764 if (pc - func_start < spu_max_epilogue_size)
765 epilogue_start = func_start;
766 else
767 epilogue_start = pc - spu_max_epilogue_size;
768
769 if (func_end - pc < spu_max_epilogue_size)
770 epilogue_end = func_end;
771 else
772 epilogue_end = pc + spu_max_epilogue_size;
773
774 /* Scan forward until next 'bi $0'. */
775
776 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
777 {
778 if (target_read_memory (scan_pc, buf, 4))
779 return 0;
780 insn = extract_unsigned_integer (buf, 4);
781
782 if (is_branch (insn, &immed, &ra))
783 {
784 if (immed == 0 && ra == SPU_LR_REGNUM)
785 break;
786
787 return 0;
788 }
789
790 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
791 || is_rr (insn, op_a, &rt, &ra, &rb)
792 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
793 {
794 if (rt == SPU_RAW_SP_REGNUM)
795 return 0;
796 }
797 }
798
799 if (scan_pc >= epilogue_end)
800 return 0;
801
802 /* Scan backward until adjustment to stack pointer (R1). */
803
804 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
805 {
806 if (target_read_memory (scan_pc, buf, 4))
807 return 0;
808 insn = extract_unsigned_integer (buf, 4);
809
810 if (is_branch (insn, &immed, &ra))
811 return 0;
812
813 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
814 || is_rr (insn, op_a, &rt, &ra, &rb)
815 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
816 {
817 if (rt == SPU_RAW_SP_REGNUM)
818 return 1;
819 }
820 }
821
822 return 0;
823 }
824
825
826 /* Normal stack frames. */
827
828 struct spu_unwind_cache
829 {
830 CORE_ADDR func;
831 CORE_ADDR frame_base;
832 CORE_ADDR local_base;
833
834 struct trad_frame_saved_reg *saved_regs;
835 };
836
837 static struct spu_unwind_cache *
838 spu_frame_unwind_cache (struct frame_info *next_frame,
839 void **this_prologue_cache)
840 {
841 struct spu_unwind_cache *info;
842 struct spu_prologue_data data;
843 gdb_byte buf[16];
844
845 if (*this_prologue_cache)
846 return *this_prologue_cache;
847
848 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
849 *this_prologue_cache = info;
850 info->saved_regs = trad_frame_alloc_saved_regs (next_frame);
851 info->frame_base = 0;
852 info->local_base = 0;
853
854 /* Find the start of the current function, and analyze its prologue. */
855 info->func = frame_func_unwind (next_frame, NORMAL_FRAME);
856 if (info->func == 0)
857 {
858 /* Fall back to using the current PC as frame ID. */
859 info->func = frame_pc_unwind (next_frame);
860 data.size = -1;
861 }
862 else
863 spu_analyze_prologue (info->func, frame_pc_unwind (next_frame), &data);
864
865
866 /* If successful, use prologue analysis data. */
867 if (data.size != -1 && data.cfa_reg != -1)
868 {
869 CORE_ADDR cfa;
870 int i;
871
872 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
873 frame_unwind_register (next_frame, data.cfa_reg, buf);
874 cfa = extract_unsigned_integer (buf, 4) + data.cfa_offset;
875
876 /* Call-saved register slots. */
877 for (i = 0; i < SPU_NUM_GPRS; i++)
878 if (i == SPU_LR_REGNUM
879 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
880 if (data.reg_offset[i] != -1)
881 info->saved_regs[i].addr = cfa - data.reg_offset[i];
882
883 /* Frame bases. */
884 info->frame_base = cfa;
885 info->local_base = cfa - data.size;
886 }
887
888 /* Otherwise, fall back to reading the backchain link. */
889 else
890 {
891 CORE_ADDR reg, backchain;
892
893 /* Get the backchain. */
894 reg = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
895 backchain = read_memory_unsigned_integer (reg, 4);
896
897 /* A zero backchain terminates the frame chain. Also, sanity
898 check against the local store size limit. */
899 if (backchain != 0 && backchain < SPU_LS_SIZE)
900 {
901 /* Assume the link register is saved into its slot. */
902 if (backchain + 16 < SPU_LS_SIZE)
903 info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;
904
905 /* Frame bases. */
906 info->frame_base = backchain;
907 info->local_base = reg;
908 }
909 }
910
911 /* The previous SP is equal to the CFA. */
912 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);
913
914 /* Read full contents of the unwound link register in order to
915 be able to determine the return address. */
916 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
917 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
918 else
919 frame_unwind_register (next_frame, SPU_LR_REGNUM, buf);
920
921 /* Normally, the return address is contained in slot 0 of the
922 link register, and slots 1-3 are zero. For an overlay return,
923 slot 0 contains the address of the overlay manager return stub,
924 slot 1 contains the partition number of the overlay section to
925 be returned to, and slot 2 contains the return address within
926 that section. Return the latter address in that case. */
927 if (extract_unsigned_integer (buf + 8, 4) != 0)
928 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
929 extract_unsigned_integer (buf + 8, 4));
930 else
931 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
932 extract_unsigned_integer (buf, 4));
933
934 return info;
935 }
936
937 static void
938 spu_frame_this_id (struct frame_info *next_frame,
939 void **this_prologue_cache, struct frame_id *this_id)
940 {
941 struct spu_unwind_cache *info =
942 spu_frame_unwind_cache (next_frame, this_prologue_cache);
943
944 if (info->frame_base == 0)
945 return;
946
947 *this_id = frame_id_build (info->frame_base, info->func);
948 }
949
950 static void
951 spu_frame_prev_register (struct frame_info *next_frame,
952 void **this_prologue_cache,
953 int regnum, int *optimizedp,
954 enum lval_type *lvalp, CORE_ADDR * addrp,
955 int *realnump, gdb_byte *bufferp)
956 {
957 struct spu_unwind_cache *info
958 = spu_frame_unwind_cache (next_frame, this_prologue_cache);
959
960 /* Special-case the stack pointer. */
961 if (regnum == SPU_RAW_SP_REGNUM)
962 regnum = SPU_SP_REGNUM;
963
964 trad_frame_get_prev_register (next_frame, info->saved_regs, regnum,
965 optimizedp, lvalp, addrp, realnump, bufferp);
966 }
967
968 static const struct frame_unwind spu_frame_unwind = {
969 NORMAL_FRAME,
970 spu_frame_this_id,
971 spu_frame_prev_register
972 };
973
974 const struct frame_unwind *
975 spu_frame_sniffer (struct frame_info *next_frame)
976 {
977 return &spu_frame_unwind;
978 }
979
980 static CORE_ADDR
981 spu_frame_base_address (struct frame_info *next_frame, void **this_cache)
982 {
983 struct spu_unwind_cache *info
984 = spu_frame_unwind_cache (next_frame, this_cache);
985 return info->local_base;
986 }
987
988 static const struct frame_base spu_frame_base = {
989 &spu_frame_unwind,
990 spu_frame_base_address,
991 spu_frame_base_address,
992 spu_frame_base_address
993 };
994
995 static CORE_ADDR
996 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
997 {
998 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
999 /* Mask off interrupt enable bit. */
1000 return pc & -4;
1001 }
1002
1003 static CORE_ADDR
1004 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1005 {
1006 return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1007 }
1008
1009 static CORE_ADDR
1010 spu_read_pc (struct regcache *regcache)
1011 {
1012 ULONGEST pc;
1013 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1014 /* Mask off interrupt enable bit. */
1015 return pc & -4;
1016 }
1017
1018 static void
1019 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1020 {
1021 /* Keep interrupt enabled state unchanged. */
1022 ULONGEST old_pc;
1023 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1024 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1025 (pc & -4) | (old_pc & 3));
1026 }
1027
1028
1029 /* Function calling convention. */
1030
1031 static CORE_ADDR
1032 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1033 {
1034 return sp & ~15;
1035 }
1036
1037 static int
1038 spu_scalar_value_p (struct type *type)
1039 {
1040 switch (TYPE_CODE (type))
1041 {
1042 case TYPE_CODE_INT:
1043 case TYPE_CODE_ENUM:
1044 case TYPE_CODE_RANGE:
1045 case TYPE_CODE_CHAR:
1046 case TYPE_CODE_BOOL:
1047 case TYPE_CODE_PTR:
1048 case TYPE_CODE_REF:
1049 return TYPE_LENGTH (type) <= 16;
1050
1051 default:
1052 return 0;
1053 }
1054 }
1055
1056 static void
1057 spu_value_to_regcache (struct regcache *regcache, int regnum,
1058 struct type *type, const gdb_byte *in)
1059 {
1060 int len = TYPE_LENGTH (type);
1061
1062 if (spu_scalar_value_p (type))
1063 {
1064 int preferred_slot = len < 4 ? 4 - len : 0;
1065 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1066 }
1067 else
1068 {
1069 while (len >= 16)
1070 {
1071 regcache_cooked_write (regcache, regnum++, in);
1072 in += 16;
1073 len -= 16;
1074 }
1075
1076 if (len > 0)
1077 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1078 }
1079 }
1080
1081 static void
1082 spu_regcache_to_value (struct regcache *regcache, int regnum,
1083 struct type *type, gdb_byte *out)
1084 {
1085 int len = TYPE_LENGTH (type);
1086
1087 if (spu_scalar_value_p (type))
1088 {
1089 int preferred_slot = len < 4 ? 4 - len : 0;
1090 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1091 }
1092 else
1093 {
1094 while (len >= 16)
1095 {
1096 regcache_cooked_read (regcache, regnum++, out);
1097 out += 16;
1098 len -= 16;
1099 }
1100
1101 if (len > 0)
1102 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1103 }
1104 }
1105
1106 static CORE_ADDR
1107 spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1108 struct regcache *regcache, CORE_ADDR bp_addr,
1109 int nargs, struct value **args, CORE_ADDR sp,
1110 int struct_return, CORE_ADDR struct_addr)
1111 {
1112 int i;
1113 int regnum = SPU_ARG1_REGNUM;
1114 int stack_arg = -1;
1115 gdb_byte buf[16];
1116
1117 /* Set the return address. */
1118 memset (buf, 0, sizeof buf);
1119 store_unsigned_integer (buf, 4, bp_addr);
1120 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1121
1122 /* If STRUCT_RETURN is true, then the struct return address (in
1123 STRUCT_ADDR) will consume the first argument-passing register.
1124 Both adjust the register count and store that value. */
1125 if (struct_return)
1126 {
1127 memset (buf, 0, sizeof buf);
1128 store_unsigned_integer (buf, 4, struct_addr);
1129 regcache_cooked_write (regcache, regnum++, buf);
1130 }
1131
1132 /* Fill in argument registers. */
1133 for (i = 0; i < nargs; i++)
1134 {
1135 struct value *arg = args[i];
1136 struct type *type = check_typedef (value_type (arg));
1137 const gdb_byte *contents = value_contents (arg);
1138 int len = TYPE_LENGTH (type);
1139 int n_regs = align_up (len, 16) / 16;
1140
1141 /* If the argument doesn't wholly fit into registers, it and
1142 all subsequent arguments go to the stack. */
1143 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1144 {
1145 stack_arg = i;
1146 break;
1147 }
1148
1149 spu_value_to_regcache (regcache, regnum, type, contents);
1150 regnum += n_regs;
1151 }
1152
1153 /* Overflow arguments go to the stack. */
1154 if (stack_arg != -1)
1155 {
1156 CORE_ADDR ap;
1157
1158 /* Allocate all required stack size. */
1159 for (i = stack_arg; i < nargs; i++)
1160 {
1161 struct type *type = check_typedef (value_type (args[i]));
1162 sp -= align_up (TYPE_LENGTH (type), 16);
1163 }
1164
1165 /* Fill in stack arguments. */
1166 ap = sp;
1167 for (i = stack_arg; i < nargs; i++)
1168 {
1169 struct value *arg = args[i];
1170 struct type *type = check_typedef (value_type (arg));
1171 int len = TYPE_LENGTH (type);
1172 int preferred_slot;
1173
1174 if (spu_scalar_value_p (type))
1175 preferred_slot = len < 4 ? 4 - len : 0;
1176 else
1177 preferred_slot = 0;
1178
1179 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1180 ap += align_up (TYPE_LENGTH (type), 16);
1181 }
1182 }
1183
1184 /* Allocate stack frame header. */
1185 sp -= 32;
1186
1187 /* Store stack back chain. */
1188 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1189 target_write_memory (sp, buf, 16);
1190
1191 /* Finally, update the SP register. */
1192 regcache_cooked_write_unsigned (regcache, SPU_SP_REGNUM, sp);
1193
1194 return sp;
1195 }
1196
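/* Worked example for the argument loop above (hypothetical call): for
   f (char c, struct_of_20_bytes s, int x), C is written to the preferred
   slot (byte offset 3) of the first argument register, S occupies the
   next two registers since align_up (20, 16) / 16 == 2, and X goes into
   the register after that.  Only when an argument no longer fits wholly
   into the remaining argument registers does it, and every argument
   after it, spill to the stack.  */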
1197 static struct frame_id
1198 spu_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
1199 {
1200 return frame_id_build (spu_unwind_sp (gdbarch, next_frame),
1201 spu_unwind_pc (gdbarch, next_frame));
1202 }
1203
1204 /* Function return value access. */
1205
1206 static enum return_value_convention
1207 spu_return_value (struct gdbarch *gdbarch, struct type *type,
1208 struct regcache *regcache, gdb_byte *out, const gdb_byte *in)
1209 {
1210 enum return_value_convention rvc;
1211
1212 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1213 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1214 else
1215 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1216
1217 if (in)
1218 {
1219 switch (rvc)
1220 {
1221 case RETURN_VALUE_REGISTER_CONVENTION:
1222 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1223 break;
1224
1225 case RETURN_VALUE_STRUCT_CONVENTION:
1226 error ("Cannot set function return value.");
1227 break;
1228 }
1229 }
1230 else if (out)
1231 {
1232 switch (rvc)
1233 {
1234 case RETURN_VALUE_REGISTER_CONVENTION:
1235 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1236 break;
1237
1238 case RETURN_VALUE_STRUCT_CONVENTION:
1239 error ("Function return value unknown.");
1240 break;
1241 }
1242 }
1243
1244 return rvc;
1245 }
1246
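/* Note on the size threshold above: assuming the usual SPU ABI assignment
   of registers r3 through r74 to SPU_ARG1_REGNUM .. SPU_ARGN_REGNUM, up to
   (74 - 3 + 1) * 16 == 1152 bytes are returned in registers; larger values
   use the struct-return convention.  */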
1247
1248 /* Breakpoints. */
1249
1250 static const gdb_byte *
1251 spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
1252 {
1253 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1254
1255 *lenptr = sizeof breakpoint;
1256 return breakpoint;
1257 }
1258
1259
1260 /* Software single-stepping support. */
1261
1262 int
1263 spu_software_single_step (struct frame_info *frame)
1264 {
1265 CORE_ADDR pc, next_pc;
1266 unsigned int insn;
1267 int offset, reg;
1268 gdb_byte buf[4];
1269
1270 pc = get_frame_pc (frame);
1271
1272 if (target_read_memory (pc, buf, 4))
1273 return 1;
1274 insn = extract_unsigned_integer (buf, 4);
1275
1276 /* Next sequential instruction is at PC + 4, except if the current
1277 instruction is a PPE-assisted call, in which case it is at PC + 8.
1278 Wrap around LS limit to be on the safe side. */
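  /* (0x00002100..0x000021ff encodes a stop-and-signal instruction with a
     0x21xx signal code, the range the SPU ABI reserves for PPE-assisted
     library calls; the 4-byte word following it holds data for the PPE
     side, hence the PC + 8 case below.)  */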
1279 if ((insn & 0xffffff00) == 0x00002100)
1280 next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
1281 else
1282 next_pc = (pc + 4) & (SPU_LS_SIZE - 1);
1283
1284 insert_single_step_breakpoint (next_pc);
1285
1286 if (is_branch (insn, &offset, &reg))
1287 {
1288 CORE_ADDR target = offset;
1289
1290 if (reg == SPU_PC_REGNUM)
1291 target += pc;
1292 else if (reg != -1)
1293 {
1294 get_frame_register_bytes (frame, reg, 0, 4, buf);
1295 target += extract_unsigned_integer (buf, 4) & -4;
1296 }
1297
1298 target = target & (SPU_LS_SIZE - 1);
1299 if (target != next_pc)
1300 insert_single_step_breakpoint (target);
1301 }
1302
1303 return 1;
1304 }
1305
1306 /* Target overlays for the SPU overlay manager.
1307
1308 See the documentation of simple_overlay_update for how the
1309 interface is supposed to work.
1310
1311 Data structures used by the overlay manager:
1312
1313 struct ovly_table
1314 {
1315 u32 vma;
1316 u32 size;
1317 u32 pos;
1318 u32 buf;
1319 } _ovly_table[]; -- one entry per overlay section
1320
1321 struct ovly_buf_table
1322 {
1323 u32 mapped;
1324 } _ovly_buf_table[]; -- one entry per overlay buffer
1325
1326 _ovly_table should never change.
1327
1328 Both tables are aligned to a 16-byte boundary.  The symbols _ovly_table
1329 and _ovly_buf_table are of type STT_OBJECT, and their sizes are set to the
1330 size of the respective array.  buf in _ovly_table is an index into _ovly_buf_table.
1331
1332 mapped is an index into _ovly_table. Both the mapped and buf indices start
1333 from one to reference the first entry in their respective tables. */
1334
1335 /* Using the per-objfile private data mechanism, we store for each
1336 objfile an array of "struct spu_overlay_table" structures, one
1337 for each obj_section of the objfile. This structure holds two
1338 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1339 is *not* an overlay section. If it is non-zero, it represents
1340 a target address. The overlay section is mapped iff the target
1341 integer at this location equals MAPPED_VAL. */
1342
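/* Putting the two together: an overlay section whose _ovly_table entry has
   index I and buffer index BUF gets MAPPED_PTR = &_ovly_buf_table[BUF - 1]
   and MAPPED_VAL = I + 1, so the section is treated as mapped exactly when
   that target word currently holds I + 1.  */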
1343 static const struct objfile_data *spu_overlay_data;
1344
1345 struct spu_overlay_table
1346 {
1347 CORE_ADDR mapped_ptr;
1348 CORE_ADDR mapped_val;
1349 };
1350
1351 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1352 the _ovly_table data structure from the target and initialize the
1353 spu_overlay_table data structure from it. */
1354 static struct spu_overlay_table *
1355 spu_get_overlay_table (struct objfile *objfile)
1356 {
1357 struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1358 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1359 unsigned ovly_table_size, ovly_buf_table_size;
1360 struct spu_overlay_table *tbl;
1361 struct obj_section *osect;
1362 char *ovly_table;
1363 int i;
1364
1365 tbl = objfile_data (objfile, spu_overlay_data);
1366 if (tbl)
1367 return tbl;
1368
1369 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1370 if (!ovly_table_msym)
1371 return NULL;
1372
1373 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1374 if (!ovly_buf_table_msym)
1375 return NULL;
1376
1377 ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1378 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1379
1380 ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1381 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1382
1383 ovly_table = xmalloc (ovly_table_size);
1384 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1385
1386 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1387 objfile->sections_end - objfile->sections,
1388 struct spu_overlay_table);
1389
1390 for (i = 0; i < ovly_table_size / 16; i++)
1391 {
1392 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
1393 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
1394 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
1395 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);
1396
1397 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1398 continue;
1399
1400 ALL_OBJFILE_OSECTIONS (objfile, osect)
1401 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1402 && pos == osect->the_bfd_section->filepos)
1403 {
1404 int ndx = osect - objfile->sections;
1405 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1406 tbl[ndx].mapped_val = i + 1;
1407 break;
1408 }
1409 }
1410
1411 xfree (ovly_table);
1412 set_objfile_data (objfile, spu_overlay_data, tbl);
1413 return tbl;
1414 }
1415
1416 /* Read _ovly_buf_table entry from the target to determine whether
1417 OSECT is currently mapped, and update the mapped state. */
1418 static void
1419 spu_overlay_update_osect (struct obj_section *osect)
1420 {
1421 struct spu_overlay_table *ovly_table;
1422 CORE_ADDR val;
1423
1424 ovly_table = spu_get_overlay_table (osect->objfile);
1425 if (!ovly_table)
1426 return;
1427
1428 ovly_table += osect - osect->objfile->sections;
1429 if (ovly_table->mapped_ptr == 0)
1430 return;
1431
1432 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
1433 osect->ovly_mapped = (val == ovly_table->mapped_val);
1434 }
1435
1436 /* If OSECT is NULL, then update all sections' mapped state.
1437 If OSECT is non-NULL, then update only OSECT's mapped state. */
1438 static void
1439 spu_overlay_update (struct obj_section *osect)
1440 {
1441 /* Just one section. */
1442 if (osect)
1443 spu_overlay_update_osect (osect);
1444
1445 /* All sections. */
1446 else
1447 {
1448 struct objfile *objfile;
1449
1450 ALL_OBJSECTIONS (objfile, osect)
1451 if (section_is_overlay (osect->the_bfd_section))
1452 spu_overlay_update_osect (osect);
1453 }
1454 }
1455
1456 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1457 If there is one, go through all sections and make sure that for
1458 non-overlay sections the LMA equals the VMA, while for overlay
1459 sections the LMA is larger than the local store size. */
1460 static void
1461 spu_overlay_new_objfile (struct objfile *objfile)
1462 {
1463 struct spu_overlay_table *ovly_table;
1464 struct obj_section *osect;
1465
1466 /* If we've already touched this file, do nothing. */
1467 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1468 return;
1469
1470 /* Check if this objfile has overlays. */
1471 ovly_table = spu_get_overlay_table (objfile);
1472 if (!ovly_table)
1473 return;
1474
1475 /* Now go and fiddle with all the LMAs. */
1476 ALL_OBJFILE_OSECTIONS (objfile, osect)
1477 {
1478 bfd *obfd = objfile->obfd;
1479 asection *bsect = osect->the_bfd_section;
1480 int ndx = osect - objfile->sections;
1481
1482 if (ovly_table[ndx].mapped_ptr == 0)
1483 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1484 else
1485 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1486 }
1487 }
1488
1489
1490 /* "info spu" commands. */
1491
1492 static void
1493 info_spu_event_command (char *args, int from_tty)
1494 {
1495 struct frame_info *frame = get_selected_frame (NULL);
1496 ULONGEST event_status = 0;
1497 ULONGEST event_mask = 0;
1498 struct cleanup *chain;
1499 gdb_byte buf[100];
1500 char annex[32];
1501 LONGEST len;
1502 int rc, id;
1503
1504 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1505
1506 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1507 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1508 buf, 0, sizeof buf);
1509 if (len <= 0)
1510 error (_("Could not read event_status."));
1511 event_status = strtoulst (buf, NULL, 16);
1512
1513 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1514 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1515 buf, 0, sizeof buf);
1516 if (len <= 0)
1517 error (_("Could not read event_mask."));
1518 event_mask = strtoulst (buf, NULL, 16);
1519
1520 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1521
1522 if (ui_out_is_mi_like_p (uiout))
1523 {
1524 ui_out_field_fmt (uiout, "event_status",
1525 "0x%s", phex_nz (event_status, 4));
1526 ui_out_field_fmt (uiout, "event_mask",
1527 "0x%s", phex_nz (event_mask, 4));
1528 }
1529 else
1530 {
1531 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1532 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
1533 }
1534
1535 do_cleanups (chain);
1536 }
1537
1538 static void
1539 info_spu_signal_command (char *args, int from_tty)
1540 {
1541 struct frame_info *frame = get_selected_frame (NULL);
1542 ULONGEST signal1 = 0;
1543 ULONGEST signal1_type = 0;
1544 int signal1_pending = 0;
1545 ULONGEST signal2 = 0;
1546 ULONGEST signal2_type = 0;
1547 int signal2_pending = 0;
1548 struct cleanup *chain;
1549 char annex[32];
1550 gdb_byte buf[100];
1551 LONGEST len;
1552 int rc, id;
1553
1554 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1555
1556 xsnprintf (annex, sizeof annex, "%d/signal1", id);
1557 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1558 if (len < 0)
1559 error (_("Could not read signal1."));
1560 else if (len == 4)
1561 {
1562 signal1 = extract_unsigned_integer (buf, 4);
1563 signal1_pending = 1;
1564 }
1565
1566 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
1567 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1568 buf, 0, sizeof buf);
1569 if (len <= 0)
1570 error (_("Could not read signal1_type."));
1571 signal1_type = strtoulst (buf, NULL, 16);
1572
1573 xsnprintf (annex, sizeof annex, "%d/signal2", id);
1574 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1575 if (len < 0)
1576 error (_("Could not read signal2."));
1577 else if (len == 4)
1578 {
1579 signal2 = extract_unsigned_integer (buf, 4);
1580 signal2_pending = 1;
1581 }
1582
1583 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
1584 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1585 buf, 0, sizeof buf);
1586 if (len <= 0)
1587 error (_("Could not read signal2_type."));
1588 signal2_type = strtoulst (buf, NULL, 16);
1589
1590 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
1591
1592 if (ui_out_is_mi_like_p (uiout))
1593 {
1594 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
1595 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
1596 ui_out_field_int (uiout, "signal1_type", signal1_type);
1597 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
1598 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
1599 ui_out_field_int (uiout, "signal2_type", signal2_type);
1600 }
1601 else
1602 {
1603 if (signal1_pending)
1604 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
1605 else
1606 printf_filtered (_("Signal 1 not pending "));
1607
1608 if (signal1_type)
1609 printf_filtered (_("(Type Or)\n"));
1610 else
1611 printf_filtered (_("(Type Overwrite)\n"));
1612
1613 if (signal2_pending)
1614 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
1615 else
1616 printf_filtered (_("Signal 2 not pending "));
1617
1618 if (signal2_type)
1619 printf_filtered (_("(Type Or)\n"));
1620 else
1621 printf_filtered (_("(Type Overwrite)\n"));
1622 }
1623
1624 do_cleanups (chain);
1625 }
1626
1627 static void
1628 info_spu_mailbox_list (gdb_byte *buf, int nr,
1629 const char *field, const char *msg)
1630 {
1631 struct cleanup *chain;
1632 int i;
1633
1634 if (nr <= 0)
1635 return;
1636
1637 chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
1638
1639 ui_out_table_header (uiout, 32, ui_left, field, msg);
1640 ui_out_table_body (uiout);
1641
1642 for (i = 0; i < nr; i++)
1643 {
1644 struct cleanup *val_chain;
1645 ULONGEST val;
1646 val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
1647 val = extract_unsigned_integer (buf + 4*i, 4);
1648 ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
1649 do_cleanups (val_chain);
1650
1651 if (!ui_out_is_mi_like_p (uiout))
1652 printf_filtered ("\n");
1653 }
1654
1655 do_cleanups (chain);
1656 }
1657
1658 static void
1659 info_spu_mailbox_command (char *args, int from_tty)
1660 {
1661 struct frame_info *frame = get_selected_frame (NULL);
1662 struct cleanup *chain;
1663 char annex[32];
1664 gdb_byte buf[1024];
1665 LONGEST len;
1666 int i, id;
1667
1668 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1669
1670 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
1671
1672 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
1673 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1674 buf, 0, sizeof buf);
1675 if (len < 0)
1676 error (_("Could not read mbox_info."));
1677
1678 info_spu_mailbox_list (buf, len / 4, "mbox", "SPU Outbound Mailbox");
1679
1680 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
1681 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1682 buf, 0, sizeof buf);
1683 if (len < 0)
1684 error (_("Could not read ibox_info."));
1685
1686 info_spu_mailbox_list (buf, len / 4, "ibox", "SPU Outbound Interrupt Mailbox");
1687
1688 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
1689 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1690 buf, 0, sizeof buf);
1691 if (len < 0)
1692 error (_("Could not read wbox_info."));
1693
1694 info_spu_mailbox_list (buf, len / 4, "wbox", "SPU Inbound Mailbox");
1695
1696 do_cleanups (chain);
1697 }
1698
1699 static ULONGEST
1700 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
1701 {
1702 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
1703 return (word >> (63 - last)) & mask;
1704 }
1705
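/* Worked example: the bit numbering follows the CBE documentation, where
   bit 0 is the most significant bit of the 64-bit word.  Extracting bits
   27..34 (the MFC command opcode field used below) from the doubleword
   0x0000000840000000 shifts right by 63 - 34 = 29 and masks with 0xff,
   yielding 0x42 -- the "getf" command.  */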
1706 static void
1707 info_spu_dma_cmdlist (gdb_byte *buf, int nr)
1708 {
1709 static char *spu_mfc_opcode[256] =
1710 {
1711 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1712 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1713 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1714 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1715 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
1716 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
1717 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
1718 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1719 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
1720 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
1721 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1722 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1723 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1724 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1725 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1726 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1727 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
1728 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
1729 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1730 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1731 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
1732 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1733 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
1734 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1735 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1736 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
1737 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1738 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1739 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1740 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1741 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1742 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1743 };
1744
1745 struct cleanup *chain;
1746 int i;
1747
1748 chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
1749
1750 ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
1751 ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
1752 ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
1753 ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
1754 ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
1755 ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
1756 ui_out_table_header (uiout, 7, ui_left, "size", "Size");
1757 ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
1758 ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
1759 ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
1760
1761 ui_out_table_body (uiout);
1762
1763 for (i = 0; i < nr; i++)
1764 {
1765 struct cleanup *cmd_chain;
1766 ULONGEST mfc_cq_dw0;
1767 ULONGEST mfc_cq_dw1;
1768 ULONGEST mfc_cq_dw2;
1769 ULONGEST mfc_cq_dw3;
1770 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
1771 int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
1772 ULONGEST mfc_ea;
1773 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
1774
1775 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
1776 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
1777
1778 mfc_cq_dw0 = extract_unsigned_integer (buf + 32*i, 8);
1779 mfc_cq_dw1 = extract_unsigned_integer (buf + 32*i + 8, 8);
1780 mfc_cq_dw2 = extract_unsigned_integer (buf + 32*i + 16, 8);
1781 mfc_cq_dw3 = extract_unsigned_integer (buf + 32*i + 24, 8);
1782
1783 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
1784 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
1785 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
1786 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
1787 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
1788 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
1789 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
1790
1791 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
1792 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
1793
1794 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
1795 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
1796 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
1797 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
1798 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
1799 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
1800
1801 cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
1802
1803 if (spu_mfc_opcode[mfc_cmd_opcode])
1804 ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
1805 else
1806 ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
1807
1808 ui_out_field_int (uiout, "tag", mfc_cmd_tag);
1809 ui_out_field_int (uiout, "tid", tclass_id);
1810 ui_out_field_int (uiout, "rid", rclass_id);
1811
1812 if (ea_valid_p)
1813 ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
1814 else
1815 ui_out_field_skip (uiout, "ea");
1816
1817 ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
1818 if (qw_valid_p)
1819 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
1820 else
1821 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
1822
1823 if (list_valid_p)
1824 {
1825 ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
1826 ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
1827 }
1828 else
1829 {
1830 ui_out_field_skip (uiout, "lstaddr");
1831 ui_out_field_skip (uiout, "lstsize");
1832 }
1833
1834 if (cmd_error_p)
1835 ui_out_field_string (uiout, "error_p", "*");
1836 else
1837 ui_out_field_skip (uiout, "error_p");
1838
1839 do_cleanups (cmd_chain);
1840
1841 if (!ui_out_is_mi_like_p (uiout))
1842 printf_filtered ("\n");
1843 }
1844
1845 do_cleanups (chain);
1846 }
1847
1848 static void
1849 info_spu_dma_command (char *args, int from_tty)
1850 {
1851 struct frame_info *frame = get_selected_frame (NULL);
1852 ULONGEST dma_info_type;
1853 ULONGEST dma_info_mask;
1854 ULONGEST dma_info_status;
1855 ULONGEST dma_info_stall_and_notify;
1856 ULONGEST dma_info_atomic_command_status;
1857 struct cleanup *chain;
1858 char annex[32];
1859 gdb_byte buf[1024];
1860 LONGEST len;
1861 int i, id;
1862
1863 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1864
1865 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
1866 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1867 buf, 0, 40 + 16 * 32);
1868 if (len <= 0)
1869 error (_("Could not read dma_info."));
1870
1871 dma_info_type = extract_unsigned_integer (buf, 8);
1872 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
1873 dma_info_status = extract_unsigned_integer (buf + 16, 8);
1874 dma_info_stall_and_notify = extract_unsigned_integer (buf + 24, 8);
1875 dma_info_atomic_command_status = extract_unsigned_integer (buf + 32, 8);
1876
1877 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
1878
1879 if (ui_out_is_mi_like_p (uiout))
1880 {
1881 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
1882 phex_nz (dma_info_type, 4));
1883 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
1884 phex_nz (dma_info_mask, 4));
1885 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
1886 phex_nz (dma_info_status, 4));
1887 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
1888 phex_nz (dma_info_stall_and_notify, 4));
1889 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
1890 phex_nz (dma_info_atomic_command_status, 4));
1891 }
1892 else
1893 {
1894 const char *query_msg;
1895
1896 switch (dma_info_type)
1897 {
1898 case 0: query_msg = _("no query pending"); break;
1899 case 1: query_msg = _("'any' query pending"); break;
1900 case 2: query_msg = _("'all' query pending"); break;
1901 default: query_msg = _("undefined query type"); break;
1902 }
1903
1904 printf_filtered (_("Tag-Group Status 0x%s\n"),
1905 phex (dma_info_status, 4));
1906 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
1907 phex (dma_info_mask, 4), query_msg);
1908 printf_filtered (_("Stall-and-Notify 0x%s\n"),
1909 phex (dma_info_stall_and_notify, 4));
1910 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
1911 phex (dma_info_atomic_command_status, 4));
1912 printf_filtered ("\n");
1913 }
1914
1915 info_spu_dma_cmdlist (buf + 40, 16);
1916 do_cleanups (chain);
1917 }
1918
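/* The "info spu proxydma" command: like "info spu dma", but for the
   proxy-DMA queue read from the proxydma_info annex.  */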
1919 static void
1920 info_spu_proxydma_command (char *args, int from_tty)
1921 {
1922 struct frame_info *frame = get_selected_frame (NULL);
1923 ULONGEST dma_info_type;
1924 ULONGEST dma_info_mask;
1925 ULONGEST dma_info_status;
1926 struct cleanup *chain;
1927 char annex[32];
1928 gdb_byte buf[1024];
1929 LONGEST len;
1930 int i, id;
1931
1932 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1933
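  /* The proxydma_info data consists of three 8-byte status words
     followed by the 8-entry proxy command queue at offset 24, 32 bytes
     per entry (hence the 24 + 8 * 32 length).  */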
1934 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
1935 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1936 buf, 0, 24 + 8 * 32);
1937 if (len <= 0)
1938 error (_("Could not read proxydma_info."));
1939
1940 dma_info_type = extract_unsigned_integer (buf, 8);
1941 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
1942 dma_info_status = extract_unsigned_integer (buf + 16, 8);
1943
1944 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
1945
1946 if (ui_out_is_mi_like_p (uiout))
1947 {
1948 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
1949 phex_nz (dma_info_type, 4));
1950 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
1951 phex_nz (dma_info_mask, 4));
1952 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
1953 phex_nz (dma_info_status, 4));
1954 }
1955 else
1956 {
1957 const char *query_msg;
1958
1959 switch (dma_info_type)
1960 {
1961 case 0: query_msg = _("no query pending"); break;
1962 case 1: query_msg = _("'any' query pending"); break;
1963 case 2: query_msg = _("'all' query pending"); break;
1964 default: query_msg = _("undefined query type"); break;
1965 }
1966
1967 printf_filtered (_("Tag-Group Status 0x%s\n"),
1968 phex (dma_info_status, 4));
1969 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
1970 phex (dma_info_mask, 4), query_msg);
1971 printf_filtered ("\n");
1972 }
1973
1974 info_spu_dma_cmdlist (buf + 24, 8);
1975 do_cleanups (chain);
1976 }
1977
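/* The top-level "info spu" prefix command.  Invoked without a
   sub-command it prints a usage message and lists the registered
   sub-commands, e.g. "info spu event" or "info spu dma".  */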
1978 static void
1979 info_spu_command (char *args, int from_tty)
1980 {
1981 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
1982 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
1983 }
1984
1985
1986 /* Set up gdbarch struct. */
1987
1988 static struct gdbarch *
1989 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
1990 {
1991 struct gdbarch *gdbarch;
1992 struct gdbarch_tdep *tdep;
1993
1994 /* Find a candidate among the list of pre-declared architectures. */
1995 arches = gdbarch_list_lookup_by_info (arches, &info);
1996 if (arches != NULL)
1997 return arches->gdbarch;
1998
1999   /* Is it for us? */
2000 if (info.bfd_arch_info->mach != bfd_mach_spu)
2001 return NULL;
2002
2003 /* Yes, create a new architecture. */
2004 tdep = XCALLOC (1, struct gdbarch_tdep);
2005 gdbarch = gdbarch_alloc (&info, tdep);
2006
2007 /* Disassembler. */
2008 set_gdbarch_print_insn (gdbarch, print_insn_spu);
2009
2010 /* Registers. */
2011 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2012 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2013 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2014 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2015 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2016 set_gdbarch_write_pc (gdbarch, spu_write_pc);
2017 set_gdbarch_register_name (gdbarch, spu_register_name);
2018 set_gdbarch_register_type (gdbarch, spu_register_type);
2019 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2020 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2021 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2022 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2023
2024 /* Data types. */
2025 set_gdbarch_char_signed (gdbarch, 0);
2026 set_gdbarch_ptr_bit (gdbarch, 32);
2027 set_gdbarch_addr_bit (gdbarch, 32);
2028 set_gdbarch_short_bit (gdbarch, 16);
2029 set_gdbarch_int_bit (gdbarch, 32);
2030 set_gdbarch_long_bit (gdbarch, 32);
2031 set_gdbarch_long_long_bit (gdbarch, 64);
2032 set_gdbarch_float_bit (gdbarch, 32);
2033 set_gdbarch_double_bit (gdbarch, 64);
2034 set_gdbarch_long_double_bit (gdbarch, 64);
2035 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2036 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2037 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2038
2039 /* Address conversion. */
2040 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2041 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2042
2043 /* Inferior function calls. */
2044 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2045 set_gdbarch_frame_align (gdbarch, spu_frame_align);
2046 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2047 set_gdbarch_unwind_dummy_id (gdbarch, spu_unwind_dummy_id);
2048 set_gdbarch_return_value (gdbarch, spu_return_value);
2049
2050 /* Frame handling. */
2051 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2052 frame_unwind_append_sniffer (gdbarch, spu_frame_sniffer);
2053 frame_base_set_default (gdbarch, &spu_frame_base);
2054 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2055 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2056 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2057 set_gdbarch_frame_args_skip (gdbarch, 0);
2058 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2059 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2060
2061 /* Breakpoints. */
2062 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2063 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2064 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2065 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2066
2067 /* Overlays. */
2068 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2069
2070 return gdbarch;
2071 }
2072
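/* Module initialization: register the SPU gdbarch init routine, hook
   overlay handling into new-objfile events, and install the "info spu"
   command family.  */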
2073 void
2074 _initialize_spu_tdep (void)
2075 {
2076 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2077
2078 /* Add ourselves to objfile event chain. */
2079 observer_attach_new_objfile (spu_overlay_new_objfile);
2080 spu_overlay_data = register_objfile_data ();
2081
2082 /* Add root prefix command for all "info spu" commands. */
2083 add_prefix_cmd ("spu", class_info, info_spu_command,
2084 		  _("Various SPU-specific commands."),
2085 &infospucmdlist, "info spu ", 0, &infolist);
2086
2087 /* Add various "info spu" commands. */
2088 add_cmd ("event", class_info, info_spu_event_command,
2089 _("Display SPU event facility status.\n"),
2090 &infospucmdlist);
2091 add_cmd ("signal", class_info, info_spu_signal_command,
2092 _("Display SPU signal notification facility status.\n"),
2093 &infospucmdlist);
2094 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2095 _("Display SPU mailbox facility status.\n"),
2096 &infospucmdlist);
2097 add_cmd ("dma", class_info, info_spu_dma_command,
2098 _("Display MFC DMA status.\n"),
2099 &infospucmdlist);
2100 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2101 _("Display MFC Proxy-DMA status.\n"),
2102 &infospucmdlist);
2103 }