1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "observer.h"
44
45 #include "spu-tdep.h"
46
47
48 /* The tdep structure. */
49 struct gdbarch_tdep
50 {
51 /* SPU-specific vector type. */
52 struct type *spu_builtin_type_vec128;
53 };
54
55
56 /* SPU-specific vector type. */
57 static struct type *
58 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
59 {
60 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
61
62 if (!tdep->spu_builtin_type_vec128)
63 {
64 struct type *t;
65
66 t = init_composite_type ("__spu_builtin_type_vec128", TYPE_CODE_UNION);
67 append_composite_type_field (t, "uint128", builtin_type_int128);
68 append_composite_type_field (t, "v2_int64",
69 init_vector_type (builtin_type_int64, 2));
70 append_composite_type_field (t, "v4_int32",
71 init_vector_type (builtin_type_int32, 4));
72 append_composite_type_field (t, "v8_int16",
73 init_vector_type (builtin_type_int16, 8));
74 append_composite_type_field (t, "v16_int8",
75 init_vector_type (builtin_type_int8, 16));
76 append_composite_type_field (t, "v2_double",
77 init_vector_type (builtin_type_double, 2));
78 append_composite_type_field (t, "v4_float",
79 init_vector_type (builtin_type_float, 4));
80
81 TYPE_FLAGS (t) |= TYPE_FLAG_VECTOR;
82 TYPE_NAME (t) = "spu_builtin_type_vec128";
83
84 tdep->spu_builtin_type_vec128 = t;
85 }
86
87 return tdep->spu_builtin_type_vec128;
88 }
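/* With this union type installed, a general purpose register can be
   inspected in any of these views, e.g. "print $r80.v4_int32" or
   "print $r80.uint128" in a GDB session (member names as defined
   above; exact output formatting depends on the GDB version).  */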
89
90
91 /* The list of available "info spu " commands. */
92 static struct cmd_list_element *infospucmdlist = NULL;
93
94 /* Registers. */
95
96 static const char *
97 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
98 {
99 static char *register_names[] =
100 {
101 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
102 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
103 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
104 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
105 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
106 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
107 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
108 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
109 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
110 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
111 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
112 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
113 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
114 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
115 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
116 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
117 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
118 };
119
120 if (reg_nr < 0)
121 return NULL;
122 if (reg_nr >= sizeof register_names / sizeof *register_names)
123 return NULL;
124
125 return register_names[reg_nr];
126 }
127
128 static struct type *
129 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
130 {
131 if (reg_nr < SPU_NUM_GPRS)
132 return spu_builtin_type_vec128 (gdbarch);
133
134 switch (reg_nr)
135 {
136 case SPU_ID_REGNUM:
137 return builtin_type_uint32;
138
139 case SPU_PC_REGNUM:
140 return builtin_type_void_func_ptr;
141
142 case SPU_SP_REGNUM:
143 return builtin_type_void_data_ptr;
144
145 case SPU_FPSCR_REGNUM:
146 return builtin_type_uint128;
147
148 case SPU_SRR0_REGNUM:
149 return builtin_type_uint32;
150
151 case SPU_LSLR_REGNUM:
152 return builtin_type_uint32;
153
154 case SPU_DECR_REGNUM:
155 return builtin_type_uint32;
156
157 case SPU_DECR_STATUS_REGNUM:
158 return builtin_type_uint32;
159
160 default:
 161       internal_error (__FILE__, __LINE__, _("invalid regnum"));
162 }
163 }
164
165 /* Pseudo registers for preferred slots - stack pointer. */
166
167 static void
168 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
169 gdb_byte *buf)
170 {
171 gdb_byte reg[32];
172 char annex[32];
173 ULONGEST id;
174
175 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
176 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
177 memset (reg, 0, sizeof reg);
178 target_read (&current_target, TARGET_OBJECT_SPU, annex,
179 reg, 0, sizeof reg);
180
181 store_unsigned_integer (buf, 4, strtoulst (reg, NULL, 16));
182 }
183
184 static void
185 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
186 int regnum, gdb_byte *buf)
187 {
188 gdb_byte reg[16];
189 char annex[32];
190 ULONGEST id;
191
192 switch (regnum)
193 {
194 case SPU_SP_REGNUM:
195 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
196 memcpy (buf, reg, 4);
197 break;
198
199 case SPU_FPSCR_REGNUM:
200 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
201 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
202 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
203 break;
204
205 case SPU_SRR0_REGNUM:
206 spu_pseudo_register_read_spu (regcache, "srr0", buf);
207 break;
208
209 case SPU_LSLR_REGNUM:
210 spu_pseudo_register_read_spu (regcache, "lslr", buf);
211 break;
212
213 case SPU_DECR_REGNUM:
214 spu_pseudo_register_read_spu (regcache, "decr", buf);
215 break;
216
217 case SPU_DECR_STATUS_REGNUM:
218 spu_pseudo_register_read_spu (regcache, "decr_status", buf);
219 break;
220
221 default:
222 internal_error (__FILE__, __LINE__, _("invalid regnum"));
223 }
224 }
225
226 static void
227 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
228 const gdb_byte *buf)
229 {
230 gdb_byte reg[32];
231 char annex[32];
232 ULONGEST id;
233
234 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
235 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
236 xsnprintf (reg, sizeof reg, "0x%s",
237 phex_nz (extract_unsigned_integer (buf, 4), 4));
238 target_write (&current_target, TARGET_OBJECT_SPU, annex,
239 reg, 0, strlen (reg));
240 }
241
242 static void
243 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
244 int regnum, const gdb_byte *buf)
245 {
246 gdb_byte reg[16];
247 char annex[32];
248 ULONGEST id;
249
250 switch (regnum)
251 {
252 case SPU_SP_REGNUM:
253 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
254 memcpy (reg, buf, 4);
255 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
256 break;
257
258 case SPU_FPSCR_REGNUM:
259 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
260 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
261 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
262 break;
263
264 case SPU_SRR0_REGNUM:
265 spu_pseudo_register_write_spu (regcache, "srr0", buf);
266 break;
267
268 case SPU_LSLR_REGNUM:
269 spu_pseudo_register_write_spu (regcache, "lslr", buf);
270 break;
271
272 case SPU_DECR_REGNUM:
273 spu_pseudo_register_write_spu (regcache, "decr", buf);
274 break;
275
276 case SPU_DECR_STATUS_REGNUM:
277 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
278 break;
279
280 default:
281 internal_error (__FILE__, __LINE__, _("invalid regnum"));
282 }
283 }
284
285 /* Value conversion -- access scalar values at the preferred slot. */
286
287 static struct value *
288 spu_value_from_register (struct type *type, int regnum,
289 struct frame_info *frame)
290 {
291 struct value *value = default_value_from_register (type, regnum, frame);
292 int len = TYPE_LENGTH (type);
293
294 if (regnum < SPU_NUM_GPRS && len < 16)
295 {
296 int preferred_slot = len < 4 ? 4 - len : 0;
297 set_value_offset (value, preferred_slot);
298 }
299
300 return value;
301 }
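/* Worked example of the preferred-slot adjustment above: an 8-bit
   "char" kept in a 128-bit GPR lives at byte offset 3 of the register
   (preferred_slot = 4 - 1 = 3), while a 32-bit "int" starts at byte 0
   (len >= 4, so no offset).  Values of 16 bytes or more cover the
   whole register and need no adjustment.  */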
302
303 /* Register groups. */
304
305 static int
306 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
307 struct reggroup *group)
308 {
309 /* Registers displayed via 'info regs'. */
310 if (group == general_reggroup)
311 return 1;
312
313 /* Registers displayed via 'info float'. */
314 if (group == float_reggroup)
315 return 0;
316
317 /* Registers that need to be saved/restored in order to
318 push or pop frames. */
319 if (group == save_reggroup || group == restore_reggroup)
320 return 1;
321
322 return default_register_reggroup_p (gdbarch, regnum, group);
323 }
324
325 /* Address conversion. */
326
327 static CORE_ADDR
328 spu_pointer_to_address (struct type *type, const gdb_byte *buf)
329 {
330 ULONGEST addr = extract_unsigned_integer (buf, TYPE_LENGTH (type));
331 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
332
333 if (target_has_registers && target_has_stack && target_has_memory)
334 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
335 SPU_LSLR_REGNUM);
336
337 return addr & lslr;
338 }
339
340 static CORE_ADDR
341 spu_integer_to_address (struct gdbarch *gdbarch,
342 struct type *type, const gdb_byte *buf)
343 {
344 ULONGEST addr = unpack_long (type, buf);
345 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
346
347 if (target_has_registers && target_has_stack && target_has_memory)
348 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
349 SPU_LSLR_REGNUM);
350
351 return addr & lslr;
352 }
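/* Example of the wrapping performed above, assuming the usual 256 KB
   local store (SPU_LS_SIZE == 0x40000, so LSLR defaults to 0x3ffff):
   a pointer value of 0x45678 is reduced to local-store address
   0x05678, i.e. addresses simply wrap around the local store size.  */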
353
354
355 /* Decoding SPU instructions. */
356
357 enum
358 {
359 op_lqd = 0x34,
360 op_lqx = 0x3c4,
361 op_lqa = 0x61,
362 op_lqr = 0x67,
363 op_stqd = 0x24,
364 op_stqx = 0x144,
365 op_stqa = 0x41,
366 op_stqr = 0x47,
367
368 op_il = 0x081,
369 op_ila = 0x21,
370 op_a = 0x0c0,
371 op_ai = 0x1c,
372
373 op_selb = 0x4,
374
375 op_br = 0x64,
376 op_bra = 0x60,
377 op_brsl = 0x66,
378 op_brasl = 0x62,
379 op_brnz = 0x42,
380 op_brz = 0x40,
381 op_brhnz = 0x46,
382 op_brhz = 0x44,
383 op_bi = 0x1a8,
384 op_bisl = 0x1a9,
385 op_biz = 0x128,
386 op_binz = 0x129,
387 op_bihz = 0x12a,
388 op_bihnz = 0x12b,
389 };
390
391 static int
392 is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
393 {
394 if ((insn >> 21) == op)
395 {
396 *rt = insn & 127;
397 *ra = (insn >> 7) & 127;
398 *rb = (insn >> 14) & 127;
399 return 1;
400 }
401
402 return 0;
403 }
404
405 static int
406 is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
407 {
408 if ((insn >> 28) == op)
409 {
410 *rt = (insn >> 21) & 127;
411 *ra = (insn >> 7) & 127;
412 *rb = (insn >> 14) & 127;
413 *rc = insn & 127;
414 return 1;
415 }
416
417 return 0;
418 }
419
420 static int
421 is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
422 {
423 if ((insn >> 21) == op)
424 {
425 *rt = insn & 127;
426 *ra = (insn >> 7) & 127;
427 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
428 return 1;
429 }
430
431 return 0;
432 }
433
434 static int
435 is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
436 {
437 if ((insn >> 24) == op)
438 {
439 *rt = insn & 127;
440 *ra = (insn >> 7) & 127;
441 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
442 return 1;
443 }
444
445 return 0;
446 }
447
448 static int
449 is_ri16 (unsigned int insn, int op, int *rt, int *i16)
450 {
451 if ((insn >> 23) == op)
452 {
453 *rt = insn & 127;
454 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
455 return 1;
456 }
457
458 return 0;
459 }
460
461 static int
462 is_ri18 (unsigned int insn, int op, int *rt, int *i18)
463 {
464 if ((insn >> 25) == op)
465 {
466 *rt = insn & 127;
467 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
468 return 1;
469 }
470
471 return 0;
472 }
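/* The "(x ^ sign_bit) - sign_bit" pattern used in the decoders above
   sign-extends the immediate fields.  For example, for a 10-bit field
   holding 0x3ff: (0x3ff ^ 0x200) - 0x200 = 0x1ff - 0x200 = -1, as
   expected for an all-ones two's complement value, while a field of
   0x1ff decodes to +511.  */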
473
474 static int
475 is_branch (unsigned int insn, int *offset, int *reg)
476 {
477 int rt, i7, i16;
478
479 if (is_ri16 (insn, op_br, &rt, &i16)
480 || is_ri16 (insn, op_brsl, &rt, &i16)
481 || is_ri16 (insn, op_brnz, &rt, &i16)
482 || is_ri16 (insn, op_brz, &rt, &i16)
483 || is_ri16 (insn, op_brhnz, &rt, &i16)
484 || is_ri16 (insn, op_brhz, &rt, &i16))
485 {
486 *reg = SPU_PC_REGNUM;
487 *offset = i16 << 2;
488 return 1;
489 }
490
491 if (is_ri16 (insn, op_bra, &rt, &i16)
492 || is_ri16 (insn, op_brasl, &rt, &i16))
493 {
494 *reg = -1;
495 *offset = i16 << 2;
496 return 1;
497 }
498
499 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
500 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
501 || is_ri7 (insn, op_biz, &rt, reg, &i7)
502 || is_ri7 (insn, op_binz, &rt, reg, &i7)
503 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
504 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
505 {
506 *offset = 0;
507 return 1;
508 }
509
510 return 0;
511 }
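/* Branch targets are formed from the decoded fields as follows: the
   PC-relative 16-bit forms use a word offset, so e.g. "br" with
   i16 = 8 branches to PC + 32 (offset = i16 << 2, *reg set to
   SPU_PC_REGNUM); "bra"/"brasl" yield an absolute target (i16 << 2
   with *reg = -1); and the register-indirect "bi" family returns
   offset 0 with *reg set to the RA register holding the target.  */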
512
513
514 /* Prolog parsing. */
515
516 struct spu_prologue_data
517 {
518 /* Stack frame size. -1 if analysis was unsuccessful. */
519 int size;
520
521 /* How to find the CFA. The CFA is equal to SP at function entry. */
522 int cfa_reg;
523 int cfa_offset;
524
525 /* Offset relative to CFA where a register is saved. -1 if invalid. */
526 int reg_offset[SPU_NUM_GPRS];
527 };
528
529 static CORE_ADDR
530 spu_analyze_prologue (CORE_ADDR start_pc, CORE_ADDR end_pc,
531 struct spu_prologue_data *data)
532 {
533 int found_sp = 0;
534 int found_fp = 0;
535 int found_lr = 0;
536 int reg_immed[SPU_NUM_GPRS];
537 gdb_byte buf[16];
538 CORE_ADDR prolog_pc = start_pc;
539 CORE_ADDR pc;
540 int i;
541
542
543 /* Initialize DATA to default values. */
544 data->size = -1;
545
546 data->cfa_reg = SPU_RAW_SP_REGNUM;
547 data->cfa_offset = 0;
548
549 for (i = 0; i < SPU_NUM_GPRS; i++)
550 data->reg_offset[i] = -1;
551
552 /* Set up REG_IMMED array. This is non-zero for a register if we know its
553 preferred slot currently holds this immediate value. */
554 for (i = 0; i < SPU_NUM_GPRS; i++)
555 reg_immed[i] = 0;
556
557 /* Scan instructions until the first branch.
558
559 The following instructions are important prolog components:
560
561 - The first instruction to set up the stack pointer.
562 - The first instruction to set up the frame pointer.
563 - The first instruction to save the link register.
564
565 We return the instruction after the latest of these three,
566 or the incoming PC if none is found. The first instruction
567 to set up the stack pointer also defines the frame size.
568
569 Note that instructions saving incoming arguments to their stack
570 slots are not counted as important, because they are hard to
571 identify with certainty. This should not matter much, because
572 arguments are relevant only in code compiled with debug data,
573 and in such code the GDB core will advance until the first source
574 line anyway, using SAL data.
575
576 For purposes of stack unwinding, we analyze the following types
577 of instructions in addition:
578
579 - Any instruction adding to the current frame pointer.
580 - Any instruction loading an immediate constant into a register.
581 - Any instruction storing a register onto the stack.
582
583 These are used to compute the CFA and REG_OFFSET output. */
584
585 for (pc = start_pc; pc < end_pc; pc += 4)
586 {
587 unsigned int insn;
588 int rt, ra, rb, rc, immed;
589
590 if (target_read_memory (pc, buf, 4))
591 break;
592 insn = extract_unsigned_integer (buf, 4);
593
594 /* AI is the typical instruction to set up a stack frame.
595 It is also used to initialize the frame pointer. */
596 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
597 {
598 if (rt == data->cfa_reg && ra == data->cfa_reg)
599 data->cfa_offset -= immed;
600
601 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
602 && !found_sp)
603 {
604 found_sp = 1;
605 prolog_pc = pc + 4;
606
607 data->size = -immed;
608 }
609 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
610 && !found_fp)
611 {
612 found_fp = 1;
613 prolog_pc = pc + 4;
614
615 data->cfa_reg = SPU_FP_REGNUM;
616 data->cfa_offset -= immed;
617 }
618 }
619
620 /* A is used to set up stack frames of size >= 512 bytes.
621 If we have tracked the contents of the addend register,
622 we can handle this as well. */
623 else if (is_rr (insn, op_a, &rt, &ra, &rb))
624 {
625 if (rt == data->cfa_reg && ra == data->cfa_reg)
626 {
627 if (reg_immed[rb] != 0)
628 data->cfa_offset -= reg_immed[rb];
629 else
630 data->cfa_reg = -1; /* We don't know the CFA any more. */
631 }
632
633 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
634 && !found_sp)
635 {
636 found_sp = 1;
637 prolog_pc = pc + 4;
638
639 if (reg_immed[rb] != 0)
640 data->size = -reg_immed[rb];
641 }
642 }
643
644 /* We need to track IL and ILA used to load immediate constants
645 in case they are later used as input to an A instruction. */
646 else if (is_ri16 (insn, op_il, &rt, &immed))
647 {
648 reg_immed[rt] = immed;
649
650 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
651 found_sp = 1;
652 }
653
654 else if (is_ri18 (insn, op_ila, &rt, &immed))
655 {
656 reg_immed[rt] = immed & 0x3ffff;
657
658 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
659 found_sp = 1;
660 }
661
662 /* STQD is used to save registers to the stack. */
663 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
664 {
665 if (ra == data->cfa_reg)
666 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
667
668 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
669 && !found_lr)
670 {
671 found_lr = 1;
672 prolog_pc = pc + 4;
673 }
674 }
675
676 /* _start uses SELB to set up the stack pointer. */
677 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
678 {
679 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
680 found_sp = 1;
681 }
682
683 /* We terminate if we find a branch. */
684 else if (is_branch (insn, &immed, &ra))
685 break;
686 }
687
688
689 /* If we successfully parsed until here, and didn't find any instruction
690 modifying SP, we assume we have a frameless function. */
691 if (!found_sp)
692 data->size = 0;
693
694 /* Return cooked instead of raw SP. */
695 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
696 data->cfa_reg = SPU_SP_REGNUM;
697
698 return prolog_pc;
699 }
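/* As an illustration of the analysis above, a typical (hypothetical)
   SPU prologue for a 48-byte frame might look like:

        stqd    $0,16($1)       # save link register in caller's frame
        stqd    $1,-48($1)      # store the back chain
        ai      $1,$1,-48       # allocate the new frame

   The AI instruction sets data->size = 48 and marks the stack pointer
   as found; the STQD of $0 records the link-register save slot
   relative to the CFA.  Register usage and offsets here are
   illustrative only.  */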
700
701 /* Return the first instruction after the prologue starting at PC. */
702 static CORE_ADDR
703 spu_skip_prologue (CORE_ADDR pc)
704 {
705 struct spu_prologue_data data;
706 return spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
707 }
708
709 /* Return the frame pointer in use at address PC. */
710 static void
711 spu_virtual_frame_pointer (CORE_ADDR pc, int *reg, LONGEST *offset)
712 {
713 struct spu_prologue_data data;
714 spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
715
716 if (data.size != -1 && data.cfa_reg != -1)
717 {
718 /* The 'frame pointer' address is CFA minus frame size. */
719 *reg = data.cfa_reg;
720 *offset = data.cfa_offset - data.size;
721 }
722 else
723 {
724 /* ??? We don't really know ... */
725 *reg = SPU_SP_REGNUM;
726 *offset = 0;
727 }
728 }
729
730 /* Return true if we are in the function's epilogue, i.e. after the
731 instruction that destroyed the function's stack frame.
732
733 1) scan forward from the point of execution:
734 a) If you find an instruction that modifies the stack pointer
735 or transfers control (except a return), execution is not in
736 an epilogue, return.
737 b) Stop scanning if you find a return instruction or reach the
738 end of the function or reach the hard limit for the size of
739 an epilogue.
740 2) scan backward from the point of execution:
741 a) If you find an instruction that modifies the stack pointer,
742 execution *is* in an epilogue, return.
743 b) Stop scanning if you reach an instruction that transfers
744 control or the beginning of the function or reach the hard
745 limit for the size of an epilogue. */
746
747 static int
748 spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
749 {
750 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
751 bfd_byte buf[4];
752 unsigned int insn;
753 int rt, ra, rb, rc, immed;
754
755 /* Find the search limits based on function boundaries and hard limit.
756 We assume the epilogue can be up to 64 instructions long. */
757
758 const int spu_max_epilogue_size = 64 * 4;
759
760 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
761 return 0;
762
763 if (pc - func_start < spu_max_epilogue_size)
764 epilogue_start = func_start;
765 else
766 epilogue_start = pc - spu_max_epilogue_size;
767
768 if (func_end - pc < spu_max_epilogue_size)
769 epilogue_end = func_end;
770 else
771 epilogue_end = pc + spu_max_epilogue_size;
772
773 /* Scan forward until next 'bi $0'. */
774
775 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
776 {
777 if (target_read_memory (scan_pc, buf, 4))
778 return 0;
779 insn = extract_unsigned_integer (buf, 4);
780
781 if (is_branch (insn, &immed, &ra))
782 {
783 if (immed == 0 && ra == SPU_LR_REGNUM)
784 break;
785
786 return 0;
787 }
788
789 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
790 || is_rr (insn, op_a, &rt, &ra, &rb)
791 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
792 {
793 if (rt == SPU_RAW_SP_REGNUM)
794 return 0;
795 }
796 }
797
798 if (scan_pc >= epilogue_end)
799 return 0;
800
801 /* Scan backward until adjustment to stack pointer (R1). */
802
803 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
804 {
805 if (target_read_memory (scan_pc, buf, 4))
806 return 0;
807 insn = extract_unsigned_integer (buf, 4);
808
809 if (is_branch (insn, &immed, &ra))
810 return 0;
811
812 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
813 || is_rr (insn, op_a, &rt, &ra, &rb)
814 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
815 {
816 if (rt == SPU_RAW_SP_REGNUM)
817 return 1;
818 }
819 }
820
821 return 0;
822 }
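/* For example, given a (hypothetical) epilogue

        ai      $1,$1,48        # pop the frame
        lqd     $0,16($1)       # reload the link register
        bi      $0              # return

   a PC pointing at the LQD or the BI is recognized as being in the
   epilogue: the forward scan stops at the return, and the backward
   scan finds the AI that adjusted the stack pointer.  A PC at the AI
   itself is not, because the forward scan then encounters an
   instruction modifying the stack pointer and gives up.  */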
823
824
825 /* Normal stack frames. */
826
827 struct spu_unwind_cache
828 {
829 CORE_ADDR func;
830 CORE_ADDR frame_base;
831 CORE_ADDR local_base;
832
833 struct trad_frame_saved_reg *saved_regs;
834 };
835
836 static struct spu_unwind_cache *
837 spu_frame_unwind_cache (struct frame_info *next_frame,
838 void **this_prologue_cache)
839 {
840 struct spu_unwind_cache *info;
841 struct spu_prologue_data data;
842 gdb_byte buf[16];
843
844 if (*this_prologue_cache)
845 return *this_prologue_cache;
846
847 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
848 *this_prologue_cache = info;
849 info->saved_regs = trad_frame_alloc_saved_regs (next_frame);
850 info->frame_base = 0;
851 info->local_base = 0;
852
853 /* Find the start of the current function, and analyze its prologue. */
854 info->func = frame_func_unwind (next_frame, NORMAL_FRAME);
855 if (info->func == 0)
856 {
857 /* Fall back to using the current PC as frame ID. */
858 info->func = frame_pc_unwind (next_frame);
859 data.size = -1;
860 }
861 else
862 spu_analyze_prologue (info->func, frame_pc_unwind (next_frame), &data);
863
864
865 /* If successful, use prologue analysis data. */
866 if (data.size != -1 && data.cfa_reg != -1)
867 {
868 CORE_ADDR cfa;
869 int i;
870
871 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
872 frame_unwind_register (next_frame, data.cfa_reg, buf);
873 cfa = extract_unsigned_integer (buf, 4) + data.cfa_offset;
874
875 /* Call-saved register slots. */
876 for (i = 0; i < SPU_NUM_GPRS; i++)
877 if (i == SPU_LR_REGNUM
878 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
879 if (data.reg_offset[i] != -1)
880 info->saved_regs[i].addr = cfa - data.reg_offset[i];
881
882 /* Frame bases. */
883 info->frame_base = cfa;
884 info->local_base = cfa - data.size;
885 }
886
887 /* Otherwise, fall back to reading the backchain link. */
888 else
889 {
890 CORE_ADDR reg, backchain;
891
892 /* Get the backchain. */
893 reg = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
894 backchain = read_memory_unsigned_integer (reg, 4);
895
896 /* A zero backchain terminates the frame chain. Also, sanity
897 check against the local store size limit. */
898 if (backchain != 0 && backchain < SPU_LS_SIZE)
899 {
900 /* Assume the link register is saved into its slot. */
901 if (backchain + 16 < SPU_LS_SIZE)
902 info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;
903
904 /* Frame bases. */
905 info->frame_base = backchain;
906 info->local_base = reg;
907 }
908 }
909
910 /* The previous SP is equal to the CFA. */
911 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);
912
913 /* Read full contents of the unwound link register in order to
914 be able to determine the return address. */
915 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
916 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
917 else
918 frame_unwind_register (next_frame, SPU_LR_REGNUM, buf);
919
 920   /* Normally, the return address is contained in slot 0 of the
 921      link register, and slots 1-3 are zero.  For an overlay return,
922 slot 0 contains the address of the overlay manager return stub,
923 slot 1 contains the partition number of the overlay section to
924 be returned to, and slot 2 contains the return address within
925 that section. Return the latter address in that case. */
926 if (extract_unsigned_integer (buf + 8, 4) != 0)
927 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
928 extract_unsigned_integer (buf + 8, 4));
929 else
930 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
931 extract_unsigned_integer (buf, 4));
932
933 return info;
934 }
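/* A short example of the back-chain fallback above (addresses are
   illustrative only): if the unwound SP is 0x3fd0 and the word stored
   there is 0x3ff0, the caller's frame base becomes 0x3ff0, and the
   link register is assumed to have been saved into its usual slot
   16 bytes above the back chain, i.e. at 0x4000.  */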
935
936 static void
937 spu_frame_this_id (struct frame_info *next_frame,
938 void **this_prologue_cache, struct frame_id *this_id)
939 {
940 struct spu_unwind_cache *info =
941 spu_frame_unwind_cache (next_frame, this_prologue_cache);
942
943 if (info->frame_base == 0)
944 return;
945
946 *this_id = frame_id_build (info->frame_base, info->func);
947 }
948
949 static void
950 spu_frame_prev_register (struct frame_info *next_frame,
951 void **this_prologue_cache,
952 int regnum, int *optimizedp,
953 enum lval_type *lvalp, CORE_ADDR * addrp,
954 int *realnump, gdb_byte *bufferp)
955 {
956 struct spu_unwind_cache *info
957 = spu_frame_unwind_cache (next_frame, this_prologue_cache);
958
959 /* Special-case the stack pointer. */
960 if (regnum == SPU_RAW_SP_REGNUM)
961 regnum = SPU_SP_REGNUM;
962
963 trad_frame_get_prev_register (next_frame, info->saved_regs, regnum,
964 optimizedp, lvalp, addrp, realnump, bufferp);
965 }
966
967 static const struct frame_unwind spu_frame_unwind = {
968 NORMAL_FRAME,
969 spu_frame_this_id,
970 spu_frame_prev_register
971 };
972
973 const struct frame_unwind *
974 spu_frame_sniffer (struct frame_info *next_frame)
975 {
976 return &spu_frame_unwind;
977 }
978
979 static CORE_ADDR
980 spu_frame_base_address (struct frame_info *next_frame, void **this_cache)
981 {
982 struct spu_unwind_cache *info
983 = spu_frame_unwind_cache (next_frame, this_cache);
984 return info->local_base;
985 }
986
987 static const struct frame_base spu_frame_base = {
988 &spu_frame_unwind,
989 spu_frame_base_address,
990 spu_frame_base_address,
991 spu_frame_base_address
992 };
993
994 static CORE_ADDR
995 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
996 {
997 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
998 /* Mask off interrupt enable bit. */
999 return pc & -4;
1000 }
1001
1002 static CORE_ADDR
1003 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1004 {
1005 return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1006 }
1007
1008 static CORE_ADDR
1009 spu_read_pc (struct regcache *regcache)
1010 {
1011 ULONGEST pc;
1012 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1013 /* Mask off interrupt enable bit. */
1014 return pc & -4;
1015 }
1016
1017 static void
1018 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1019 {
1020 /* Keep interrupt enabled state unchanged. */
1021 ULONGEST old_pc;
1022 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1023 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1024 (pc & -4) | (old_pc & 3));
1025 }
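/* Example of the masking above: writing PC 0x1234 while the old PC
   value was 0x3007 stores 0x1237 -- the new address with the low two
   bits taken from the old value, so the SPU's interrupt-enable state
   carried in those bits is left untouched.  */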
1026
1027
1028 /* Function calling convention. */
1029
1030 static CORE_ADDR
1031 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1032 {
1033 return sp & ~15;
1034 }
1035
1036 static int
1037 spu_scalar_value_p (struct type *type)
1038 {
1039 switch (TYPE_CODE (type))
1040 {
1041 case TYPE_CODE_INT:
1042 case TYPE_CODE_ENUM:
1043 case TYPE_CODE_RANGE:
1044 case TYPE_CODE_CHAR:
1045 case TYPE_CODE_BOOL:
1046 case TYPE_CODE_PTR:
1047 case TYPE_CODE_REF:
1048 return TYPE_LENGTH (type) <= 16;
1049
1050 default:
1051 return 0;
1052 }
1053 }
1054
1055 static void
1056 spu_value_to_regcache (struct regcache *regcache, int regnum,
1057 struct type *type, const gdb_byte *in)
1058 {
1059 int len = TYPE_LENGTH (type);
1060
1061 if (spu_scalar_value_p (type))
1062 {
1063 int preferred_slot = len < 4 ? 4 - len : 0;
1064 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1065 }
1066 else
1067 {
1068 while (len >= 16)
1069 {
1070 regcache_cooked_write (regcache, regnum++, in);
1071 in += 16;
1072 len -= 16;
1073 }
1074
1075 if (len > 0)
1076 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1077 }
1078 }
1079
1080 static void
1081 spu_regcache_to_value (struct regcache *regcache, int regnum,
1082 struct type *type, gdb_byte *out)
1083 {
1084 int len = TYPE_LENGTH (type);
1085
1086 if (spu_scalar_value_p (type))
1087 {
1088 int preferred_slot = len < 4 ? 4 - len : 0;
1089 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1090 }
1091 else
1092 {
1093 while (len >= 16)
1094 {
1095 regcache_cooked_read (regcache, regnum++, out);
1096 out += 16;
1097 len -= 16;
1098 }
1099
1100 if (len > 0)
1101 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1102 }
1103 }
1104
1105 static CORE_ADDR
1106 spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1107 struct regcache *regcache, CORE_ADDR bp_addr,
1108 int nargs, struct value **args, CORE_ADDR sp,
1109 int struct_return, CORE_ADDR struct_addr)
1110 {
1111 int i;
1112 int regnum = SPU_ARG1_REGNUM;
1113 int stack_arg = -1;
1114 gdb_byte buf[16];
1115
1116 /* Set the return address. */
1117 memset (buf, 0, sizeof buf);
1118 store_unsigned_integer (buf, 4, bp_addr);
1119 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1120
1121 /* If STRUCT_RETURN is true, then the struct return address (in
1122 STRUCT_ADDR) will consume the first argument-passing register.
1123 Both adjust the register count and store that value. */
1124 if (struct_return)
1125 {
1126 memset (buf, 0, sizeof buf);
1127 store_unsigned_integer (buf, 4, struct_addr);
1128 regcache_cooked_write (regcache, regnum++, buf);
1129 }
1130
1131 /* Fill in argument registers. */
1132 for (i = 0; i < nargs; i++)
1133 {
1134 struct value *arg = args[i];
1135 struct type *type = check_typedef (value_type (arg));
1136 const gdb_byte *contents = value_contents (arg);
1137 int len = TYPE_LENGTH (type);
1138 int n_regs = align_up (len, 16) / 16;
1139
1140 /* If the argument doesn't wholly fit into registers, it and
1141 all subsequent arguments go to the stack. */
1142 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1143 {
1144 stack_arg = i;
1145 break;
1146 }
1147
1148 spu_value_to_regcache (regcache, regnum, type, contents);
1149 regnum += n_regs;
1150 }
1151
1152 /* Overflow arguments go to the stack. */
1153 if (stack_arg != -1)
1154 {
1155 CORE_ADDR ap;
1156
1157 /* Allocate all required stack size. */
1158 for (i = stack_arg; i < nargs; i++)
1159 {
1160 struct type *type = check_typedef (value_type (args[i]));
1161 sp -= align_up (TYPE_LENGTH (type), 16);
1162 }
1163
1164 /* Fill in stack arguments. */
1165 ap = sp;
1166 for (i = stack_arg; i < nargs; i++)
1167 {
1168 struct value *arg = args[i];
1169 struct type *type = check_typedef (value_type (arg));
1170 int len = TYPE_LENGTH (type);
1171 int preferred_slot;
1172
1173 if (spu_scalar_value_p (type))
1174 preferred_slot = len < 4 ? 4 - len : 0;
1175 else
1176 preferred_slot = 0;
1177
1178 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1179 ap += align_up (TYPE_LENGTH (type), 16);
1180 }
1181 }
1182
1183 /* Allocate stack frame header. */
1184 sp -= 32;
1185
1186 /* Store stack back chain. */
1187 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1188 target_write_memory (sp, buf, 16);
1189
1190 /* Finally, update the SP register. */
1191 regcache_cooked_write_unsigned (regcache, SPU_SP_REGNUM, sp);
1192
1193 return sp;
1194 }
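/* To illustrate the argument passing above: each argument is padded
   to a multiple of 16 bytes and occupies that many registers starting
   at SPU_ARG1_REGNUM, so e.g. a 20-byte struct consumes two registers
   (align_up (20, 16) / 16 == 2).  Once an argument no longer fits in
   the register block ending at SPU_ARGN_REGNUM, it and all following
   arguments go to 16-byte aligned stack slots, with scalars again
   placed at their preferred slot.  */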
1195
1196 static struct frame_id
1197 spu_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
1198 {
1199 return frame_id_build (spu_unwind_sp (gdbarch, next_frame),
1200 spu_unwind_pc (gdbarch, next_frame));
1201 }
1202
1203 /* Function return value access. */
1204
1205 static enum return_value_convention
1206 spu_return_value (struct gdbarch *gdbarch, struct type *type,
1207 struct regcache *regcache, gdb_byte *out, const gdb_byte *in)
1208 {
1209 enum return_value_convention rvc;
1210
1211 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1212 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1213 else
1214 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1215
1216 if (in)
1217 {
1218 switch (rvc)
1219 {
1220 case RETURN_VALUE_REGISTER_CONVENTION:
1221 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1222 break;
1223
1224 case RETURN_VALUE_STRUCT_CONVENTION:
1225 error ("Cannot set function return value.");
1226 break;
1227 }
1228 }
1229 else if (out)
1230 {
1231 switch (rvc)
1232 {
1233 case RETURN_VALUE_REGISTER_CONVENTION:
1234 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1235 break;
1236
1237 case RETURN_VALUE_STRUCT_CONVENTION:
1238 error ("Function return value unknown.");
1239 break;
1240 }
1241 }
1242
1243 return rvc;
1244 }
1245
1246
1247 /* Breakpoints. */
1248
1249 static const gdb_byte *
1250 spu_breakpoint_from_pc (CORE_ADDR * pcptr, int *lenptr)
1251 {
1252 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1253
1254 *lenptr = sizeof breakpoint;
1255 return breakpoint;
1256 }
1257
1258
1259 /* Software single-stepping support. */
1260
1261 int
1262 spu_software_single_step (struct frame_info *frame)
1263 {
1264 CORE_ADDR pc, next_pc;
1265 unsigned int insn;
1266 int offset, reg;
1267 gdb_byte buf[4];
1268
1269 pc = get_frame_pc (frame);
1270
1271 if (target_read_memory (pc, buf, 4))
1272 return 1;
1273 insn = extract_unsigned_integer (buf, 4);
1274
1275 /* Next sequential instruction is at PC + 4, except if the current
1276 instruction is a PPE-assisted call, in which case it is at PC + 8.
1277 Wrap around LS limit to be on the safe side. */
1278 if ((insn & 0xffffff00) == 0x00002100)
1279 next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
1280 else
1281 next_pc = (pc + 4) & (SPU_LS_SIZE - 1);
1282
1283 insert_single_step_breakpoint (next_pc);
1284
1285 if (is_branch (insn, &offset, &reg))
1286 {
1287 CORE_ADDR target = offset;
1288
1289 if (reg == SPU_PC_REGNUM)
1290 target += pc;
1291 else if (reg != -1)
1292 {
1293 get_frame_register_bytes (frame, reg, 0, 4, buf);
1294 target += extract_unsigned_integer (buf, 4) & -4;
1295 }
1296
1297 target = target & (SPU_LS_SIZE - 1);
1298 if (target != next_pc)
1299 insert_single_step_breakpoint (target);
1300 }
1301
1302 return 1;
1303 }
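/* Single-step example: for a conditional branch such as "brnz" at
   address 0x100 with i16 = 8, breakpoints are planted both at the
   fall-through address 0x104 and at the branch target
   0x100 + (8 << 2) = 0x120, so execution is caught whichever way the
   branch goes.  */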
1304
1305 /* Target overlays for the SPU overlay manager.
1306
1307 See the documentation of simple_overlay_update for how the
1308 interface is supposed to work.
1309
1310 Data structures used by the overlay manager:
1311
1312 struct ovly_table
1313 {
1314 u32 vma;
1315 u32 size;
1316 u32 pos;
1317 u32 buf;
1318 } _ovly_table[]; -- one entry per overlay section
1319
1320 struct ovly_buf_table
1321 {
1322 u32 mapped;
1323 } _ovly_buf_table[]; -- one entry per overlay buffer
1324
1325 _ovly_table should never change.
1326
1327 Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1328 and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1329 of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1330
1331 mapped is an index into _ovly_table. Both the mapped and buf indices start
1332 from one to reference the first entry in their respective tables. */
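/* Worked example of the tables above (values are illustrative): if
   _ovly_table[2] (the third entry, index i == 2) describes an overlay
   section whose "buf" field is 1, then that section is mapped exactly
   when _ovly_buf_table[0].mapped contains 3 (the one-based table
   index i + 1).  This is the MAPPED_PTR / MAPPED_VAL check set up by
   spu_get_overlay_table below.  */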
1333
1334 /* Using the per-objfile private data mechanism, we store for each
1335 objfile an array of "struct spu_overlay_table" structures, one
1336 for each obj_section of the objfile. This structure holds two
1337 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1338 is *not* an overlay section. If it is non-zero, it represents
1339 a target address. The overlay section is mapped iff the target
1340 integer at this location equals MAPPED_VAL. */
1341
1342 static const struct objfile_data *spu_overlay_data;
1343
1344 struct spu_overlay_table
1345 {
1346 CORE_ADDR mapped_ptr;
1347 CORE_ADDR mapped_val;
1348 };
1349
1350 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1351 the _ovly_table data structure from the target and initialize the
1352 spu_overlay_table data structure from it. */
1353 static struct spu_overlay_table *
1354 spu_get_overlay_table (struct objfile *objfile)
1355 {
1356 struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1357 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1358 unsigned ovly_table_size, ovly_buf_table_size;
1359 struct spu_overlay_table *tbl;
1360 struct obj_section *osect;
1361 char *ovly_table;
1362 int i;
1363
1364 tbl = objfile_data (objfile, spu_overlay_data);
1365 if (tbl)
1366 return tbl;
1367
1368 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1369 if (!ovly_table_msym)
1370 return NULL;
1371
1372 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1373 if (!ovly_buf_table_msym)
1374 return NULL;
1375
1376 ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1377 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1378
1379 ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1380 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1381
1382 ovly_table = xmalloc (ovly_table_size);
1383 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1384
1385 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1386 objfile->sections_end - objfile->sections,
1387 struct spu_overlay_table);
1388
1389 for (i = 0; i < ovly_table_size / 16; i++)
1390 {
1391 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
1392 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
1393 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
1394 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);
1395
1396 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1397 continue;
1398
1399 ALL_OBJFILE_OSECTIONS (objfile, osect)
1400 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1401 && pos == osect->the_bfd_section->filepos)
1402 {
1403 int ndx = osect - objfile->sections;
1404 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1405 tbl[ndx].mapped_val = i + 1;
1406 break;
1407 }
1408 }
1409
1410 xfree (ovly_table);
1411 set_objfile_data (objfile, spu_overlay_data, tbl);
1412 return tbl;
1413 }
1414
1415 /* Read _ovly_buf_table entry from the target to determine whether
1416 OSECT is currently mapped, and update the mapped state. */
1417 static void
1418 spu_overlay_update_osect (struct obj_section *osect)
1419 {
1420 struct spu_overlay_table *ovly_table;
1421 CORE_ADDR val;
1422
1423 ovly_table = spu_get_overlay_table (osect->objfile);
1424 if (!ovly_table)
1425 return;
1426
1427 ovly_table += osect - osect->objfile->sections;
1428 if (ovly_table->mapped_ptr == 0)
1429 return;
1430
1431 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
1432 osect->ovly_mapped = (val == ovly_table->mapped_val);
1433 }
1434
1435 /* If OSECT is NULL, then update all sections' mapped state.
1436 If OSECT is non-NULL, then update only OSECT's mapped state. */
1437 static void
1438 spu_overlay_update (struct obj_section *osect)
1439 {
1440 /* Just one section. */
1441 if (osect)
1442 spu_overlay_update_osect (osect);
1443
1444 /* All sections. */
1445 else
1446 {
1447 struct objfile *objfile;
1448
1449 ALL_OBJSECTIONS (objfile, osect)
1450 if (section_is_overlay (osect->the_bfd_section))
1451 spu_overlay_update_osect (osect);
1452 }
1453 }
1454
1455 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1456    If there is one, go through all sections and make sure that for
1457    non-overlay sections LMA equals VMA, while for overlay sections
1458    LMA is larger than the local store size.  */
1459 static void
1460 spu_overlay_new_objfile (struct objfile *objfile)
1461 {
1462 struct spu_overlay_table *ovly_table;
1463 struct obj_section *osect;
1464
1465 /* If we've already touched this file, do nothing. */
1466 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1467 return;
1468
1469 /* Check if this objfile has overlays. */
1470 ovly_table = spu_get_overlay_table (objfile);
1471 if (!ovly_table)
1472 return;
1473
1474 /* Now go and fiddle with all the LMAs. */
1475 ALL_OBJFILE_OSECTIONS (objfile, osect)
1476 {
1477 bfd *obfd = objfile->obfd;
1478 asection *bsect = osect->the_bfd_section;
1479 int ndx = osect - objfile->sections;
1480
1481 if (ovly_table[ndx].mapped_ptr == 0)
1482 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1483 else
1484 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1485 }
1486 }
1487
1488
1489 /* "info spu" commands. */
1490
1491 static void
1492 info_spu_event_command (char *args, int from_tty)
1493 {
1494 struct frame_info *frame = get_selected_frame (NULL);
1495 ULONGEST event_status = 0;
1496 ULONGEST event_mask = 0;
1497 struct cleanup *chain;
1498 gdb_byte buf[100];
1499 char annex[32];
1500 LONGEST len;
1501 int rc, id;
1502
1503 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1504
1505 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1506 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1507 buf, 0, sizeof buf);
1508 if (len <= 0)
1509 error (_("Could not read event_status."));
1510 event_status = strtoulst (buf, NULL, 16);
1511
1512 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1513 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1514 buf, 0, sizeof buf);
1515 if (len <= 0)
1516 error (_("Could not read event_mask."));
1517 event_mask = strtoulst (buf, NULL, 16);
1518
1519 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1520
1521 if (ui_out_is_mi_like_p (uiout))
1522 {
1523 ui_out_field_fmt (uiout, "event_status",
1524 "0x%s", phex_nz (event_status, 4));
1525 ui_out_field_fmt (uiout, "event_mask",
1526 "0x%s", phex_nz (event_mask, 4));
1527 }
1528 else
1529 {
1530 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1531 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
1532 }
1533
1534 do_cleanups (chain);
1535 }
1536
1537 static void
1538 info_spu_signal_command (char *args, int from_tty)
1539 {
1540 struct frame_info *frame = get_selected_frame (NULL);
1541 ULONGEST signal1 = 0;
1542 ULONGEST signal1_type = 0;
1543 int signal1_pending = 0;
1544 ULONGEST signal2 = 0;
1545 ULONGEST signal2_type = 0;
1546 int signal2_pending = 0;
1547 struct cleanup *chain;
1548 char annex[32];
1549 gdb_byte buf[100];
1550 LONGEST len;
1551 int rc, id;
1552
1553 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1554
1555 xsnprintf (annex, sizeof annex, "%d/signal1", id);
1556 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1557 if (len < 0)
1558 error (_("Could not read signal1."));
1559 else if (len == 4)
1560 {
1561 signal1 = extract_unsigned_integer (buf, 4);
1562 signal1_pending = 1;
1563 }
1564
1565 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
1566 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1567 buf, 0, sizeof buf);
1568 if (len <= 0)
1569 error (_("Could not read signal1_type."));
1570 signal1_type = strtoulst (buf, NULL, 16);
1571
1572 xsnprintf (annex, sizeof annex, "%d/signal2", id);
1573 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1574 if (len < 0)
1575 error (_("Could not read signal2."));
1576 else if (len == 4)
1577 {
1578 signal2 = extract_unsigned_integer (buf, 4);
1579 signal2_pending = 1;
1580 }
1581
1582 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
1583 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1584 buf, 0, sizeof buf);
1585 if (len <= 0)
1586 error (_("Could not read signal2_type."));
1587 signal2_type = strtoulst (buf, NULL, 16);
1588
1589 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
1590
1591 if (ui_out_is_mi_like_p (uiout))
1592 {
1593 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
1594 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
1595 ui_out_field_int (uiout, "signal1_type", signal1_type);
1596 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
1597 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
1598 ui_out_field_int (uiout, "signal2_type", signal2_type);
1599 }
1600 else
1601 {
1602 if (signal1_pending)
1603 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
1604 else
1605 printf_filtered (_("Signal 1 not pending "));
1606
1607 if (signal1_type)
1608 printf_filtered (_("(Type Or)\n"));
1609 else
1610 printf_filtered (_("(Type Overwrite)\n"));
1611
1612 if (signal2_pending)
1613 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
1614 else
1615 printf_filtered (_("Signal 2 not pending "));
1616
1617 if (signal2_type)
1618 printf_filtered (_("(Type Or)\n"));
1619 else
1620 printf_filtered (_("(Type Overwrite)\n"));
1621 }
1622
1623 do_cleanups (chain);
1624 }
1625
1626 static void
1627 info_spu_mailbox_list (gdb_byte *buf, int nr,
1628 const char *field, const char *msg)
1629 {
1630 struct cleanup *chain;
1631 int i;
1632
1633 if (nr <= 0)
1634 return;
1635
1636 chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
1637
1638 ui_out_table_header (uiout, 32, ui_left, field, msg);
1639 ui_out_table_body (uiout);
1640
1641 for (i = 0; i < nr; i++)
1642 {
1643 struct cleanup *val_chain;
1644 ULONGEST val;
1645 val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
1646 val = extract_unsigned_integer (buf + 4*i, 4);
1647 ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
1648 do_cleanups (val_chain);
1649
1650 if (!ui_out_is_mi_like_p (uiout))
1651 printf_filtered ("\n");
1652 }
1653
1654 do_cleanups (chain);
1655 }
1656
1657 static void
1658 info_spu_mailbox_command (char *args, int from_tty)
1659 {
1660 struct frame_info *frame = get_selected_frame (NULL);
1661 struct cleanup *chain;
1662 char annex[32];
1663 gdb_byte buf[1024];
1664 LONGEST len;
1665 int i, id;
1666
1667 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1668
1669 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
1670
1671 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
1672 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1673 buf, 0, sizeof buf);
1674 if (len < 0)
1675 error (_("Could not read mbox_info."));
1676
1677 info_spu_mailbox_list (buf, len / 4, "mbox", "SPU Outbound Mailbox");
1678
1679 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
1680 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1681 buf, 0, sizeof buf);
1682 if (len < 0)
1683 error (_("Could not read ibox_info."));
1684
1685 info_spu_mailbox_list (buf, len / 4, "ibox", "SPU Outbound Interrupt Mailbox");
1686
1687 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
1688 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1689 buf, 0, sizeof buf);
1690 if (len < 0)
1691 error (_("Could not read wbox_info."));
1692
1693 info_spu_mailbox_list (buf, len / 4, "wbox", "SPU Inbound Mailbox");
1694
1695 do_cleanups (chain);
1696 }
1697
1698 static ULONGEST
1699 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
1700 {
1701 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
1702 return (word >> (63 - last)) & mask;
1703 }
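/* The bit numbering used here follows the big-endian convention of
   the hardware documentation: bit 0 is the most significant bit of
   the 64-bit word.  For instance, spu_mfc_get_bitfield (w, 0, 14)
   returns the top 15 bits of W, and spu_mfc_get_bitfield (w, 63, 63)
   returns its least significant bit.  */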
1704
1705 static void
1706 info_spu_dma_cmdlist (gdb_byte *buf, int nr)
1707 {
1708 static char *spu_mfc_opcode[256] =
1709 {
1710 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1711 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1712 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1713 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1714 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
1715 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
1716 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
1717 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1718 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
1719 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
1720 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1721 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1722 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1723 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1724 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1725 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1726 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
1727 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
1728 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1729 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1730 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
1731 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1732 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
1733 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1734 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1735 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
1736 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1737 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1738 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1739 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1740 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1741 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1742 };
1743
1744 struct cleanup *chain;
1745 int i;
1746
1747 chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
1748
1749 ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
1750 ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
1751 ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
1752 ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
1753 ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
1754 ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
1755 ui_out_table_header (uiout, 7, ui_left, "size", "Size");
1756 ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
1757 ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
1758 ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
1759
1760 ui_out_table_body (uiout);
1761
1762 for (i = 0; i < nr; i++)
1763 {
1764 struct cleanup *cmd_chain;
1765 ULONGEST mfc_cq_dw0;
1766 ULONGEST mfc_cq_dw1;
1767 ULONGEST mfc_cq_dw2;
1768 ULONGEST mfc_cq_dw3;
1769 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
1770 int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
1771 ULONGEST mfc_ea;
1772 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
1773
1774 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
1775 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
1776
1777 mfc_cq_dw0 = extract_unsigned_integer (buf + 32*i, 8);
1778 mfc_cq_dw1 = extract_unsigned_integer (buf + 32*i + 8, 8);
1779 mfc_cq_dw2 = extract_unsigned_integer (buf + 32*i + 16, 8);
1780 mfc_cq_dw3 = extract_unsigned_integer (buf + 32*i + 24, 8);
1781
1782 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
1783 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
1784 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
1785 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
1786 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
1787 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
1788 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
1789
1790 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
1791 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
1792
1793 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
1794 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
1795 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
1796 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
1797 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
1798 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
1799
1800 cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
1801
1802 if (spu_mfc_opcode[mfc_cmd_opcode])
1803 ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
1804 else
1805 ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
1806
1807 ui_out_field_int (uiout, "tag", mfc_cmd_tag);
1808 ui_out_field_int (uiout, "tid", tclass_id);
1809 ui_out_field_int (uiout, "rid", rclass_id);
1810
1811 if (ea_valid_p)
1812 ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
1813 else
1814 ui_out_field_skip (uiout, "ea");
1815
1816 ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
1817 if (qw_valid_p)
1818 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
1819 else
1820 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
1821
1822 if (list_valid_p)
1823 {
1824 ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
1825 ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
1826 }
1827 else
1828 {
1829 ui_out_field_skip (uiout, "lstaddr");
1830 ui_out_field_skip (uiout, "lstsize");
1831 }
1832
1833 if (cmd_error_p)
1834 ui_out_field_string (uiout, "error_p", "*");
1835 else
1836 ui_out_field_skip (uiout, "error_p");
1837
1838 do_cleanups (cmd_chain);
1839
1840 if (!ui_out_is_mi_like_p (uiout))
1841 printf_filtered ("\n");
1842 }
1843
1844 do_cleanups (chain);
1845 }
1846
1847 static void
1848 info_spu_dma_command (char *args, int from_tty)
1849 {
1850 struct frame_info *frame = get_selected_frame (NULL);
1851 ULONGEST dma_info_type;
1852 ULONGEST dma_info_mask;
1853 ULONGEST dma_info_status;
1854 ULONGEST dma_info_stall_and_notify;
1855 ULONGEST dma_info_atomic_command_status;
1856 struct cleanup *chain;
1857 char annex[32];
1858 gdb_byte buf[1024];
1859 LONGEST len;
1860 int i, id;
1861
1862 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1863
1864 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
1865 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1866 buf, 0, 40 + 16 * 32);
1867 if (len <= 0)
1868 error (_("Could not read dma_info."));
1869
1870 dma_info_type = extract_unsigned_integer (buf, 8);
1871 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
1872 dma_info_status = extract_unsigned_integer (buf + 16, 8);
1873 dma_info_stall_and_notify = extract_unsigned_integer (buf + 24, 8);
1874 dma_info_atomic_command_status = extract_unsigned_integer (buf + 32, 8);
1875
1876 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
1877
1878 if (ui_out_is_mi_like_p (uiout))
1879 {
1880 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
1881 phex_nz (dma_info_type, 4));
1882 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
1883 phex_nz (dma_info_mask, 4));
1884 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
1885 phex_nz (dma_info_status, 4));
1886 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
1887 phex_nz (dma_info_stall_and_notify, 4));
1888 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
1889 phex_nz (dma_info_atomic_command_status, 4));
1890 }
1891 else
1892 {
1893 const char *query_msg;
1894
1895 switch (dma_info_type)
1896 {
1897 case 0: query_msg = _("no query pending"); break;
1898 case 1: query_msg = _("'any' query pending"); break;
1899 case 2: query_msg = _("'all' query pending"); break;
1900 default: query_msg = _("undefined query type"); break;
1901 }
1902
1903 printf_filtered (_("Tag-Group Status 0x%s\n"),
1904 phex (dma_info_status, 4));
1905 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
1906 phex (dma_info_mask, 4), query_msg);
1907 printf_filtered (_("Stall-and-Notify 0x%s\n"),
1908 phex (dma_info_stall_and_notify, 4));
1909 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
1910 phex (dma_info_atomic_command_status, 4));
1911 printf_filtered ("\n");
1912 }
1913
1914 info_spu_dma_cmdlist (buf + 40, 16);
1915 do_cleanups (chain);
1916 }
1917
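/* The "info spu proxydma" command: display MFC Proxy-DMA status for
   the SPU context of the currently selected frame.  */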
1918 static void
1919 info_spu_proxydma_command (char *args, int from_tty)
1920 {
1921 struct frame_info *frame = get_selected_frame (NULL);
1922 ULONGEST dma_info_type;
1923 ULONGEST dma_info_mask;
1924 ULONGEST dma_info_status;
1925 struct cleanup *chain;
1926 char annex[32];
1927 gdb_byte buf[1024];
1928 LONGEST len;
1929 int i, id;
1930
1931 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1932
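/* The proxydma_info area consists of three 8-byte status words
   followed by 8 command-queue entries of 32 bytes each.  */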
1933 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
1934 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1935 buf, 0, 24 + 8 * 32);
1936 if (len <= 0)
1937 error (_("Could not read proxydma_info."));
1938
1939 dma_info_type = extract_unsigned_integer (buf, 8);
1940 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
1941 dma_info_status = extract_unsigned_integer (buf + 16, 8);
1942
1943 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
1944
1945 if (ui_out_is_mi_like_p (uiout))
1946 {
1947 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
1948 phex_nz (dma_info_type, 4));
1949 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
1950 phex_nz (dma_info_mask, 4));
1951 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
1952 phex_nz (dma_info_status, 4));
1953 }
1954 else
1955 {
1956 const char *query_msg;
1957
1958 switch (dma_info_type)
1959 {
1960 case 0: query_msg = _("no query pending"); break;
1961 case 1: query_msg = _("'any' query pending"); break;
1962 case 2: query_msg = _("'all' query pending"); break;
1963 default: query_msg = _("undefined query type"); break;
1964 }
1965
1966 printf_filtered (_("Tag-Group Status 0x%s\n"),
1967 phex (dma_info_status, 4));
1968 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
1969 phex (dma_info_mask, 4), query_msg);
1970 printf_filtered ("\n");
1971 }
1972
1973 info_spu_dma_cmdlist (buf + 24, 8);
1974 do_cleanups (chain);
1975 }
1976
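/* The "info spu" prefix command: print usage information and the list
   of available subcommands.  */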
1977 static void
1978 info_spu_command (char *args, int from_tty)
1979 {
1980 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
1981 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
1982 }
1983
1984
1985 /* Set up gdbarch struct. */
1986
1987 static struct gdbarch *
1988 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
1989 {
1990 struct gdbarch *gdbarch;
1991 struct gdbarch_tdep *tdep;
1992
1993 /* Find a candidate among the list of pre-declared architectures. */
1994 arches = gdbarch_list_lookup_by_info (arches, &info);
1995 if (arches != NULL)
1996 return arches->gdbarch;
1997
1998 /* Is it for us? */
1999 if (info.bfd_arch_info->mach != bfd_mach_spu)
2000 return NULL;
2001
2002 /* Yes, create a new architecture. */
2003 tdep = XCALLOC (1, struct gdbarch_tdep);
2004 gdbarch = gdbarch_alloc (&info, tdep);
2005
2006 /* Disassembler. */
2007 set_gdbarch_print_insn (gdbarch, print_insn_spu);
2008
2009 /* Registers. */
2010 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2011 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2012 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2013 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2014 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2015 set_gdbarch_write_pc (gdbarch, spu_write_pc);
2016 set_gdbarch_register_name (gdbarch, spu_register_name);
2017 set_gdbarch_register_type (gdbarch, spu_register_type);
2018 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2019 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2020 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2021 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2022
2023 /* Data types. */
2024 set_gdbarch_char_signed (gdbarch, 0);
2025 set_gdbarch_ptr_bit (gdbarch, 32);
2026 set_gdbarch_addr_bit (gdbarch, 32);
2027 set_gdbarch_short_bit (gdbarch, 16);
2028 set_gdbarch_int_bit (gdbarch, 32);
2029 set_gdbarch_long_bit (gdbarch, 32);
2030 set_gdbarch_long_long_bit (gdbarch, 64);
2031 set_gdbarch_float_bit (gdbarch, 32);
2032 set_gdbarch_double_bit (gdbarch, 64);
2033 set_gdbarch_long_double_bit (gdbarch, 64);
2034 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2035 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2036 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2037
2038 /* Address conversion. */
2039 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2040 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2041
2042 /* Inferior function calls. */
2043 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2044 set_gdbarch_frame_align (gdbarch, spu_frame_align);
2045 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2046 set_gdbarch_unwind_dummy_id (gdbarch, spu_unwind_dummy_id);
2047 set_gdbarch_return_value (gdbarch, spu_return_value);
2048
2049 /* Frame handling. */
2050 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2051 frame_unwind_append_sniffer (gdbarch, spu_frame_sniffer);
2052 frame_base_set_default (gdbarch, &spu_frame_base);
2053 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2054 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2055 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2056 set_gdbarch_frame_args_skip (gdbarch, 0);
2057 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2058 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2059
2060 /* Breakpoints. */
2061 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2062 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2063 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2064 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2065
2066 /* Overlays. */
2067 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2068
2069 return gdbarch;
2070 }
2071
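/* Module initialization: register the SPU architecture, hook into the
   new-objfile observer chain for overlay handling, and install the
   "info spu" commands.  */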
2072 void
2073 _initialize_spu_tdep (void)
2074 {
2075 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2076
2077 /* Add ourselves to the objfile event chain. */
2078 observer_attach_new_objfile (spu_overlay_new_objfile);
2079 spu_overlay_data = register_objfile_data ();
2080
2081 /* Add root prefix command for all "info spu" commands. */
2082 add_prefix_cmd ("spu", class_info, info_spu_command,
2083 _("Various SPU specific commands."),
2084 &infospucmdlist, "info spu ", 0, &infolist);
2085
2086 /* Add various "info spu" commands. */
2087 add_cmd ("event", class_info, info_spu_event_command,
2088 _("Display SPU event facility status.\n"),
2089 &infospucmdlist);
2090 add_cmd ("signal", class_info, info_spu_signal_command,
2091 _("Display SPU signal notification facility status.\n"),
2092 &infospucmdlist);
2093 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2094 _("Display SPU mailbox facility status.\n"),
2095 &infospucmdlist);
2096 add_cmd ("dma", class_info, info_spu_dma_command,
2097 _("Display MFC DMA status.\n"),
2098 &infospucmdlist);
2099 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2100 _("Display MFC Proxy-DMA status.\n"),
2101 &infospucmdlist);
2102 }