Remove regcache_raw_supply
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-ppc-low.c
1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <elf.h>
24 #include <asm/ptrace.h>
25
26 #include "arch/ppc-linux-common.h"
27 #include "arch/ppc-linux-tdesc.h"
28 #include "nat/ppc-linux.h"
29 #include "linux-ppc-tdesc-init.h"
30 #include "ax.h"
31 #include "tracepoint.h"
32
/* Extract from the 32-bit instruction word VALUE the bit field that
   starts at big-endian bit position FROM (bit 0 is the most
   significant bit) and is LEN bits wide.  */
#define PPC_FIELD(value, from, len) \
	(((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
/* Sign-extend the BS-bit value V to the full width of CORE_ADDR.  */
#define PPC_SEXT(v, bs) \
	((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
	  ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
	 - ((CORE_ADDR) 1 << ((bs) - 1)))
/* Primary opcode field (bits 0-5) of an instruction.  */
#define PPC_OP6(insn)	PPC_FIELD (insn, 0, 6)
/* BO field (bits 6-10) of a conditional branch instruction.  */
#define PPC_BO(insn)	PPC_FIELD (insn, 6, 5)
/* LI displacement of an I-form branch, sign-extended and scaled by 4.  */
#define PPC_LI(insn)	(PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
/* BD displacement of a B-form branch, sign-extended and scaled by 4.  */
#define PPC_BD(insn)	(PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)

/* Inferior's AT_HWCAP auxiliary-vector value; filled in by
   ppc_arch_setup via ppc_get_auxv.  */
static unsigned long ppc_hwcap;


/* Number of entries in the ppc_regmap tables below: r0-r31, f0-f31,
   pc, msr, cr, lr, ctr, xer, fpscr, orig_r3, trap.  */
#define ppc_num_regs 73
48
#ifdef __powerpc64__
/* We use a constant for FPSCR instead of PT_FPSCR, because
   many shipped PPC64 kernels had the wrong value in ptrace.h.  */

/* Byte offset into the ptrace user area of each of the 73 regmap
   registers, in target-description order: r0-r31, f0-f31, then pc
   (nip), msr, cr, lr, ctr, xer, fpscr, orig_r3, trap.  Each ptrace
   slot is 8 bytes wide on 64-bit kernels.  */
static int ppc_regmap[] =
 {PT_R0 * 8,     PT_R1 * 8,     PT_R2 * 8,     PT_R3 * 8,
  PT_R4 * 8,     PT_R5 * 8,     PT_R6 * 8,     PT_R7 * 8,
  PT_R8 * 8,     PT_R9 * 8,     PT_R10 * 8,    PT_R11 * 8,
  PT_R12 * 8,    PT_R13 * 8,    PT_R14 * 8,    PT_R15 * 8,
  PT_R16 * 8,    PT_R17 * 8,    PT_R18 * 8,    PT_R19 * 8,
  PT_R20 * 8,    PT_R21 * 8,    PT_R22 * 8,    PT_R23 * 8,
  PT_R24 * 8,    PT_R25 * 8,    PT_R26 * 8,    PT_R27 * 8,
  PT_R28 * 8,    PT_R29 * 8,    PT_R30 * 8,    PT_R31 * 8,
  PT_FPR0*8,     PT_FPR0*8 + 8, PT_FPR0*8+16,  PT_FPR0*8+24,
  PT_FPR0*8+32,  PT_FPR0*8+40,  PT_FPR0*8+48,  PT_FPR0*8+56,
  PT_FPR0*8+64,  PT_FPR0*8+72,  PT_FPR0*8+80,  PT_FPR0*8+88,
  PT_FPR0*8+96,  PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
  PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
  PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
  PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
  PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
  PT_NIP * 8,    PT_MSR * 8,    PT_CCR * 8,    PT_LNK * 8,
  /* FPSCR lives right after the 32 FPRs (see comment above on why
     PT_FPSCR is not used here).  */
  PT_CTR * 8,    PT_XER * 8,    PT_FPR0*8 + 256,
  PT_ORIG_R3 * 8, PT_TRAP * 8 };
#else
/* Currently, don't check/send MQ.  */

/* 32-bit variant of the table above; ptrace slots are 4 bytes,
   except that each FPR still occupies 8 bytes.  */
static int ppc_regmap[] =
 {PT_R0 * 4,     PT_R1 * 4,     PT_R2 * 4,     PT_R3 * 4,
  PT_R4 * 4,     PT_R5 * 4,     PT_R6 * 4,     PT_R7 * 4,
  PT_R8 * 4,     PT_R9 * 4,     PT_R10 * 4,    PT_R11 * 4,
  PT_R12 * 4,    PT_R13 * 4,    PT_R14 * 4,    PT_R15 * 4,
  PT_R16 * 4,    PT_R17 * 4,    PT_R18 * 4,    PT_R19 * 4,
  PT_R20 * 4,    PT_R21 * 4,    PT_R22 * 4,    PT_R23 * 4,
  PT_R24 * 4,    PT_R25 * 4,    PT_R26 * 4,    PT_R27 * 4,
  PT_R28 * 4,    PT_R29 * 4,    PT_R30 * 4,    PT_R31 * 4,
  PT_FPR0*4,     PT_FPR0*4 + 8, PT_FPR0*4+16,  PT_FPR0*4+24,
  PT_FPR0*4+32,  PT_FPR0*4+40,  PT_FPR0*4+48,  PT_FPR0*4+56,
  PT_FPR0*4+64,  PT_FPR0*4+72,  PT_FPR0*4+80,  PT_FPR0*4+88,
  PT_FPR0*4+96,  PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
  PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
  PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
  PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
  PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
  PT_NIP * 4,    PT_MSR * 4,    PT_CCR * 4,    PT_LNK * 4,
  PT_CTR * 4,    PT_XER * 4,    PT_FPSCR * 4,
  PT_ORIG_R3 * 4, PT_TRAP * 4
 };

/* Regmap for e500 (SPE) cores: no classic FPRs or FPSCR, so those
   slots are -1 (not fetchable via PTRACE_PEEKUSER; SPE state is
   accessed through the EVR regset instead).  */
static int ppc_regmap_e500[] =
 {PT_R0 * 4,     PT_R1 * 4,     PT_R2 * 4,     PT_R3 * 4,
  PT_R4 * 4,     PT_R5 * 4,     PT_R6 * 4,     PT_R7 * 4,
  PT_R8 * 4,     PT_R9 * 4,     PT_R10 * 4,    PT_R11 * 4,
  PT_R12 * 4,    PT_R13 * 4,    PT_R14 * 4,    PT_R15 * 4,
  PT_R16 * 4,    PT_R17 * 4,    PT_R18 * 4,    PT_R19 * 4,
  PT_R20 * 4,    PT_R21 * 4,    PT_R22 * 4,    PT_R23 * 4,
  PT_R24 * 4,    PT_R25 * 4,    PT_R26 * 4,    PT_R27 * 4,
  PT_R28 * 4,    PT_R29 * 4,    PT_R30 * 4,    PT_R31 * 4,
  -1,            -1,            -1,            -1,
  -1,            -1,            -1,            -1,
  -1,            -1,            -1,            -1,
  -1,            -1,            -1,            -1,
  -1,            -1,            -1,            -1,
  -1,            -1,            -1,            -1,
  -1,            -1,            -1,            -1,
  -1,            -1,            -1,            -1,
  PT_NIP * 4,    PT_MSR * 4,    PT_CCR * 4,    PT_LNK * 4,
  PT_CTR * 4,    PT_XER * 4,    -1,
  PT_ORIG_R3 * 4, PT_TRAP * 4
 };
#endif
118
/* Return non-zero if register REGNO cannot be written to the inferior
   via ptrace, zero if it can.  Registers reported here are silently
   skipped when storing the register cache back to the inferior.  */
static int
ppc_cannot_store_register (int regno)
{
  const struct target_desc *tdesc = current_process ()->tdesc;

#ifndef __powerpc64__
  /* Some kernels do not allow us to store fpscr. */
  if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
      && regno == find_regno (tdesc, "fpscr"))
    return 2;
#endif

  /* Some kernels do not allow us to store orig_r3 or trap. */
  if (regno == find_regno (tdesc, "orig_r3")
      || regno == find_regno (tdesc, "trap"))
    return 2;

  return 0;
}
138
/* Return non-zero if register REGNO cannot be read from the inferior.
   Every regmap register is readable on PowerPC, so always return 0.  */
static int
ppc_cannot_fetch_register (int regno)
{
  return 0;
}
144
145 static void
146 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
147 {
148 memset (buf, 0, sizeof (long));
149
150 if (__BYTE_ORDER == __LITTLE_ENDIAN)
151 {
152 /* Little-endian values always sit at the left end of the buffer. */
153 collect_register (regcache, regno, buf);
154 }
155 else if (__BYTE_ORDER == __BIG_ENDIAN)
156 {
157 /* Big-endian values sit at the right end of the buffer. In case of
158 registers whose sizes are smaller than sizeof (long), we must use a
159 padding to access them correctly. */
160 int size = register_size (regcache->tdesc, regno);
161
162 if (size < sizeof (long))
163 collect_register (regcache, regno, buf + sizeof (long) - size);
164 else
165 collect_register (regcache, regno, buf);
166 }
167 else
168 perror_with_name ("Unexpected byte order");
169 }
170
171 static void
172 ppc_supply_ptrace_register (struct regcache *regcache,
173 int regno, const char *buf)
174 {
175 if (__BYTE_ORDER == __LITTLE_ENDIAN)
176 {
177 /* Little-endian values always sit at the left end of the buffer. */
178 supply_register (regcache, regno, buf);
179 }
180 else if (__BYTE_ORDER == __BIG_ENDIAN)
181 {
182 /* Big-endian values sit at the right end of the buffer. In case of
183 registers whose sizes are smaller than sizeof (long), we must use a
184 padding to access them correctly. */
185 int size = register_size (regcache->tdesc, regno);
186
187 if (size < sizeof (long))
188 supply_register (regcache, regno, buf + sizeof (long) - size);
189 else
190 supply_register (regcache, regno, buf);
191 }
192 else
193 perror_with_name ("Unexpected byte order");
194 }
195
196
/* Encoding of the PowerPC "sc" (system call) instruction.  */
#define INSTR_SC        0x44000002
/* System call number of spu_run.  */
#define NR_spu_run      0x0116

/* If the PPU thread is currently stopped on a spu_run system call,
   return to FD and ADDR the file handle and NPC parameter address
   used with the system call.  Return non-zero if successful.  */
static int
parse_spufs_run (struct regcache *regcache, int *fd, CORE_ADDR *addr)
{
  CORE_ADDR curr_pc;
  int curr_insn;
  int curr_r0;

  /* Register width decides which integer type to collect into.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      /* 32-bit inferior.  */
      unsigned int pc, r0, r3, r4;
      collect_register_by_name (regcache, "pc", &pc);
      collect_register_by_name (regcache, "r0", &r0);
      /* orig_r3 holds the original first syscall argument (r3 itself
	 is overwritten by the syscall's return value).  */
      collect_register_by_name (regcache, "orig_r3", &r3);
      collect_register_by_name (regcache, "r4", &r4);
      curr_pc = (CORE_ADDR) pc;
      curr_r0 = (int) r0;
      *fd = (int) r3;
      *addr = (CORE_ADDR) r4;
    }
  else
    {
      /* 64-bit inferior.  */
      unsigned long pc, r0, r3, r4;
      collect_register_by_name (regcache, "pc", &pc);
      collect_register_by_name (regcache, "r0", &r0);
      collect_register_by_name (regcache, "orig_r3", &r3);
      collect_register_by_name (regcache, "r4", &r4);
      curr_pc = (CORE_ADDR) pc;
      curr_r0 = (int) r0;
      *fd = (int) r3;
      *addr = (CORE_ADDR) r4;
    }

  /* Fetch instruction preceding current NIP.  */
  if ((*the_target->read_memory) (curr_pc - 4,
				  (unsigned char *) &curr_insn, 4) != 0)
    return 0;
  /* It should be a "sc" instruction.  */
  if (curr_insn != INSTR_SC)
    return 0;
  /* System call number should be NR_spu_run.  */
  if (curr_r0 != NR_spu_run)
    return 0;

  return 1;
}
248
/* Return the current PC.  If the thread is stopped inside an spu_run
   system call, return instead a synthetic SPU address: bit 63 set,
   the spufs file descriptor in bits 32-62, and the SPU-side PC in
   the low 32 bits (the stored NPC points 4 bytes past the current
   instruction, hence the -4 adjustment).  */
static CORE_ADDR
ppc_get_pc (struct regcache *regcache)
{
  CORE_ADDR addr;
  int fd;

  if (parse_spufs_run (regcache, &fd, &addr))
    {
      unsigned int pc;
      /* ADDR is the inferior-side location of the NPC parameter.  */
      (*the_target->read_memory) (addr, (unsigned char *) &pc, 4);
      return ((CORE_ADDR)1 << 63)
	| ((CORE_ADDR)fd << 32) | (CORE_ADDR) (pc - 4);
    }
  else if (register_size (regcache->tdesc, 0) == 4)
    {
      /* 32-bit inferior: collect into a 4-byte integer.  */
      unsigned int pc;
      collect_register_by_name (regcache, "pc", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      /* 64-bit inferior.  */
      unsigned long pc;
      collect_register_by_name (regcache, "pc", &pc);
      return (CORE_ADDR) pc;
    }
}
275
/* Set the program counter to PC.  If the thread is stopped inside an
   spu_run system call, write the new (SPU-side) PC to the inferior's
   NPC parameter instead of to the regcache.  */
static void
ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  CORE_ADDR addr;
  int fd;

  if (parse_spufs_run (regcache, &fd, &addr))
    {
      /* Only the low 32 bits are meaningful for the SPU-side PC.  */
      unsigned int newpc = pc;
      (*the_target->write_memory) (addr, (unsigned char *) &newpc, 4);
    }
  else if (register_size (regcache->tdesc, 0) == 4)
    {
      /* 32-bit inferior.  */
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      /* 64-bit inferior.  */
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}
298
299
/* Scan the inferior's auxiliary vector for entry TYPE and store its
   value in *VALP.  Return 1 if found, 0 otherwise (in which case
   *VALP is set to 0).  Entry width follows the inferior's word size,
   taken from the current target description.  */
static int
ppc_get_auxv (unsigned long type, unsigned long *valp)
{
  const struct target_desc *tdesc = current_process ()->tdesc;
  int wordsize = register_size (tdesc, 0);
  unsigned char *data = (unsigned char *) alloca (2 * wordsize);
  int offset = 0;

  /* Each auxv entry is a (type, value) pair of word-sized fields;
     read one pair at a time until a short read marks the end.  */
  while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
    {
      if (wordsize == 4)
	{
	  unsigned int *data_p = (unsigned int *)data;
	  if (data_p[0] == type)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}
      else
	{
	  unsigned long *data_p = (unsigned long *)data;
	  if (data_p[0] == type)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}

      offset += 2 * wordsize;
    }

  *valp = 0;
  return 0;
}
335
#ifndef __powerpc64__
/* Non-zero once ppc_arch_setup has patched ppc_regmap/ppc_usrregs_info
   for the detected 32-bit core; the adjustment must run only once.  */
static int ppc_regmap_adjusted;
#endif


/* Correct in either endianness.
   This instruction is "twge r2, r2", which GDB uses as a software
   breakpoint.  */
static const unsigned int ppc_breakpoint = 0x7d821008;
#define ppc_breakpoint_len 4
346
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".
   All PowerPC breakpoints are the single 4-byte trap instruction;
   KIND is ignored.  Stores the length in *SIZE and returns a pointer
   to the instruction bytes.  */

static const gdb_byte *
ppc_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = ppc_breakpoint_len;
  return (const gdb_byte *) &ppc_breakpoint;
}
355
/* Return 1 if a breakpoint instruction is present at WHERE, else 0.
   Addresses with bit 63 set are synthetic SPU addresses (see
   ppc_get_pc): the fd is extracted and the instruction is read
   through the spufs "mem" annex instead of ordinary memory.  */
static int
ppc_breakpoint_at (CORE_ADDR where)
{
  unsigned int insn;

  if (where & ((CORE_ADDR)1 << 63))
    {
      char mem_annex[32];
      sprintf (mem_annex, "%d/mem", (int)((where >> 32) & 0x7fffffff));
      (*the_target->qxfer_spu) (mem_annex, (unsigned char *) &insn,
				NULL, where & 0xffffffff, 4);
      /* 0x3fff is the SPU stop instruction used as a breakpoint.  */
      if (insn == 0x3fff)
	return 1;
    }
  else
    {
      (*the_target->read_memory) (where, (unsigned char *) &insn, 4);
      if (insn == ppc_breakpoint)
	return 1;
      /* If necessary, recognize more trap instructions here.  GDB only uses
	 the one.  */
    }

  return 0;
}
381
382 /* Implement supports_z_point_type target-ops.
383 Returns true if type Z_TYPE breakpoint is supported.
384
385 Handling software breakpoint at server side, so tracepoints
386 and breakpoints can be inserted at the same location. */
387
388 static int
389 ppc_supports_z_point_type (char z_type)
390 {
391 switch (z_type)
392 {
393 case Z_PACKET_SW_BP:
394 return 1;
395 case Z_PACKET_HW_BP:
396 case Z_PACKET_WRITE_WP:
397 case Z_PACKET_ACCESS_WP:
398 default:
399 return 0;
400 }
401 }
402
403 /* Implement insert_point target-ops.
404 Returns 0 on success, -1 on failure and 1 on unsupported. */
405
406 static int
407 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
408 int size, struct raw_breakpoint *bp)
409 {
410 switch (type)
411 {
412 case raw_bkpt_type_sw:
413 return insert_memory_breakpoint (bp);
414
415 case raw_bkpt_type_hw:
416 case raw_bkpt_type_write_wp:
417 case raw_bkpt_type_access_wp:
418 default:
419 /* Unsupported. */
420 return 1;
421 }
422 }
423
424 /* Implement remove_point target-ops.
425 Returns 0 on success, -1 on failure and 1 on unsupported. */
426
427 static int
428 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
429 int size, struct raw_breakpoint *bp)
430 {
431 switch (type)
432 {
433 case raw_bkpt_type_sw:
434 return remove_memory_breakpoint (bp);
435
436 case raw_bkpt_type_hw:
437 case raw_bkpt_type_write_wp:
438 case raw_bkpt_type_access_wp:
439 default:
440 /* Unsupported. */
441 return 1;
442 }
443 }
444
/* Provide only a fill function for the general register set.  ps_lgetregs
   will use this for NPTL support.  */

static void ppc_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

  /* GPRs r0-r31 (regmap indices 0-31).  */
  for (i = 0; i < 32; i++)
    ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);

  /* pc, msr, cr, lr, ctr, xer (indices 64-69).  Indices 32-63 are
     the FPRs, which are not part of the general register set.  */
  for (i = 64; i < 70; i++)
    ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);

  /* orig_r3 and trap (indices 71-72); index 70 (fpscr) is skipped
     because it is not a general register either.  */
  for (i = 71; i < 73; i++)
    ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
}
461
462 static void
463 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
464 {
465 int i, base;
466 char *regset = (char *) buf;
467
468 base = find_regno (regcache->tdesc, "vs0h");
469 for (i = 0; i < 32; i++)
470 collect_register (regcache, base + i, &regset[i * 8]);
471 }
472
473 static void
474 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
475 {
476 int i, base;
477 const char *regset = (const char *) buf;
478
479 base = find_regno (regcache->tdesc, "vs0h");
480 for (i = 0; i < 32; i++)
481 supply_register (regcache, base + i, &regset[i * 8]);
482 }
483
/* Fill the AltiVec regset buffer BUF from REGCACHE: vr0-vr31 (16
   bytes each), then one 16-byte slot holding the 4-byte VSCR, then
   one 16-byte slot holding the 4-byte VRSAVE.  */
static void
ppc_fill_vrregset (struct regcache *regcache, void *buf)
{
  int i, base;
  char *regset = (char *) buf;
  int vscr_offset = 0;

  base = find_regno (regcache->tdesc, "vr0");
  for (i = 0; i < 32; i++)
    collect_register (regcache, base + i, &regset[i * 16]);

  /* On big-endian hosts the 4-byte VSCR occupies the last word of
     its 16-byte slot; on little-endian hosts it is the first.  */
  if (__BYTE_ORDER == __BIG_ENDIAN)
    vscr_offset = 12;

  /* Zero-pad the unused bytes in the fields for vscr and vrsave in
     case they get displayed somewhere. */
  memset (&regset[32 * 16], 0, 16);
  collect_register_by_name (regcache, "vscr",
			    &regset[32 * 16 + vscr_offset]);

  memset (&regset[33 * 16], 0, 16);
  collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
}
507
/* Supply REGCACHE from the AltiVec regset buffer BUF; layout as
   described for ppc_fill_vrregset above.  */
static void
ppc_store_vrregset (struct regcache *regcache, const void *buf)
{
  int i, base;
  const char *regset = (const char *) buf;
  int vscr_offset = 0;

  base = find_regno (regcache->tdesc, "vr0");
  for (i = 0; i < 32; i++)
    supply_register (regcache, base + i, &regset[i * 16]);

  /* On big-endian hosts the 4-byte VSCR occupies the last word of
     its 16-byte slot; on little-endian hosts it is the first.  */
  if (__BYTE_ORDER == __BIG_ENDIAN)
    vscr_offset = 12;

  supply_register_by_name (regcache, "vscr",
			   &regset[32 * 16 + vscr_offset]);
  supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
}
526
/* Layout of the SPE (e500) register set as transferred with
   PTRACE_GETEVRREGS/PTRACE_SETEVRREGS.  */
struct gdb_evrregset_t
{
  unsigned long evr[32];	/* Upper 32 bits of ev0-ev31.  */
  unsigned long long acc;	/* SPE accumulator.  */
  unsigned long spefscr;	/* SPE status and control register.  */
};
533
534 static void
535 ppc_fill_evrregset (struct regcache *regcache, void *buf)
536 {
537 int i, ev0;
538 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
539
540 ev0 = find_regno (regcache->tdesc, "ev0h");
541 for (i = 0; i < 32; i++)
542 collect_register (regcache, ev0 + i, &regset->evr[i]);
543
544 collect_register_by_name (regcache, "acc", &regset->acc);
545 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
546 }
547
548 static void
549 ppc_store_evrregset (struct regcache *regcache, const void *buf)
550 {
551 int i, ev0;
552 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
553
554 ev0 = find_regno (regcache->tdesc, "ev0h");
555 for (i = 0; i < 32; i++)
556 supply_register (regcache, ev0 + i, &regset->evr[i]);
557
558 supply_register_by_name (regcache, "acc", &regset->acc);
559 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
560 }
561
/* Support for hardware single step.  */

/* Return non-zero: the PowerPC Linux kernel supports PTRACE_SINGLESTEP.  */
static int
ppc_supports_hardware_single_step (void)
{
  return 1;
}
569
static struct regset_info ppc_regsets[] = {
  /* List the extra register sets before GENERAL_REGS.  That way we will
     fetch them every time, but still fall back to PTRACE_PEEKUSER for the
     general registers.  Some kernels support these, but not the newer
     PPC_PTRACE_GETREGS.  */
  /* The sizes below start at 0 and are filled in by ppc_arch_setup
     according to the detected hardware features; a size of 0 leaves
     the regset disabled.  */
  { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
  ppc_fill_vsxregset, ppc_store_vsxregset },
  { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
    ppc_fill_vrregset, ppc_store_vrregset },
  { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
    ppc_fill_evrregset, ppc_store_evrregset },
  { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
  NULL_REGSET
};
584
/* Description of the registers reachable via PTRACE_PEEKUSER /
   PTRACE_POKEUSER, using the ppc_regmap offsets above.  */
static struct usrregs_info ppc_usrregs_info =
  {
    ppc_num_regs,
    ppc_regmap,
  };

/* Block-transfer register sets (AltiVec, VSX, SPE, gregset).  */
static struct regsets_info ppc_regsets_info =
  {
    ppc_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

/* Combined register access description returned by ppc_regs_info.  */
static struct regs_info regs_info =
  {
    NULL, /* regset_bitmap */
    &ppc_usrregs_info,
    &ppc_regsets_info
  };
604
/* Implementation of linux_target_ops method "regs_info": return the
   static register access description for this target.  */
static const struct regs_info *
ppc_regs_info (void)
{
  return &regs_info;
}
610
/* Detect the inferior's architecture features (word size, HWCAP bits)
   and install the matching target description; also size the extra
   regsets and, on 32-bit hosts, adjust the usrregs regmap once.  */
static void
ppc_arch_setup (void)
{
  const struct target_desc *tdesc;
  struct regset_info *regset;
  struct ppc_linux_features features = ppc_linux_no_features;

  int tid = lwpid_of (current_thread);

  features.wordsize = ppc_linux_target_wordsize (tid);

  /* Install a provisional base description first, so that
     ppc_get_auxv below can use register_size on it.  */
  if (features.wordsize == 4)
    tdesc = tdesc_powerpc_32l;
  else
    tdesc = tdesc_powerpc_64l;

  current_process ()->tdesc = tdesc;

  /* The value of current_process ()->tdesc needs to be set for this
     call.  */
  ppc_get_auxv (AT_HWCAP, &ppc_hwcap);

  features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);

  if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
    features.vsx = true;

  if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
    features.altivec = true;

  if (ppc_hwcap & PPC_FEATURE_CELL)
    features.cell = true;

  tdesc = ppc_linux_match_description (features);

  /* On 32-bit machines, check for SPE registers.
     Set the low target's regmap field as appropriately.  */
#ifndef __powerpc64__
  if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
    tdesc = tdesc_powerpc_e500l;

  /* The regmap patching below mutates global state, so guard it so
     it only runs on the first call.  */
  if (!ppc_regmap_adjusted)
    {
      if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
	ppc_usrregs_info.regmap = ppc_regmap_e500;

      /* If the FPSCR is 64-bit wide, we need to fetch the whole
	 64-bit slot and not just its second word.  The PT_FPSCR
	 supplied in a 32-bit GDB compilation doesn't reflect
	 this.  */
      if (register_size (tdesc, 70) == 8)
	ppc_regmap[70] = (48 + 2*32) * sizeof (long);

      ppc_regmap_adjusted = 1;
    }
#endif

  /* Install the final, feature-specific description.  */
  current_process ()->tdesc = tdesc;

  /* Enable (size > 0) or disable (size == 0) each optional regset
     according to the detected features.  */
  for (regset = ppc_regsets; regset->size >= 0; regset++)
    switch (regset->get_request)
      {
      case PTRACE_GETVRREGS:
	regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
	break;
      case PTRACE_GETVSXREGS:
	regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
	break;
      case PTRACE_GETEVRREGS:
	if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
	  regset->size = 32 * 4 + 8 + 4;
	else
	  regset->size = 0;
	break;
      default:
	break;
      }
}
689
/* Implementation of linux_target_ops method "supports_tracepoints".
   Tracepoints are supported on this target; always return 1.  */

static int
ppc_supports_tracepoints (void)
{
  return 1;
}
697
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
  struct thread_info *thr = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thr, 1);
  ULONGEST tp = 0;

#ifdef __powerpc64__
  /* 64-bit inferiors keep the thread pointer in r13, 32-bit ones
     in r2.  */
  if (register_size (regcache->tdesc, 0) == 8)
    collect_register_by_name (regcache, "r13", &tp);
  else
#endif
    collect_register_by_name (regcache, "r2", &tp);

  *addr = tp;

  return 0;
}
722
723 #ifdef __powerpc64__
724
/* Older glibc doesn't provide this.  */

#ifndef EF_PPC64_ABI
#define EF_PPC64_ABI 3
#endif

/* Returns 1 if inferior is using ELFv2 ABI.  Undefined for 32-bit
   inferiors.  */

static int
is_elfv2_inferior (void)
{
  /* To be used as fallback if we're unable to determine the right result -
     assume inferior uses the same ABI as gdbserver.  */
#if _CALL_ELF == 2
  const int def_res = 1;
#else
  const int def_res = 0;
#endif
  unsigned long phdr;
  Elf64_Ehdr ehdr;

  if (!ppc_get_auxv (AT_PHDR, &phdr))
    return def_res;

  /* Assume ELF header is at the beginning of the page where program headers
     are located.  If it doesn't look like one, bail.  */

  /* NOTE(review): the return value of read_inferior_memory is not
     checked here; on a failed read the magic comparison below is the
     only guard — confirm this is intentional.  */
  read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
  if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
    return def_res;

  /* EF_PPC64_ABI is a two-bit field; value 2 means ELFv2.  */
  return (ehdr.e_flags & EF_PPC64_ABI) == 2;
}
759
760 #endif
761
/* Generate a ds-form instruction in BUF and return the number of
   32-bit words written (always 1).

   0      6     11    16          30 32
   | OPCD | RST | RA  |     DS    |XO|

   OPCD is the primary opcode, RST the source/target register, RA the
   base register, DS the 16-byte-aligned displacement, XO the extended
   opcode.  */

__attribute__((unused)) /* Maybe unused due to conditional compilation. */
static int
gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
{
  uint32_t insn;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rst & ~0x1f) == 0);
  gdb_assert ((ra & ~0x1f) == 0);
  gdb_assert ((xo & ~0x3) == 0);

  insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
  /* Shift the opcode as unsigned: the ds-form opcodes used here (58,
     62) make OPCD << 26 exceed INT_MAX, and overflowing a signed
     left shift is undefined behavior.  */
  *buf = ((uint32_t) opcd << 26) | insn;
  return 1;
}

/* Followings are frequently used ds-form instructions.  */

#define GEN_STD(buf, rs, ra, offset)	gen_ds_form (buf, 62, rs, ra, offset, 0)
#define GEN_STDU(buf, rs, ra, offset)	gen_ds_form (buf, 62, rs, ra, offset, 1)
#define GEN_LD(buf, rt, ra, offset)	gen_ds_form (buf, 58, rt, ra, offset, 0)
#define GEN_LDU(buf, rt, ra, offset)	gen_ds_form (buf, 58, rt, ra, offset, 1)
789
/* Generate a d-form instruction in BUF and return the number of
   32-bit words written (always 1).

   0      6     11    16             32
   | OPCD | RST | RA  |       D     |

   OPCD is the primary opcode, RST the source/target register, RA the
   base register, SI the 16-bit immediate/displacement.  */

static int
gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
{
  uint32_t insn;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rst & ~0x1f) == 0);
  gdb_assert ((ra & ~0x1f) == 0);

  insn = (rst << 21) | (ra << 16) | (si & 0xffff);
  /* Shift the opcode as unsigned: the d-form opcodes 32/36/37 used
     by GEN_LWZ/GEN_STW/GEN_STWU make OPCD << 26 exceed INT_MAX, and
     overflowing a signed left shift is undefined behavior.  */
  *buf = ((uint32_t) opcd << 26) | insn;
  return 1;
}

/* Followings are frequently used d-form instructions.  */

#define GEN_ADDI(buf, rt, ra, si)	gen_d_form (buf, 14, rt, ra, si)
#define GEN_ADDIS(buf, rt, ra, si)	gen_d_form (buf, 15, rt, ra, si)
#define GEN_LI(buf, rt, si)		GEN_ADDI (buf, rt, 0, si)
#define GEN_LIS(buf, rt, si)		GEN_ADDIS (buf, rt, 0, si)
#define GEN_ORI(buf, rt, ra, si)	gen_d_form (buf, 24, rt, ra, si)
#define GEN_ORIS(buf, rt, ra, si)	gen_d_form (buf, 25, rt, ra, si)
#define GEN_LWZ(buf, rt, ra, si)	gen_d_form (buf, 32, rt, ra, si)
#define GEN_STW(buf, rt, ra, si)	gen_d_form (buf, 36, rt, ra, si)
#define GEN_STWU(buf, rt, ra, si)	gen_d_form (buf, 37, rt, ra, si)
820
/* Generate a xfx-form instruction in BUF and return the number of
   32-bit words written (always 1).

   0      6     11         21        31 32
   | OPCD | RST |    RI    |    XO   |/|

   RI is a 10-bit SPR number whose two 5-bit halves are stored
   swapped in the instruction, hence the computation of N below.  */

static int
gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
{
  uint32_t insn;
  /* Swap the low and high 5-bit halves of the SPR number.  */
  unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rst & ~0x1f) == 0);
  gdb_assert ((xo & ~0x3ff) == 0);

  insn = (rst << 21) | (n << 11) | (xo << 1);
  /* Shift the opcode as unsigned so that OPCD values >= 32 cannot
     overflow a signed left shift (undefined behavior); also keeps
     this consistent with the other gen_*_form helpers.  */
  *buf = ((uint32_t) opcd << 26) | insn;
  return 1;
}

/* Followings are frequently used xfx-form instructions.  */

#define GEN_MFSPR(buf, rt, spr)	gen_xfx_form (buf, 31, rt, spr, 339)
#define GEN_MTSPR(buf, rt, spr)	gen_xfx_form (buf, 31, rt, spr, 467)
#define GEN_MFCR(buf, rt)	gen_xfx_form (buf, 31, rt, 0, 19)
#define GEN_MTCR(buf, rt)	gen_xfx_form (buf, 31, rt, 0x3cf, 144)
#define GEN_SYNC(buf, L, E)	gen_xfx_form (buf, 31, L & 0x3, \
					      E & 0xf, 598)
#define GEN_LWSYNC(buf)		GEN_SYNC (buf, 1, 0)
852
/* Generate a x-form instruction in BUF and return the number of
   32-bit words written (always 1).

   0      6     11    16    21       31 32
   | OPCD | RST | RA  | RB  |   XO   |RC|  */

static int
gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
{
  uint32_t insn;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rst & ~0x1f) == 0);
  gdb_assert ((ra & ~0x1f) == 0);
  gdb_assert ((rb & ~0x1f) == 0);
  gdb_assert ((xo & ~0x3ff) == 0);
  gdb_assert ((rc & ~1) == 0);

  insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
  /* Shift the opcode as unsigned so that OPCD values >= 32 cannot
     overflow a signed left shift (undefined behavior); also keeps
     this consistent with the other gen_*_form helpers.  */
  *buf = ((uint32_t) opcd << 26) | insn;
  return 1;
}

/* Followings are frequently used x-form instructions.  */

#define GEN_OR(buf, ra, rs, rb)		gen_x_form (buf, 31, rs, ra, rb, 444, 0)
#define GEN_MR(buf, ra, rs)		GEN_OR (buf, ra, rs, rs)
#define GEN_LWARX(buf, rt, ra, rb)	gen_x_form (buf, 31, rt, ra, rb, 20, 0)
#define GEN_STWCX(buf, rs, ra, rb)	gen_x_form (buf, 31, rs, ra, rb, 150, 1)
/* Assume bf = cr7.  */
#define GEN_CMPW(buf, ra, rb)		gen_x_form (buf, 31, 28, ra, rb, 0, 0)
884
/* Generate a md-form instruction in BUF and return the number of
   32-bit words written (always 1).

   0      6    11    16    21    27   30 31 32
   | OPCD | RS | RA  | sh  | mb  | XO |sh|Rc|

   SH is the 6-bit shift amount (split into a 5-bit low part and a
   separate high bit), MB the 6-bit mask-begin field (stored with its
   bits rearranged, see N below).  */

static int
gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
	     int xo, int rc)
{
  uint32_t insn;
  /* MB is encoded with its high bit moved to the low position.  */
  unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
  unsigned int sh0_4 = sh & 0x1f;
  unsigned int sh5 = (sh >> 5) & 1;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rs & ~0x1f) == 0);
  gdb_assert ((ra & ~0x1f) == 0);
  gdb_assert ((sh & ~0x3f) == 0);
  gdb_assert ((mb & ~0x3f) == 0);
  gdb_assert ((xo & ~0x7) == 0);
  gdb_assert ((rc & ~0x1) == 0);

  insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
	 | (sh5 << 1) | (xo << 2) | (rc & 1);
  /* Shift the opcode as unsigned so that OPCD values >= 32 cannot
     overflow a signed left shift (undefined behavior); also keeps
     this consistent with the other gen_*_form helpers.  */
  *buf = ((uint32_t) opcd << 26) | insn;
  return 1;
}

/* The following are frequently used md-form instructions.  */

#define GEN_RLDICL(buf, ra, rs ,sh, mb) \
		gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
#define GEN_RLDICR(buf, ra, rs ,sh, mb) \
		gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
919
/* Generate a i-form instruction in BUF and return the number of
   32-bit words written (always 1).

   0      6                          30 31 32
   | OPCD |            LI            |AA|LK|

   LI is a byte displacement whose low two bits are dropped by the
   mask (branch targets are word-aligned); AA selects absolute
   addressing and LK sets the link bit.  */

static int
gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
{
  uint32_t insn;

  gdb_assert ((opcd & ~0x3f) == 0);

  insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
  /* Shift the opcode as unsigned so that OPCD values >= 32 cannot
     overflow a signed left shift (undefined behavior); also keeps
     this consistent with the other gen_*_form helpers.  */
  *buf = ((uint32_t) opcd << 26) | insn;
  return 1;
}

/* The following are frequently used i-form instructions.  */

#define GEN_B(buf, li)		gen_i_form (buf, 18, li, 0, 0)
#define GEN_BL(buf, li)		gen_i_form (buf, 18, li, 0, 1)
941
/* Generate a b-form instruction in BUF and return the number of
   32-bit words written (always 1).

   0      6     11    16             30 31 32
   | OPCD | BO  | BI  |      BD     |AA|LK|

   BO encodes the branch condition behavior, BI the condition bit to
   test, BD the word-aligned byte displacement.  */

static int
gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
	    int aa, int lk)
{
  uint32_t insn;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((bo & ~0x1f) == 0);
  gdb_assert ((bi & ~0x1f) == 0);

  insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
  /* Shift the opcode as unsigned so that OPCD values >= 32 cannot
     overflow a signed left shift (undefined behavior); also keeps
     this consistent with the other gen_*_form helpers.  */
  *buf = ((uint32_t) opcd << 26) | insn;
  return 1;
}

/* The following are frequently used b-form instructions.  */
/* Assume bi = cr7.  */
#define GEN_BNE(buf, bd)  gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
965
/* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
   respectively.  They are primary used for save/restore GPRs in jump-pad,
   not used for bytecode compiling.  On a 64-bit gdbserver the IS_64
   argument selects between ld/std and lwz/stw at run time; on a
   32-bit build only the 32-bit forms exist and IS_64 is ignored.  */

#ifdef __powerpc64__
#define GEN_LOAD(buf, rt, ra, si, is_64)	(is_64 ? \
						 GEN_LD (buf, rt, ra, si) : \
						 GEN_LWZ (buf, rt, ra, si))
#define GEN_STORE(buf, rt, ra, si, is_64)	(is_64 ? \
						 GEN_STD (buf, rt, ra, si) : \
						 GEN_STW (buf, rt, ra, si))
#else
#define GEN_LOAD(buf, rt, ra, si, is_64)	GEN_LWZ (buf, rt, ra, si)
#define GEN_STORE(buf, rt, ra, si, is_64)	GEN_STW (buf, rt, ra, si)
#endif
981
/* Generate a sequence of instructions to load IMM in the register REG.
   Write the instructions in BUF and return the number of 32-bit words
   written.  IS_64 selects 64-bit semantics; a full 64-bit immediate
   requires it.  */

static int
gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
{
  uint32_t *p = buf;

  /* True when IMM fits in a signed 16-bit immediate.  */
  if ((imm + 32768) < 65536)
    {
      /* li reg, imm[15:0] */
      p += GEN_LI (p, reg, imm);
    }
  else if ((imm >> 32) == 0)
    {
      /* IMM fits in 32 bits:
	 lis reg, imm[31:16]
	 ori reg, reg, imm[15:0]
	 rldicl reg, reg, 0, 32 */
      p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
      if ((imm & 0xffff) != 0)
	p += GEN_ORI (p, reg, reg, imm & 0xffff);
      /* Clear upper 32-bit if sign-bit is set, since lis
	 sign-extends its result.  */
      if (imm & (1u << 31) && is_64)
	p += GEN_RLDICL (p, reg, reg, 0, 32);
    }
  else
    {
      gdb_assert (is_64);
      /* Build the upper half, shift it into place, then OR in the
	 lower half:
	 lis reg, <imm[63:48]>
	 ori reg, reg, <imm[47:32]>
	 rldicr reg, reg, 32, 31
	 oris reg, reg, <imm[31:16]>
	 ori reg, reg, <imm[15:0]> */
      p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
      if (((imm >> 32) & 0xffff) != 0)
	p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
      p += GEN_RLDICR (p, reg, reg, 32, 31);
      if (((imm >> 16) & 0xffff) != 0)
	p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
      if ((imm & 0xffff) != 0)
	p += GEN_ORI (p, reg, reg, (imm & 0xffff));
    }

  return p - buf;
}
1027
/* Generate a sequence for atomically exchange at location LOCK.
   This code sequence clobbers r6, r7, r8.  LOCK is the location for
   the atomic-xchg, OLD_VALUE is expected old value stored in the
   location, and R_NEW is a register for the new value.  Returns the
   number of 32-bit words written to BUF.  */

static int
gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
		 int is_64)
{
  const int r_lock = 6;
  const int r_old = 7;
  const int r_tmp = 8;
  uint32_t *p = buf;

  /*
  1: lwarx   TMP, 0, LOCK
     cmpwi   TMP, OLD
     bne     1b
     stwcx.  NEW, 0, LOCK
     bne     1b */

  /* Materialize the lock address and expected value first.  */
  p += gen_limm (p, r_lock, lock, is_64);
  p += gen_limm (p, r_old, old_value, is_64);

  /* lwarx/stwcx. reservation loop; both branches re-enter at the
     lwarx (hence the -8/-16 backward displacements).  */
  p += GEN_LWARX (p, r_tmp, 0, r_lock);
  p += GEN_CMPW (p, r_tmp, r_old);
  p += GEN_BNE (p, -8);
  p += GEN_STWCX (p, r_new, 0, r_lock);
  p += GEN_BNE (p, -16);

  return p - buf;
}
1060
/* Generate a sequence of instructions for calling a function
   at address of FN.  Return the number of 32-bit words written in
   BUF.  IS_OPD indicates that FN is the address of an ELFv1 function
   descriptor (OPD entry) rather than the code entry point itself.  */

static int
gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
{
  uint32_t *p = buf;

  /* Must be called by r12 for caller to calculate TOC address. */
  p += gen_limm (p, 12, fn, is_64);
  if (is_opd)
    {
      /* Load the environment pointer, TOC pointer and actual entry
	 point out of the function descriptor.  */
      p += GEN_LOAD (p, 11, 12, 16, is_64);
      p += GEN_LOAD (p, 2, 12, 8, is_64);
      p += GEN_LOAD (p, 12, 12, 0, is_64);
    }
  p += GEN_MTSPR (p, 12, 9);		/* mtctr  r12 */
  *p++ = 0x4e800421;			/* bctrl */

  return p - buf;
}
1082
/* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
   of instruction.  This function is used to adjust pc-relative instructions
   when copying.  If the instruction cannot be relocated (displacement out
   of range), *TO is left unchanged, which the caller detects as a
   zero-length relocation.  */

static void
ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
{
  uint32_t insn, op6;
  long rel, newrel;

  read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
  op6 = PPC_OP6 (insn);

  if (op6 == 18 && (insn & 2) == 0)
    {
      /* branch && AA = 0 (relative, not absolute, addressing) */
      rel = PPC_LI (insn);
      newrel = (oldloc - *to) + rel;

      /* Out of range.  Cannot relocate instruction.  */
      if (newrel >= (1 << 25) || newrel < -(1 << 25))
	return;

      /* Patch the 24-bit LI displacement field (bits 6-29).  */
      insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
    }
  else if (op6 == 16 && (insn & 2) == 0)
    {
      /* conditional branch && AA = 0 */

      /* If the new relocation is too big for even a 26-bit unconditional
	 branch, there is nothing we can do.  Just abort.

	 Otherwise, if it can be fit in 16-bit conditional branch, just
	 copy the instruction and relocate the address.

	 If it's too big for a conditional branch (16-bit), try to invert
	 the condition and jump with a 26-bit branch.  For example,

	 beq  .Lgoto
	 INSN1

	 =>

	 bne  1f (+8)
	 b    .Lgoto
	 1:INSN1

	 After this transform, we are actually jumping from *TO+4 instead
	 of *TO, so check the relocation again because it will be 1-insn
	 farther than before if *TO is after OLDLOC.


	 For BDNZT (or so) is transformed from

	 bdnzt  eq, .Lgoto
	 INSN1

	 =>

	 bdz    1f (+12)
	 bf     eq, 1f (+8)
	 b      .Lgoto
	 1:INSN1

	 See also "BO field encodings".  */

      rel = PPC_BD (insn);
      newrel = (oldloc - *to) + rel;

      if (newrel < (1 << 15) && newrel >= -(1 << 15))
	/* Fits in the 14-bit BD field: patch it in place.  */
	insn = (insn & ~0xfffc) | (newrel & 0xfffc);
      else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
	{
	  /* Condition-only or CTR-only branch: invert the condition and
	     follow with an unconditional branch (second example above).  */
	  newrel -= 4;

	  /* Out of range.  Cannot relocate instruction.  */
	  if (newrel >= (1 << 25) || newrel < -(1 << 25))
	    return;

	  if ((PPC_BO (insn) & 0x14) == 0x4)
	    insn ^= (1 << 24);		/* Invert the CR-bit sense.  */
	  else if ((PPC_BO (insn) & 0x14) == 0x10)
	    insn ^= (1 << 22);		/* Invert the CTR-zero sense.  */

	  /* Jump over the unconditional branch.  */
	  insn = (insn & ~0xfffc) | 0x8;
	  write_inferior_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  /* Build a unconditional branch and copy LK bit.  */
	  insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
	  write_inferior_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  return;
	}
      else if ((PPC_BO (insn) & 0x14) == 0)
	{
	  /* Branch tests both CTR and a CR bit (third example above):
	     split into a bdz and a bf, each inverted, then the long
	     unconditional branch.  */
	  uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
	  uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;

	  newrel -= 8;

	  /* Out of range.  Cannot relocate instruction.  */
	  if (newrel >= (1 << 25) || newrel < -(1 << 25))
	    return;

	  /* Copy BI field.  */
	  bf_insn |= (insn & 0x1f0000);

	  /* Invert condition.  */
	  bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
	  bf_insn |= (insn ^ (1 << 24)) & (1 << 24);

	  write_inferior_memory (*to, (unsigned char *) &bdnz_insn, 4);
	  *to += 4;
	  write_inferior_memory (*to, (unsigned char *) &bf_insn, 4);
	  *to += 4;

	  /* Build a unconditional branch and copy LK bit.  */
	  insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
	  write_inferior_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  return;
	}
      else /* (BO & 0x14) == 0x14, branch always.  */
	{
	  /* Out of range.  Cannot relocate instruction.  */
	  if (newrel >= (1 << 25) || newrel < -(1 << 25))
	    return;

	  /* Build a unconditional branch and copy LK bit.  */
	  insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
	  write_inferior_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  return;
	}
    }

  write_inferior_memory (*to, (unsigned char *) &insn, 4);
  *to += 4;
}
1227
1228 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1229 See target.h for details. */
1230
1231 static int
1232 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1233 CORE_ADDR collector,
1234 CORE_ADDR lockaddr,
1235 ULONGEST orig_size,
1236 CORE_ADDR *jump_entry,
1237 CORE_ADDR *trampoline,
1238 ULONGEST *trampoline_size,
1239 unsigned char *jjump_pad_insn,
1240 ULONGEST *jjump_pad_insn_size,
1241 CORE_ADDR *adjusted_insn_addr,
1242 CORE_ADDR *adjusted_insn_addr_end,
1243 char *err)
1244 {
1245 uint32_t buf[256];
1246 uint32_t *p = buf;
1247 int j, offset;
1248 CORE_ADDR buildaddr = *jump_entry;
1249 const CORE_ADDR entryaddr = *jump_entry;
1250 int rsz, min_frame, frame_size, tp_reg;
1251 #ifdef __powerpc64__
1252 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1253 int is_64 = register_size (regcache->tdesc, 0) == 8;
1254 int is_opd = is_64 && !is_elfv2_inferior ();
1255 #else
1256 int is_64 = 0, is_opd = 0;
1257 #endif
1258
1259 #ifdef __powerpc64__
1260 if (is_64)
1261 {
1262 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1263 rsz = 8;
1264 min_frame = 112;
1265 frame_size = (40 * rsz) + min_frame;
1266 tp_reg = 13;
1267 }
1268 else
1269 {
1270 #endif
1271 rsz = 4;
1272 min_frame = 16;
1273 frame_size = (40 * rsz) + min_frame;
1274 tp_reg = 2;
1275 #ifdef __powerpc64__
1276 }
1277 #endif
1278
1279 /* Stack frame layout for this jump pad,
1280
1281 High thread_area (r13/r2) |
1282 tpoint - collecting_t obj
1283 PC/<tpaddr> | +36
1284 CTR | +35
1285 LR | +34
1286 XER | +33
1287 CR | +32
1288 R31 |
1289 R29 |
1290 ... |
1291 R1 | +1
1292 R0 - collected registers
1293 ... |
1294 ... |
1295 Low Back-chain -
1296
1297
1298 The code flow of this jump pad,
1299
1300 1. Adjust SP
1301 2. Save GPR and SPR
1302 3. Prepare argument
1303 4. Call gdb_collector
1304 5. Restore GPR and SPR
1305 6. Restore SP
1306 7. Build a jump for back to the program
1307 8. Copy/relocate original instruction
1308 9. Build a jump for replacing orignal instruction. */
1309
1310 /* Adjust stack pointer. */
1311 if (is_64)
1312 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1313 else
1314 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1315
1316 /* Store GPRs. Save R1 later, because it had just been modified, but
1317 we want the original value. */
1318 for (j = 2; j < 32; j++)
1319 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1320 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1321 /* Set r0 to the original value of r1 before adjusting stack frame,
1322 and then save it. */
1323 p += GEN_ADDI (p, 0, 1, frame_size);
1324 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1325
1326 /* Save CR, XER, LR, and CTR. */
1327 p += GEN_MFCR (p, 3); /* mfcr r3 */
1328 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1329 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1330 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1331 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1332 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1333 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1334 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1335
1336 /* Save PC<tpaddr> */
1337 p += gen_limm (p, 3, tpaddr, is_64);
1338 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1339
1340
1341 /* Setup arguments to collector. */
1342 /* Set r4 to collected registers. */
1343 p += GEN_ADDI (p, 4, 1, min_frame);
1344 /* Set r3 to TPOINT. */
1345 p += gen_limm (p, 3, tpoint, is_64);
1346
1347 /* Prepare collecting_t object for lock. */
1348 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1349 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1350 /* Set R5 to collecting object. */
1351 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1352
1353 p += GEN_LWSYNC (p);
1354 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1355 p += GEN_LWSYNC (p);
1356
1357 /* Call to collector. */
1358 p += gen_call (p, collector, is_64, is_opd);
1359
1360 /* Simply write 0 to release the lock. */
1361 p += gen_limm (p, 3, lockaddr, is_64);
1362 p += gen_limm (p, 4, 0, is_64);
1363 p += GEN_LWSYNC (p);
1364 p += GEN_STORE (p, 4, 3, 0, is_64);
1365
1366 /* Restore stack and registers. */
1367 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1368 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1369 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1370 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1371 p += GEN_MTCR (p, 3); /* mtcr r3 */
1372 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1373 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1374 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1375
1376 /* Restore GPRs. */
1377 for (j = 2; j < 32; j++)
1378 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1379 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1380 /* Restore SP. */
1381 p += GEN_ADDI (p, 1, 1, frame_size);
1382
1383 /* Flush instructions to inferior memory. */
1384 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1385
1386 /* Now, insert the original instruction to execute in the jump pad. */
1387 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1388 *adjusted_insn_addr_end = *adjusted_insn_addr;
1389 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1390
1391 /* Verify the relocation size. If should be 4 for normal copy,
1392 8 or 12 for some conditional branch. */
1393 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1394 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1395 {
1396 sprintf (err, "E.Unexpected instruction length = %d"
1397 "when relocate instruction.",
1398 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1399 return 1;
1400 }
1401
1402 buildaddr = *adjusted_insn_addr_end;
1403 p = buf;
1404 /* Finally, write a jump back to the program. */
1405 offset = (tpaddr + 4) - buildaddr;
1406 if (offset >= (1 << 25) || offset < -(1 << 25))
1407 {
1408 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1409 "(offset 0x%x > 26-bit).", offset);
1410 return 1;
1411 }
1412 /* b <tpaddr+4> */
1413 p += GEN_B (p, offset);
1414 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1415 *jump_entry = buildaddr + (p - buf) * 4;
1416
1417 /* The jump pad is now built. Wire in a jump to our jump pad. This
1418 is always done last (by our caller actually), so that we can
1419 install fast tracepoints with threads running. This relies on
1420 the agent's atomic write support. */
1421 offset = entryaddr - tpaddr;
1422 if (offset >= (1 << 25) || offset < -(1 << 25))
1423 {
1424 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1425 "(offset 0x%x > 26-bit).", offset);
1426 return 1;
1427 }
1428 /* b <jentry> */
1429 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1430 *jjump_pad_insn_size = 4;
1431
1432 return 0;
1433 }
1434
/* Returns the minimum instruction length for installing a tracepoint.  */

static int
ppc_get_min_fast_tracepoint_insn_len (void)
{
  /* Every PowerPC instruction is a single 4-byte word, so a fast
     tracepoint only ever needs to displace one instruction.  */
  const int ppc_insn_size = 4;

  return ppc_insn_size;
}
1442
1443 /* Emits a given buffer into the target at current_insn_ptr. Length
1444 is in units of 32-bit words. */
1445
1446 static void
1447 emit_insns (uint32_t *buf, int n)
1448 {
1449 n = n * sizeof (uint32_t);
1450 write_inferior_memory (current_insn_ptr, (unsigned char *) buf, n);
1451 current_insn_ptr += n;
1452 }
1453
/* EMIT_ASM assembles INSNS at compile time into a dedicated section
   (.text.__ppcbcax) bracketed by start/end labels, then at run time
   copies that pre-assembled sequence into the inferior with
   emit_insns.  The two-level expansion through _EMIT_ASM lets
   __LINE__ expand first, giving each use a unique label pair.  */

#define __EMIT_ASM(NAME, INSNS)					\
  do								\
    {								\
      extern uint32_t start_bcax_ ## NAME [];			\
      extern uint32_t end_bcax_ ## NAME [];			\
      emit_insns (start_bcax_ ## NAME,				\
		  end_bcax_ ## NAME - start_bcax_ ## NAME);	\
      __asm__ (".section .text.__ppcbcax\n\t"			\
	       "start_bcax_" #NAME ":\n\t"			\
	       INSNS "\n\t"					\
	       "end_bcax_" #NAME ":\n\t"			\
	       ".previous\n\t");				\
    } while (0)

#define _EMIT_ASM(NAME, INSNS)		__EMIT_ASM (NAME, INSNS)
#define EMIT_ASM(INSNS)			_EMIT_ASM (__LINE__, INSNS)
1470
1471 /*
1472
1473 Bytecode execution stack frame - 32-bit
1474
1475 | LR save area (SP + 4)
1476 SP' -> +- Back chain (SP + 0)
1477 | Save r31 for access saved arguments
1478 | Save r30 for bytecode stack pointer
1479 | Save r4 for incoming argument *value
1480 | Save r3 for incoming argument regs
1481 r30 -> +- Bytecode execution stack
1482 |
	   | 64 bytes (8 doublewords) initially.
1484 | Expand stack as needed.
1485 |
1486 +-
1487 | Some padding for minimum stack frame and 16-byte alignment.
1488 | 16 bytes.
1489 SP +- Back-chain (SP')
1490
1491 initial frame size
1492 = 16 + (4 * 4) + 64
1493 = 96
1494
1495 r30 is the stack-pointer for bytecode machine.
1496 It should point to next-empty, so we can use LDU for pop.
1497 r3 is used for cache of the high part of TOP value.
1498 It was the first argument, pointer to regs.
1499 r4 is used for cache of the low part of TOP value.
1500 It was the second argument, pointer to the result.
1501 We should set *result = TOP after leaving this function.
1502
1503 Note:
1504 * To restore stack at epilogue
1505 => sp = r31
1506 * To check stack is big enough for bytecode execution.
1507 => r30 - 8 > SP + 8
1508 * To return execution result.
1509 => 0(r4) = TOP
1510
1511 */
1512
/* Regardless of endian, register 3 is always high part, 4 is low part.
   These defines are used when the register pair is stored/loaded.
   Likewise, to simplify code, have a similar define for 5:6.
   "FIRST" is the register whose word sits at the lower address in
   memory, "SECOND" the one at the higher address.  */

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define TOP_FIRST	"4"
#define TOP_SECOND	"3"
#define TMP_FIRST	"6"
#define TMP_SECOND	"5"
#else
#define TOP_FIRST	"3"
#define TOP_SECOND	"4"
#define TMP_FIRST	"5"
#define TMP_SECOND	"6"
#endif
1528
/* Emit prologue in inferior memory.  See above comments.
   Establishes the 96-byte bytecode frame, saves LR/r31/r30 and the
   two incoming arguments (regs in r3, result pointer in r4), then
   points r31 at the caller's SP and r30 at the bytecode stack top.  */

static void
ppc_emit_prologue (void)
{
  EMIT_ASM (/* Save return address.  */
	    "mflr 0		\n"
	    "stw 0, 4(1)	\n"
	    /* Adjust SP.  96 is the initial frame size.  */
	    "stwu 1, -96(1)	\n"
	    /* Save r30 and incoming arguments.  */
	    "stw 31, 96-4(1)	\n"
	    "stw 30, 96-8(1)	\n"
	    "stw 4, 96-12(1)	\n"
	    "stw 3, 96-16(1)	\n"
	    /* Point r31 to original r1 for access arguments.  */
	    "addi 31, 1, 96	\n"
	    /* Set r30 to pointing stack-top.  */
	    "addi 30, 1, 64	\n"
	    /* Initial r3/TOP to 0.  */
	    "li 3, 0		\n"
	    "li 4, 0		\n");
}
1552
/* Emit epilogue in inferior memory.  See above comments.
   Stores the 64-bit TOP through the saved result pointer, restores
   r31/r30/SP/LR and returns 0.  Note SP is restored via the back
   chain at 0(r1); the prologue arranged for that slot (directly, or
   via ppc_emit_stack_flush's expansion chain) to lead back to the
   entry SP, where LR was saved at offset 4.  */

static void
ppc_emit_epilogue (void)
{
  EMIT_ASM (/* *result = TOP */
	    "lwz 5, -12(31)	\n"
	    "stw " TOP_FIRST ", 0(5)	\n"
	    "stw " TOP_SECOND ", 4(5)	\n"
	    /* Restore registers.  */
	    "lwz 31, -4(31)	\n"
	    "lwz 30, -8(31)	\n"
	    /* Restore SP.  */
	    "lwz 1, 0(1)	\n"
	    /* Restore LR.  */
	    "lwz 0, 4(1)	\n"
	    /* Return 0 for no-error.  */
	    "li 3, 0		\n"
	    "mtlr 0		\n"
	    "blr		\n");
}
1574
/* TOP = stack[--sp] + TOP */

static void
ppc_emit_add (void)
{
  /* Pop the second operand into r5:r6, then 64-bit add into the TOP
     pair with carry propagated from the low words.  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)\n"
	    "addc 4, 6, 4	\n"
	    "adde 3, 5, 3	\n");
}

/* TOP = stack[--sp] - TOP */

static void
ppc_emit_sub (void)
{
  /* subfc/subfe compute r6 - r4 (popped minus TOP) with borrow.  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "subfc 4, 4, 6	\n"
	    "subfe 3, 3, 5	\n");
}

/* TOP = stack[--sp] * TOP */

static void
ppc_emit_mul (void)
{
  /* 64x64 -> low 64 bits via 32-bit parts:
     low  = lo(a) * lo(b)
     high = hi(lo(a)*lo(b)) + lo(a)*hi(b) + hi(a)*lo(b).  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "mulhwu 7, 6, 4	\n"
	    "mullw 3, 6, 3	\n"
	    "mullw 5, 4, 5	\n"
	    "mullw 4, 6, 4	\n"
	    "add 3, 5, 3	\n"
	    "add 3, 7, 3	\n");
}
1611
/* TOP = stack[--sp] << TOP */

static void
ppc_emit_lsh (void)
{
  /* 64-bit left shift built from 32-bit shifts.  The srw/slw pair
     covers both the shift < 32 and shift >= 32 cases: PPC shift
     instructions yield 0 when the (6-bit) shift amount is >= 32, so
     exactly one of the two "shift low into high" terms is non-zero.  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "subfic 3, 4, 32\n"		/* r3 = 32 - TOP */
	    "addi 7, 4, -32\n"		/* r7 = TOP - 32 */
	    "slw 5, 5, 4\n"		/* Shift high part left */
	    "slw 4, 6, 4\n"		/* Shift low part left */
	    "srw 3, 6, 3\n"		/* Shift low to high if shift < 32 */
	    "slw 7, 6, 7\n"		/* Shift low to high if shift >= 32 */
	    "or 3, 5, 3\n"
	    "or 3, 7, 3\n");		/* Assemble high part */
}

/* Top = stack[--sp] >> TOP
   (Arithmetic shift right) */

static void
ppc_emit_rsh_signed (void)
{
  /* Arithmetic right shift needs an explicit branch because the
     sign-fill behaviour of sraw differs between the <= 32 and > 32
     cases (the high word must propagate its sign into the low word).  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "addi 7, 4, -32\n"		/* r7 = TOP - 32 */
	    "sraw 3, 5, 4\n"		/* Shift high part right */
	    "cmpwi 7, 1\n"
	    "blt 0, 1f\n"		/* If shift <= 32, goto 1: */
	    "sraw 4, 5, 7\n"		/* Shift high to low */
	    "b 2f\n"
	    "1:\n"
	    "subfic 7, 4, 32\n"		/* r7 = 32 - TOP */
	    "srw 4, 6, 4\n"		/* Shift low part right */
	    "slw 5, 5, 7\n"		/* Shift high to low */
	    "or 4, 4, 5\n"		/* Assemble low part */
	    "2:\n");
}

/* Top = stack[--sp] >> TOP
   (Logical shift right) */

static void
ppc_emit_rsh_unsigned (void)
{
  /* Mirror of ppc_emit_lsh: branch-free because zero-fill shifts
     compose; one of the two "high into low" terms is always zero.  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "subfic 3, 4, 32\n"		/* r3 = 32 - TOP */
	    "addi 7, 4, -32\n"		/* r7 = TOP - 32 */
	    "srw 6, 6, 4\n"		/* Shift low part right */
	    "slw 3, 5, 3\n"		/* Shift high to low if shift < 32 */
	    "srw 7, 5, 7\n"		/* Shift high to low if shift >= 32 */
	    "or 6, 6, 3\n"
	    "srw 3, 5, 4\n"		/* Shift high part right */
	    "or 4, 6, 7\n");		/* Assemble low part */
}
1668
1669 /* Emit code for signed-extension specified by ARG. */
1670
1671 static void
1672 ppc_emit_ext (int arg)
1673 {
1674 switch (arg)
1675 {
1676 case 8:
1677 EMIT_ASM ("extsb 4, 4\n"
1678 "srawi 3, 4, 31");
1679 break;
1680 case 16:
1681 EMIT_ASM ("extsh 4, 4\n"
1682 "srawi 3, 4, 31");
1683 break;
1684 case 32:
1685 EMIT_ASM ("srawi 3, 4, 31");
1686 break;
1687 default:
1688 emit_error = 1;
1689 }
1690 }
1691
1692 /* Emit code for zero-extension specified by ARG. */
1693
1694 static void
1695 ppc_emit_zero_ext (int arg)
1696 {
1697 switch (arg)
1698 {
1699 case 8:
1700 EMIT_ASM ("clrlwi 4,4,24\n"
1701 "li 3, 0\n");
1702 break;
1703 case 16:
1704 EMIT_ASM ("clrlwi 4,4,16\n"
1705 "li 3, 0\n");
1706 break;
1707 case 32:
1708 EMIT_ASM ("li 3, 0");
1709 break;
1710 default:
1711 emit_error = 1;
1712 }
1713 }
1714
/* TOP = !TOP
   i.e., TOP = (TOP == 0) ? 1 : 0;  */

static void
ppc_emit_log_not (void)
{
  /* OR the halves; cntlzw of 0 is 32, of anything else < 32, so
     shifting right by 5 yields exactly the boolean result.  */
  EMIT_ASM ("or 4, 3, 4	\n"
	    "cntlzw 4, 4	\n"
	    "srwi 4, 4, 5	\n"
	    "li 3, 0	\n");
}

/* TOP = stack[--sp] & TOP */

static void
ppc_emit_bit_and (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "and 4, 6, 4	\n"
	    "and 3, 5, 3	\n");
}

/* TOP = stack[--sp] | TOP */

static void
ppc_emit_bit_or (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "or 4, 6, 4	\n"
	    "or 3, 5, 3	\n");
}

/* TOP = stack[--sp] ^ TOP */

static void
ppc_emit_bit_xor (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "xor 4, 6, 4	\n"
	    "xor 3, 5, 3	\n");
}

/* TOP = ~TOP
   i.e., TOP = ~(TOP | TOP) */

static void
ppc_emit_bit_not (void)
{
  EMIT_ASM ("nor 3, 3, 3	\n"
	    "nor 4, 4, 4	\n");
}

/* TOP = stack[--sp] == TOP */

static void
ppc_emit_equal (void)
{
  /* XOR the pairs; the combined result is zero iff all 64 bits are
     equal, then reuse the cntlzw/srwi boolean trick (see log_not).  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "xor 4, 6, 4	\n"
	    "xor 3, 5, 3	\n"
	    "or 4, 3, 4	\n"
	    "cntlzw 4, 4	\n"
	    "srwi 4, 4, 5	\n"
	    "li 3, 0	\n");
}

/* TOP = stack[--sp] < TOP
   (Signed comparison)  */

static void
ppc_emit_less_signed (void)
{
  /* 64-bit signed compare: high words signed (cmpw), low words
     unsigned (cmplw).  Less-than iff high-less, or high-equal and
     low-less.  The result bit is extracted from CR7 via mfcr.  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "cmplw 6, 6, 4	\n"
	    "cmpw 7, 5, 3	\n"
	    /* CR6 bit 0 = low less and high equal */
	    "crand 6*4+0, 6*4+0, 7*4+2\n"
	    /* CR7 bit 0 = (low less and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "mfcr 4	\n"
	    "rlwinm 4, 4, 29, 31, 31	\n"
	    "li 3, 0	\n");
}

/* TOP = stack[--sp] < TOP
   (Unsigned comparison)  */

static void
ppc_emit_less_unsigned (void)
{
  /* Same structure as less_signed, but the high-word compare is also
     unsigned (cmplw).  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "cmplw 6, 6, 4	\n"
	    "cmplw 7, 5, 3	\n"
	    /* CR6 bit 0 = low less and high equal */
	    "crand 6*4+0, 6*4+0, 7*4+2\n"
	    /* CR7 bit 0 = (low less and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "mfcr 4	\n"
	    "rlwinm 4, 4, 29, 31, 31	\n"
	    "li 3, 0	\n");
}
1822
/* Access the memory address in TOP in size of SIZE.
   Zero-extend the read value.  SIZE is 1, 2, 4 or 8 bytes; any other
   size emits nothing.  For 8-byte reads the two words are loaded in
   host byte order into the r3 (high) / r4 (low) pair; note the
   little-endian variant loads the high word first so that the final
   load can safely overwrite r4, the address register.  */

static void
ppc_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM ("lbz 4, 0(4)\n"
		"li 3, 0");
      break;
    case 2:
      EMIT_ASM ("lhz 4, 0(4)\n"
		"li 3, 0");
      break;
    case 4:
      EMIT_ASM ("lwz 4, 0(4)\n"
		"li 3, 0");
      break;
    case 8:
      if (__BYTE_ORDER == __LITTLE_ENDIAN)
	EMIT_ASM ("lwz 3, 4(4)\n"
		  "lwz 4, 0(4)");
      else
	EMIT_ASM ("lwz 3, 0(4)\n"
		  "lwz 4, 4(4)");
      break;
    }
}
1853
1854 /* TOP = NUM */
1855
1856 static void
1857 ppc_emit_const (LONGEST num)
1858 {
1859 uint32_t buf[10];
1860 uint32_t *p = buf;
1861
1862 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
1863 p += gen_limm (p, 4, num & 0xffffffff, 0);
1864
1865 emit_insns (buf, p - buf);
1866 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1867 }
1868
/* Set TOP to the value of register REG by calling get_raw_reg function
   with two argument, collected buffer and register number.  The helper
   returns a 64-bit value in the r3:r4 pair following the 32-bit ABI;
   on little-endian that pair is swapped relative to our high/low
   convention, hence the trailing register swap.  */

static void
ppc_emit_reg (int reg)
{
  uint32_t buf[13];
  uint32_t *p = buf;

  /* fctx->regs is passed in r3 and then saved in -16(31).  */
  p += GEN_LWZ (p, 3, 31, -16);
  p += GEN_LI (p, 4, reg);	/* li r4, reg */
  p += gen_call (p, get_raw_reg_func_addr (), 0, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));

  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      /* Swap r3/r4 through r5 to match the TOP high/low convention.  */
      EMIT_ASM ("mr 5, 4\n"
		"mr 4, 3\n"
		"mr 3, 5\n");
    }
}
1893
/* TOP = stack[--sp]
   r30 points at the next-empty slot, so lwzu both advances the
   bytecode stack pointer and loads the first word of the new top.  */

static void
ppc_emit_pop (void)
{
  EMIT_ASM ("lwzu " TOP_FIRST ", 8(30)	\n"
	    "lwz " TOP_SECOND ", 4(30)	\n");
}
1902
/* stack[sp++] = TOP

   Because we may use up bytecode stack, expand 8 doublewords more
   if needed.  */

static void
ppc_emit_stack_flush (void)
{
  /* Make sure bytecode stack is big enough before push.
     Otherwise, expand 64-byte more.  The expansion stores r31 (the
     entry SP saved by the prologue) as the new back-chain, so the
     epilogue's single "lwz 1, 0(1)" still lands on the entry frame
     regardless of how many expansions happened.  */

  EMIT_ASM ("  stw " TOP_FIRST ", 0(30)	\n"
	    "  stw " TOP_SECOND ", 4(30)\n"
	    "  addi 5, 30, -(8 + 8)	\n"
	    "  cmpw 7, 5, 1		\n"
	    "  bgt 7, 1f		\n"
	    "  stwu 31, -64(1)		\n"
	    "1:addi 30, 30, -8		\n");
}
1922
/* Swap TOP and stack[sp-1]
   r30 points at the next-empty slot, so stack[sp-1] lives at
   8(30)/12(30); load it into r5:r6, store the old TOP there, then
   move the loaded value into the TOP pair.  */

static void
ppc_emit_swap (void)
{
  EMIT_ASM ("lwz " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 12(30)	\n"
	    "stw " TOP_FIRST ", 8(30)	\n"
	    "stw " TOP_SECOND ", 12(30)	\n"
	    "mr 3, 5	\n"
	    "mr 4, 6	\n");
}
1935
1936 /* Discard N elements in the stack. Also used for ppc64. */
1937
1938 static void
1939 ppc_emit_stack_adjust (int n)
1940 {
1941 uint32_t buf[6];
1942 uint32_t *p = buf;
1943
1944 n = n << 3;
1945 if ((n >> 15) != 0)
1946 {
1947 emit_error = 1;
1948 return;
1949 }
1950
1951 p += GEN_ADDI (p, 30, 30, n);
1952
1953 emit_insns (buf, p - buf);
1954 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1955 }
1956
1957 /* Call function FN. */
1958
1959 static void
1960 ppc_emit_call (CORE_ADDR fn)
1961 {
1962 uint32_t buf[11];
1963 uint32_t *p = buf;
1964
1965 p += gen_call (p, fn, 0, 0);
1966
1967 emit_insns (buf, p - buf);
1968 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1969 }
1970
/* FN's prototype is `LONGEST(*fn)(int)'.
   TOP = fn (arg1)
   The callee returns a 64-bit value in r3:r4 per the 32-bit ABI; on
   little-endian that pair is swapped relative to our TOP high/low
   convention, hence the trailing register swap.  */

static void
ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[15];
  uint32_t *p = buf;

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += gen_limm (p, 3, (uint32_t) arg1, 0);
  p += gen_call (p, fn, 0, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));

  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      EMIT_ASM ("mr 5, 4\n"
		"mr 4, 3\n"
		"mr 3, 5\n");
    }
}
1995
/* FN's prototype is `void(*fn)(int,LONGEST)'.
   fn (arg1, TOP)

   TOP should be preserved/restored before/after the call.  The
   LONGEST argument is passed in the r5:r6 pair, ordered per the host
   endianness; TOP is parked in the next-empty stack slot (0(30))
   across the call.  */

static void
ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[21];
  uint32_t *p = buf;

  /* Save TOP.  0(30) is next-empty.  */
  p += GEN_STW (p, 3, 30, 0);
  p += GEN_STW (p, 4, 30, 4);

  /* Setup argument.  arg1 is a 16-bit value.  */
  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      p += GEN_MR (p, 5, 4);
      p += GEN_MR (p, 6, 3);
    }
  else
    {
      p += GEN_MR (p, 5, 3);
      p += GEN_MR (p, 6, 4);
    }
  p += gen_limm (p, 3, (uint32_t) arg1, 0);
  p += gen_call (p, fn, 0, 0);

  /* Restore TOP */
  p += GEN_LWZ (p, 3, 30, 0);
  p += GEN_LWZ (p, 4, 30, 4);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2032
/* Note in the following goto ops:

   When emitting goto, the target address is later relocated by
   write_goto_address.  OFFSET_P is the offset of the branch instruction
   in the code sequence, and SIZE_P is how to relocate the instruction,
   recognized by ppc_write_goto_address.  In current implementation,
   SIZE can be either 24 or 14 for branch of conditional-branch instruction.

   Each emitted branch initially targets itself ("1: b... 1b") as a
   placeholder until the relocation fills in the real displacement.  */

/* If TOP is true, goto somewhere.  Otherwise, just fall-through.  */

static void
ppc_emit_if_goto (int *offset_p, int *size_p)
{
  /* Test TOP for non-zero, pop the new TOP, then branch.  The bne is
     the 4th instruction, hence offset 12; its BD field is 14 bits.  */
  EMIT_ASM ("or. 3, 3, 4	\n"
	    "lwzu " TOP_FIRST ", 8(30)	\n"
	    "lwz " TOP_SECOND ", 4(30)	\n"
	    "1:bne 0, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}

/* Unconditional goto.  Also used for ppc64.  */

static void
ppc_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("1:b 1b");

  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 24;
}

/* Goto if stack[--sp] == TOP */

static void
ppc_emit_eq_goto (int *offset_p, int *size_p)
{
  /* XOR-compare the pairs (see ppc_emit_equal), pop the new TOP,
     branch on equal.  The beq is the 8th instruction (offset 28).  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "xor 4, 6, 4	\n"
	    "xor 3, 5, 3	\n"
	    "or. 3, 3, 4	\n"
	    "lwzu " TOP_FIRST ", 8(30)	\n"
	    "lwz " TOP_SECOND ", 4(30)	\n"
	    "1:beq 0, 1b	\n");

  if (offset_p)
    *offset_p = 28;
  if (size_p)
    *size_p = 14;
}

/* Goto if stack[--sp] != TOP */

static void
ppc_emit_ne_goto (int *offset_p, int *size_p)
{
  /* Same sequence as eq_goto with the branch condition inverted.  */
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "xor 4, 6, 4	\n"
	    "xor 3, 5, 3	\n"
	    "or. 3, 3, 4	\n"
	    "lwzu " TOP_FIRST ", 8(30)	\n"
	    "lwz " TOP_SECOND ", 4(30)	\n"
	    "1:bne 0, 1b	\n");

  if (offset_p)
    *offset_p = 28;
  if (size_p)
    *size_p = 14;
}
2110
/* Goto if stack[--sp] < TOP
   64-bit signed compare (see ppc_emit_less_signed), pop the new TOP,
   then branch on CR7.LT.  The branch is the 9th instruction, hence
   offset 32; likewise for the remaining relational gotos below.  */

static void
ppc_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "cmplw 6, 6, 4	\n"
	    "cmpw 7, 5, 3	\n"
	    /* CR6 bit 0 = low less and high equal */
	    "crand 6*4+0, 6*4+0, 7*4+2\n"
	    /* CR7 bit 0 = (low less and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30)	\n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b	\n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}

/* Goto if stack[--sp] <= TOP */

static void
ppc_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "cmplw 6, 6, 4	\n"
	    "cmpw 7, 5, 3	\n"
	    /* CR6 bit 0 = low less/equal and high equal */
	    "crandc 6*4+0, 7*4+2, 6*4+1\n"
	    /* CR7 bit 0 = (low less/eq and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30)	\n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b	\n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}

/* Goto if stack[--sp] > TOP */

static void
ppc_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "cmplw 6, 6, 4	\n"
	    "cmpw 7, 5, 3	\n"
	    /* CR6 bit 0 = low greater and high equal */
	    "crand 6*4+0, 6*4+1, 7*4+2\n"
	    /* CR7 bit 0 = (low greater and high equal) or high greater */
	    "cror 7*4+0, 7*4+1, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30)	\n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b	\n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}

/* Goto if stack[--sp] >= TOP */

static void
ppc_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30)	\n"
	    "lwz " TMP_SECOND ", 4(30)	\n"
	    "cmplw 6, 6, 4	\n"
	    "cmpw 7, 5, 3	\n"
	    /* CR6 bit 0 = low ge and high equal */
	    "crandc 6*4+0, 7*4+2, 6*4+0\n"
	    /* CR7 bit 0 = (low ge and high equal) or high greater */
	    "cror 7*4+0, 7*4+1, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30)\n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b	\n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}
2202
/* Relocate previous emitted branch instruction.  FROM is the address
   of the branch instruction, TO is the goto target address, and SIZE
   is the value we set by *SIZE_P before.  Currently, it is either
   24 or 14 of branch and conditional-branch instruction.
   Also used for ppc64.  On any mismatch (wrong opcode at FROM, or a
   displacement that does not fit SIZE bits) emit_error is set and the
   instruction is left unmodified.  */

static void
ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  long rel = to - from;
  uint32_t insn;
  int opcd;

  read_inferior_memory (from, (unsigned char *) &insn, 4);
  opcd = (insn >> 26) & 0x3f;

  switch (size)
    {
    case 14:
      /* Conditional branch (opcode 16): 14-bit BD field, +/- 32KB.  */
      if (opcd != 16
	  || (rel >= (1 << 15) || rel < -(1 << 15)))
	emit_error = 1;
      insn = (insn & ~0xfffc) | (rel & 0xfffc);
      break;
    case 24:
      /* Unconditional branch (opcode 18): 24-bit LI field, +/- 32MB.  */
      if (opcd != 18
	  || (rel >= (1 << 25) || rel < -(1 << 25)))
	emit_error = 1;
      insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
      break;
    default:
      emit_error = 1;
    }

  /* Only patch the inferior if nothing (here or earlier) failed.  */
  if (!emit_error)
    write_inferior_memory (from, (unsigned char *) &insn, 4);
}
2240
/* Table of emit ops for 32-bit.  */

static struct emit_ops ppc_emit_ops_impl =
{
  /* NOTE(review): positional initializers — the order must match the
     field order of struct emit_ops (declared outside this file;
     presumably in tracepoint.h — verify before reordering).  */
  ppc_emit_prologue,
  ppc_emit_epilogue,
  ppc_emit_add,
  ppc_emit_sub,
  ppc_emit_mul,
  ppc_emit_lsh,
  ppc_emit_rsh_signed,
  ppc_emit_rsh_unsigned,
  ppc_emit_ext,
  ppc_emit_log_not,
  ppc_emit_bit_and,
  ppc_emit_bit_or,
  ppc_emit_bit_xor,
  ppc_emit_bit_not,
  ppc_emit_equal,
  ppc_emit_less_signed,
  ppc_emit_less_unsigned,
  ppc_emit_ref,
  ppc_emit_if_goto,
  ppc_emit_goto,
  ppc_write_goto_address,
  ppc_emit_const,
  ppc_emit_call,
  ppc_emit_reg,
  ppc_emit_pop,
  ppc_emit_stack_flush,
  ppc_emit_zero_ext,
  ppc_emit_swap,
  ppc_emit_stack_adjust,
  ppc_emit_int_call_1,
  ppc_emit_void_call_2,
  ppc_emit_eq_goto,
  ppc_emit_ne_goto,
  ppc_emit_lt_goto,
  ppc_emit_le_goto,
  ppc_emit_gt_goto,
  ppc_emit_ge_goto
};
2283
2284 #ifdef __powerpc64__
2285
2286 /*
2287
2288 Bytecode execution stack frame - 64-bit
2289
2290 | LR save area (SP + 16)
2291 | CR save area (SP + 8)
2292 SP' -> +- Back chain (SP + 0)
    | Save r31 for accessing saved arguments
    | Save r30 for bytecode stack pointer
    | Save r4 for incoming argument *value
    | Save r3 for incoming argument regs
2297 r30 -> +- Bytecode execution stack
2298 |
2299 | 64-byte (8 doublewords) at initial.
2300 | Expand stack as needed.
2301 |
2302 +-
2303 | Some padding for minimum stack frame.
2304 | 112 for ELFv1.
2305 SP +- Back-chain (SP')
2306
2307 initial frame size
2308 = 112 + (4 * 8) + 64
2309 = 208
2310
2311 r30 is the stack-pointer for bytecode machine.
2312 It should point to next-empty, so we can use LDU for pop.
2313 r3 is used for cache of TOP value.
2314 It was the first argument, pointer to regs.
2315 r4 is the second argument, pointer to the result.
2316 We should set *result = TOP after leaving this function.
2317
2318 Note:
2319 * To restore stack at epilogue
2320 => sp = r31
2321 * To check stack is big enough for bytecode execution.
2322 => r30 - 8 > SP + 112
2323 * To return execution result.
2324 => 0(r4) = TOP
2325
2326 */
2327
/* Emit prologue in inferior memory -- ELFv1 variant.  See above
   comments for the stack frame layout.  */

static void
ppc64v1_emit_prologue (void)
{
  /* On ELFv1, function pointers really point to function descriptor,
     so emit one here.  We don't care about contents of words 1 and 2,
     so let them just overlap our code.  */
  uint64_t opd = current_insn_ptr + 8;
  uint32_t buf[2];

  /* Mind the strict aliasing rules.  */
  memcpy (buf, &opd, sizeof buf);
  emit_insns(buf, 2);
  EMIT_ASM (/* Save return address.  */
	    "mflr  0		\n"
	    "std   0, 16(1)	\n"
	    /* Save r30 and incoming arguments.  */
	    "std   31, -8(1)	\n"
	    "std   30, -16(1)	\n"
	    "std   4, -24(1)	\n"
	    "std   3, -32(1)	\n"
	    /* Point r31 to current r1 for access arguments.  */
	    "mr    31, 1	\n"
	    /* Adjust SP.  208 is the initial frame size.  */
	    "stdu  1, -208(1)	\n"
	    /* Set r30 to pointing stack-top.  */
	    "addi  30, 1, 168	\n"
	    /* Initial r3/TOP to 0.  */
	    "li    3, 0		\n");
}
2359
/* Emit prologue in inferior memory -- ELFv2 variant (same frame
   setup as ELFv1, but no function descriptor is needed).  */

static void
ppc64v2_emit_prologue (void)
{
  EMIT_ASM (/* Save return address.  */
	    "mflr  0		\n"
	    "std   0, 16(1)	\n"
	    /* Save r30 and incoming arguments.  */
	    "std   31, -8(1)	\n"
	    "std   30, -16(1)	\n"
	    "std   4, -24(1)	\n"
	    "std   3, -32(1)	\n"
	    /* Point r31 to current r1 for access arguments.  */
	    "mr    31, 1	\n"
	    /* Adjust SP.  208 is the initial frame size.  */
	    "stdu  1, -208(1)	\n"
	    /* Set r30 to pointing stack-top.  */
	    "addi  30, 1, 168	\n"
	    /* Initial r3/TOP to 0.  */
	    "li    3, 0		\n");
}
2382
/* Emit epilogue in inferior memory.  See above comments.
   Restores the caller's frame, stores TOP through the saved result
   pointer, and returns 0 (no-error) in r3.  */

static void
ppc64_emit_epilogue (void)
{
  EMIT_ASM (/* Restore SP.  */
	    "ld    1, 0(1)	\n"
	    /* *result = TOP */
	    "ld    4, -24(1)	\n"
	    "std   3, 0(4)	\n"
	    /* Restore registers.  */
	    "ld    31, -8(1)	\n"
	    "ld    30, -16(1)	\n"
	    /* Restore LR.  */
	    "ld    0, 16(1)	\n"
	    /* Return 0 for no-error.  */
	    "li    3, 0		\n"
	    "mtlr  0		\n"
	    "blr		\n");
}
2403
/* TOP = stack[--sp] + TOP */

static void
ppc64_emit_add (void)
{
  /* r30 points at the next-empty slot, so "ldu" pops into r4 and
     updates the bytecode stack pointer in one instruction.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "add   3, 4, 3	\n");
}
2412
/* TOP = stack[--sp] - TOP */

static void
ppc64_emit_sub (void)
{
  /* Pop into r4; note operand order: popped value minus TOP.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "sub   3, 4, 3	\n");
}
2421
/* TOP = stack[--sp] * TOP */

static void
ppc64_emit_mul (void)
{
  /* Pop into r4, then 64-bit multiply (low doubleword) into TOP.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "mulld 3, 4, 3	\n");
}
2430
/* TOP = stack[--sp] << TOP */

static void
ppc64_emit_lsh (void)
{
  /* Pop into r4; shift the popped value left by TOP bits.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "sld   3, 4, 3	\n");
}
2439
/* TOP = stack[--sp] >> TOP
   (Arithmetic shift right) */

static void
ppc64_emit_rsh_signed (void)
{
  /* Pop into r4; "srad" sign-extends while shifting right.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "srad  3, 4, 3	\n");
}
2449
/* TOP = stack[--sp] >> TOP
   (Logical shift right) */

static void
ppc64_emit_rsh_unsigned (void)
{
  /* Pop into r4; "srd" shifts in zero bits from the left.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "srd   3, 4, 3	\n");
}
2459
/* Emit code for signed-extension specified by ARG.
   ARG is the source width in bits (8, 16 or 32); any other width
   sets emit_error.  */

static void
ppc64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM ("extsb  3, 3");
      break;
    case 16:
      EMIT_ASM ("extsh  3, 3");
      break;
    case 32:
      EMIT_ASM ("extsw  3, 3");
      break;
    default:
      emit_error = 1;
    }
}
2480
/* Emit code for zero-extension specified by ARG.
   ARG is the source width in bits (8, 16 or 32); "rldicl" with shift 0
   and mask-begin 64-ARG clears the high-order bits.  */

static void
ppc64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM ("rldicl 3,3,0,56");
      break;
    case 16:
      EMIT_ASM ("rldicl 3,3,0,48");
      break;
    case 32:
      EMIT_ASM ("rldicl 3,3,0,32");
      break;
    default:
      emit_error = 1;
    }
}
2501
/* TOP = !TOP
   i.e., TOP = (TOP == 0) ? 1 : 0;

   cntlzd yields 64 only when TOP is zero; shifting right by 6 maps
   64 -> 1 and any smaller count -> 0.  */

static void
ppc64_emit_log_not (void)
{
  EMIT_ASM ("cntlzd  3, 3	\n"
	    "srdi    3, 3, 6	\n");
}
2511
/* TOP = stack[--sp] & TOP */

static void
ppc64_emit_bit_and (void)
{
  /* Pop into r4, then bitwise AND with TOP.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "and   3, 4, 3	\n");
}
2520
/* TOP = stack[--sp] | TOP */

static void
ppc64_emit_bit_or (void)
{
  /* Pop into r4, then bitwise OR with TOP.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "or    3, 4, 3	\n");
}
2529
/* TOP = stack[--sp] ^ TOP */

static void
ppc64_emit_bit_xor (void)
{
  /* Pop into r4, then bitwise XOR with TOP.  */
  EMIT_ASM ("ldu   4, 8(30)	\n"
	    "xor   3, 4, 3	\n");
}
2538
/* TOP = ~TOP
   i.e., TOP = ~(TOP | TOP) */

static void
ppc64_emit_bit_not (void)
{
  /* "nor rD, rS, rS" is the canonical one's-complement idiom.  */
  EMIT_ASM ("nor   3, 3, 3	\n");
}
2547
/* TOP = stack[--sp] == TOP

   XOR yields zero exactly on equality; cntlzd/srdi then converts
   "is zero" into a 0/1 boolean as in ppc64_emit_log_not.  */

static void
ppc64_emit_equal (void)
{
  EMIT_ASM ("ldu     4, 8(30)	\n"
	    "xor     3, 3, 4	\n"
	    "cntlzd  3, 3	\n"
	    "srdi    3, 3, 6	\n");
}
2558
/* TOP = stack[--sp] < TOP
   (Signed comparison)

   Compare in CR7, then extract CR7's LT bit (CR bit 28) into bit 0
   of r3 with rlwinm.  */

static void
ppc64_emit_less_signed (void)
{
  EMIT_ASM ("ldu     4, 8(30)	\n"
	    "cmpd    7, 4, 3	\n"
	    "mfcr    3		\n"
	    "rlwinm  3, 3, 29, 31, 31	\n");
}
2570
/* TOP = stack[--sp] < TOP
   (Unsigned comparison)

   Same as the signed variant but using the logical compare "cmpld".  */

static void
ppc64_emit_less_unsigned (void)
{
  EMIT_ASM ("ldu     4, 8(30)	\n"
	    "cmpld   7, 4, 3	\n"
	    "mfcr    3		\n"
	    "rlwinm  3, 3, 29, 31, 31	\n");
}
2582
/* Access the memory address in TOP in size of SIZE.
   Zero-extend the read value.  SIZE must be 1, 2, 4 or 8; any other
   size emits nothing (TOP is left unchanged).  */

static void
ppc64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM ("lbz   3, 0(3)");
      break;
    case 2:
      EMIT_ASM ("lhz   3, 0(3)");
      break;
    case 4:
      EMIT_ASM ("lwz   3, 0(3)");
      break;
    case 8:
      EMIT_ASM ("ld    3, 0(3)");
      break;
    }
}
2605
/* TOP = NUM */

static void
ppc64_emit_const (LONGEST num)
{
  /* gen_limm materializes the 64-bit immediate NUM into r3, using at
     most 5 instructions (checked by the assert below).  */
  uint32_t buf[5];
  uint32_t *p = buf;

  p += gen_limm (p, 3, num, 1);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2619
/* Set TOP to the value of register REG by calling get_raw_reg function
   with two argument, collected buffer and register number.
   ELFv1 variant: the callee is reached through a function descriptor
   and the caller's TOC is saved at 40(1) (ELFv1 TOC save slot).  */

static void
ppc64v1_emit_reg (int reg)
{
  uint32_t buf[15];
  uint32_t *p = buf;

  /* fctx->regs is passed in r3 and then saved in 176(1), i.e. at
     -32 from the entry SP, which is what r31 still points to.  */
  p += GEN_LD (p, 3, 31, -32);
  p += GEN_LI (p, 4, reg);
  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2639
/* Likewise, for ELFv2.  The TOC save slot is at 24(1) in the ELFv2
   ABI, and no function descriptor indirection is needed.  */

static void
ppc64v2_emit_reg (int reg)
{
  uint32_t buf[12];
  uint32_t *p = buf;

  /* fctx->regs is passed in r3 and then saved in 176(1).  */
  p += GEN_LD (p, 3, 31, -32);
  p += GEN_LI (p, 4, reg);
  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2658
/* TOP = stack[--sp] */

static void
ppc64_emit_pop (void)
{
  /* Single load-with-update both pops and bumps r30.  */
  EMIT_ASM ("ldu   3, 8(30)");
}
2666
/* stack[sp++] = TOP

   Because we may use up bytecode stack, expand 8 doublewords more
   if needed.  */

static void
ppc64_emit_stack_flush (void)
{
  /* Make sure bytecode stack is big enough before push.
     Otherwise, expand 64-byte more.  The check is
     r30 - 8 > SP + 112 (see the frame-layout comment above);
     the "stdu" both extends the frame and writes a back chain.  */

  EMIT_ASM ("  std   3, 0(30)		\n"
	    "  addi  4, 30, -(112 + 8)	\n"
	    "  cmpd  7, 4, 1		\n"
	    "  bgt   7, 1f		\n"
	    "  stdu  31, -64(1)		\n"
	    "1:addi  30, 30, -8		\n");
}
2685
/* Swap TOP and stack[sp-1] */

static void
ppc64_emit_swap (void)
{
  /* 8(30) is the most recently pushed slot (r30 points at
     next-empty); exchange it with the cached TOP in r3.  */
  EMIT_ASM ("ld    4, 8(30)	\n"
	    "std   3, 8(30)	\n"
	    "mr    3, 4		\n");
}
2695
/* Call function FN - ELFv1.  Saves and restores the caller's TOC
   around the call (ELFv1 TOC save slot is 40(1)); the call goes
   through FN's function descriptor.  */

static void
ppc64v1_emit_call (CORE_ADDR fn)
{
  uint32_t buf[13];
  uint32_t *p = buf;

  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2711
/* Call function FN - ELFv2.  Same as the ELFv1 variant except the
   TOC save slot is 24(1) and no descriptor indirection is used.  */

static void
ppc64v2_emit_call (CORE_ADDR fn)
{
  uint32_t buf[10];
  uint32_t *p = buf;

  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2727
/* FN's prototype is `LONGEST(*fn)(int)'.
   TOP = fn (arg1)
   ELFv1 variant.  */

static void
ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[13];
  uint32_t *p = buf;

  /* Setup argument.  arg1 is a 16-bit value.  The return value lands
     in r3 and thus becomes the new TOP.  */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2747
/* Likewise for ELFv2 (TOC save slot at 24(1)).  */

static void
ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[10];
  uint32_t *p = buf;

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2765
/* FN's prototype is `void(*fn)(int,LONGEST)'.
   fn (arg1, TOP)

   TOP should be preserved/restored before/after the call.
   ELFv1 variant.  */

static void
ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[17];
  uint32_t *p = buf;

  /* Save TOP.  0(30) is next-empty.  */
  p += GEN_STD (p, 3, 30, 0);

  /* Setup argument.  arg1 is a 16-bit value.  TOP (old r3) becomes
     the second argument in r4 before r3 is overwritten.  */
  p += GEN_MR (p, 4, 3);	/* mr	r4, r3 */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  /* Restore TOP */
  p += GEN_LD (p, 3, 30, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2793
/* Likewise for ELFv2 (TOC save slot at 24(1)).  */

static void
ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[14];
  uint32_t *p = buf;

  /* Save TOP.  0(30) is next-empty.  */
  p += GEN_STD (p, 3, 30, 0);

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += GEN_MR (p, 4, 3);	/* mr	r4, r3 */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  /* Restore TOP */
  p += GEN_LD (p, 3, 30, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2818
/* If TOP is true, goto somewhere.  Otherwise, just fall-through.

   The "1:bne 7, 1b" is a self-branch placeholder later patched by
   ppc_write_goto_address; *OFFSET_P is the byte offset of that branch
   within the emitted sequence and *SIZE_P its displacement width.  */

static void
ppc64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("cmpdi  7, 3, 0	\n"
	    "ldu    3, 8(30)	\n"
	    "1:bne  7, 1b	\n");

  if (offset_p)
    *offset_p = 8;
  if (size_p)
    *size_p = 14;
}
2833
/* Goto if stack[--sp] == TOP

   The final self-branch is a placeholder patched later by
   ppc_write_goto_address (offset 12, 14-bit displacement).  */

static void
ppc64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu    4, 8(30)	\n"
	    "cmpd   7, 4, 3	\n"
	    "ldu    3, 8(30)	\n"
	    "1:beq  7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
2849
/* Goto if stack[--sp] != TOP

   Placeholder branch patched later (offset 12, 14-bit field).  */

static void
ppc64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu    4, 8(30)	\n"
	    "cmpd   7, 4, 3	\n"
	    "ldu    3, 8(30)	\n"
	    "1:bne  7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
2865
/* Goto if stack[--sp] < TOP (signed)

   Placeholder branch patched later (offset 12, 14-bit field).  */

static void
ppc64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu    4, 8(30)	\n"
	    "cmpd   7, 4, 3	\n"
	    "ldu    3, 8(30)	\n"
	    "1:blt  7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
2881
/* Goto if stack[--sp] <= TOP (signed)

   Placeholder branch patched later (offset 12, 14-bit field).  */

static void
ppc64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu    4, 8(30)	\n"
	    "cmpd   7, 4, 3	\n"
	    "ldu    3, 8(30)	\n"
	    "1:ble  7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
2897
/* Goto if stack[--sp] > TOP (signed)

   Placeholder branch patched later (offset 12, 14-bit field).  */

static void
ppc64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu    4, 8(30)	\n"
	    "cmpd   7, 4, 3	\n"
	    "ldu    3, 8(30)	\n"
	    "1:bgt  7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
2913
/* Goto if stack[--sp] >= TOP (signed)

   Placeholder branch patched later (offset 12, 14-bit field).  */

static void
ppc64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu    4, 8(30)	\n"
	    "cmpd   7, 4, 3	\n"
	    "ldu    3, 8(30)	\n"
	    "1:bge  7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
2929
/* Table of emit ops for 64-bit ELFv1.

   NOTE: positional initializer; entry order must match the members of
   struct emit_ops.  The goto/address helpers are shared with the
   32-bit table.  */

static struct emit_ops ppc64v1_emit_ops_impl =
{
  ppc64v1_emit_prologue,
  ppc64_emit_epilogue,
  ppc64_emit_add,
  ppc64_emit_sub,
  ppc64_emit_mul,
  ppc64_emit_lsh,
  ppc64_emit_rsh_signed,
  ppc64_emit_rsh_unsigned,
  ppc64_emit_ext,
  ppc64_emit_log_not,
  ppc64_emit_bit_and,
  ppc64_emit_bit_or,
  ppc64_emit_bit_xor,
  ppc64_emit_bit_not,
  ppc64_emit_equal,
  ppc64_emit_less_signed,
  ppc64_emit_less_unsigned,
  ppc64_emit_ref,
  ppc64_emit_if_goto,
  ppc_emit_goto,
  ppc_write_goto_address,
  ppc64_emit_const,
  ppc64v1_emit_call,
  ppc64v1_emit_reg,
  ppc64_emit_pop,
  ppc64_emit_stack_flush,
  ppc64_emit_zero_ext,
  ppc64_emit_swap,
  ppc_emit_stack_adjust,
  ppc64v1_emit_int_call_1,
  ppc64v1_emit_void_call_2,
  ppc64_emit_eq_goto,
  ppc64_emit_ne_goto,
  ppc64_emit_lt_goto,
  ppc64_emit_le_goto,
  ppc64_emit_gt_goto,
  ppc64_emit_ge_goto
};
2972
/* Table of emit ops for 64-bit ELFv2.

   NOTE: positional initializer; entry order must match the members of
   struct emit_ops.  Differs from the ELFv1 table only in the
   prologue/call/reg entries.  */

static struct emit_ops ppc64v2_emit_ops_impl =
{
  ppc64v2_emit_prologue,
  ppc64_emit_epilogue,
  ppc64_emit_add,
  ppc64_emit_sub,
  ppc64_emit_mul,
  ppc64_emit_lsh,
  ppc64_emit_rsh_signed,
  ppc64_emit_rsh_unsigned,
  ppc64_emit_ext,
  ppc64_emit_log_not,
  ppc64_emit_bit_and,
  ppc64_emit_bit_or,
  ppc64_emit_bit_xor,
  ppc64_emit_bit_not,
  ppc64_emit_equal,
  ppc64_emit_less_signed,
  ppc64_emit_less_unsigned,
  ppc64_emit_ref,
  ppc64_emit_if_goto,
  ppc_emit_goto,
  ppc_write_goto_address,
  ppc64_emit_const,
  ppc64v2_emit_call,
  ppc64v2_emit_reg,
  ppc64_emit_pop,
  ppc64_emit_stack_flush,
  ppc64_emit_zero_ext,
  ppc64_emit_swap,
  ppc_emit_stack_adjust,
  ppc64v2_emit_int_call_1,
  ppc64v2_emit_void_call_2,
  ppc64_emit_eq_goto,
  ppc64_emit_ne_goto,
  ppc64_emit_lt_goto,
  ppc64_emit_le_goto,
  ppc64_emit_gt_goto,
  ppc64_emit_ge_goto
};
3015
3016 #endif
3017
3018 /* Implementation of linux_target_ops method "emit_ops". */
3019
3020 static struct emit_ops *
3021 ppc_emit_ops (void)
3022 {
3023 #ifdef __powerpc64__
3024 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3025
3026 if (register_size (regcache->tdesc, 0) == 8)
3027 {
3028 if (is_elfv2_inferior ())
3029 return &ppc64v2_emit_ops_impl;
3030 else
3031 return &ppc64v1_emit_ops_impl;
3032 }
3033 #endif
3034 return &ppc_emit_ops_impl;
3035 }
3036
3037 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3038
3039 static int
3040 ppc_get_ipa_tdesc_idx (void)
3041 {
3042 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3043 const struct target_desc *tdesc = regcache->tdesc;
3044
3045 #ifdef __powerpc64__
3046 if (tdesc == tdesc_powerpc_64l)
3047 return PPC_TDESC_BASE;
3048 if (tdesc == tdesc_powerpc_altivec64l)
3049 return PPC_TDESC_ALTIVEC;
3050 if (tdesc == tdesc_powerpc_cell64l)
3051 return PPC_TDESC_CELL;
3052 if (tdesc == tdesc_powerpc_vsx64l)
3053 return PPC_TDESC_VSX;
3054 if (tdesc == tdesc_powerpc_isa205_64l)
3055 return PPC_TDESC_ISA205;
3056 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3057 return PPC_TDESC_ISA205_ALTIVEC;
3058 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3059 return PPC_TDESC_ISA205_VSX;
3060 #endif
3061
3062 if (tdesc == tdesc_powerpc_32l)
3063 return PPC_TDESC_BASE;
3064 if (tdesc == tdesc_powerpc_altivec32l)
3065 return PPC_TDESC_ALTIVEC;
3066 if (tdesc == tdesc_powerpc_cell32l)
3067 return PPC_TDESC_CELL;
3068 if (tdesc == tdesc_powerpc_vsx32l)
3069 return PPC_TDESC_VSX;
3070 if (tdesc == tdesc_powerpc_isa205_32l)
3071 return PPC_TDESC_ISA205;
3072 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3073 return PPC_TDESC_ISA205_ALTIVEC;
3074 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3075 return PPC_TDESC_ISA205_VSX;
3076 if (tdesc == tdesc_powerpc_e500l)
3077 return PPC_TDESC_E500;
3078
3079 return 0;
3080 }
3081
/* Positional initializer for the linux_target_ops vtable; entry order
   must match the struct declaration in linux-low.h.  The slot labels
   on the unnamed NULL/0 entries below follow the pattern of the other
   entries' comments -- confirm against linux-low.h when editing.  */
struct linux_target_ops the_low_target = {
  ppc_arch_setup,
  ppc_regs_info,
  ppc_cannot_fetch_register,
  ppc_cannot_store_register,
  NULL, /* fetch_register */
  ppc_get_pc,
  ppc_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  ppc_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0, /* decr_pc_after_break */
  ppc_breakpoint_at,
  ppc_supports_z_point_type,
  ppc_insert_point,
  ppc_remove_point,
  NULL, /* stopped_by_watchpoint */
  NULL, /* stopped_data_address */
  ppc_collect_ptrace_register,
  ppc_supply_ptrace_register,
  NULL, /* siginfo_fixup */
  NULL, /* new_process */
  NULL, /* delete_process */
  NULL, /* new_thread */
  NULL, /* delete_thread */
  NULL, /* new_fork */
  NULL, /* prepare_to_resume */
  NULL, /* process_qsupported */
  ppc_supports_tracepoints,
  ppc_get_thread_area,
  ppc_install_fast_tracepoint_jump_pad,
  ppc_emit_ops,
  ppc_get_min_fast_tracepoint_insn_len,
  NULL, /* supports_range_stepping */
  NULL, /* breakpoint_kind_from_current_state */
  ppc_supports_hardware_single_step,
  NULL, /* get_syscall_trapinfo */
  ppc_get_ipa_tdesc_idx,
};
3121
3122 void
3123 initialize_low_arch (void)
3124 {
3125 /* Initialize the Linux target descriptions. */
3126
3127 init_registers_powerpc_32l ();
3128 init_registers_powerpc_altivec32l ();
3129 init_registers_powerpc_cell32l ();
3130 init_registers_powerpc_vsx32l ();
3131 init_registers_powerpc_isa205_32l ();
3132 init_registers_powerpc_isa205_altivec32l ();
3133 init_registers_powerpc_isa205_vsx32l ();
3134 init_registers_powerpc_e500l ();
3135 #if __powerpc64__
3136 init_registers_powerpc_64l ();
3137 init_registers_powerpc_altivec64l ();
3138 init_registers_powerpc_cell64l ();
3139 init_registers_powerpc_vsx64l ();
3140 init_registers_powerpc_isa205_64l ();
3141 init_registers_powerpc_isa205_altivec64l ();
3142 init_registers_powerpc_isa205_vsx64l ();
3143 #endif
3144
3145 initialize_regsets_info (&ppc_regsets_info);
3146 }
This page took 0.11298 seconds and 4 git commands to generate.