1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <elf.h>
24 #include <asm/ptrace.h>
25
26 #include "nat/ppc-linux.h"
27 #include "linux-ppc-tdesc.h"
28 #include "ax.h"
29 #include "tracepoint.h"
30
31 #define PPC_FIELD(value, from, len) \
32 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
33 #define PPC_SEXT(v, bs) \
34 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
35 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
36 - ((CORE_ADDR) 1 << ((bs) - 1)))
37 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
38 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
39 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
40 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
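/* Worked example of the helpers above (illustrative values, hand-checked
   against the Power ISA encoding): the unconditional branch "b .+8"
   encodes as 0x48000008, so
     PPC_OP6 (0x48000008) == 18   primary opcode of b/bl
     PPC_LI  (0x48000008) == 8    sign-extended LI field, scaled by 4
   and the conditional branch "bne .-8" encodes as 0x4082fff8, so
     PPC_OP6 (0x4082fff8) == 16
     PPC_BD  (0x4082fff8) == -8   sign-extended BD field, scaled by 4
   PPC_SEXT is what turns the 24-bit and 14-bit displacement fields into
   negative CORE_ADDR values when their top bit is set.  */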
41
42 static unsigned long ppc_hwcap;
43
44
45 #define ppc_num_regs 73
46
47 #ifdef __powerpc64__
48 /* We use a constant for FPSCR instead of PT_FPSCR, because
49 many shipped PPC64 kernels had the wrong value in ptrace.h. */
50 static int ppc_regmap[] =
51 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
52 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
53 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
54 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
55 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
56 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
57 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
58 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
59 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
60 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
61 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
62 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
63 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
64 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
65 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
66 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
67 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
68 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
69 PT_ORIG_R3 * 8, PT_TRAP * 8 };
70 #else
71 /* Currently, don't check/send MQ. */
72 static int ppc_regmap[] =
73 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
74 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
75 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
76 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
77 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
78 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
79 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
80 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
81 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
82 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
83 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
84 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
85 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
86 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
87 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
88 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
89 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
90 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
91 PT_ORIG_R3 * 4, PT_TRAP * 4
92 };
93
94 static int ppc_regmap_e500[] =
95 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
96 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
97 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
98 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
99 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
100 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
101 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
102 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
103 -1, -1, -1, -1,
104 -1, -1, -1, -1,
105 -1, -1, -1, -1,
106 -1, -1, -1, -1,
107 -1, -1, -1, -1,
108 -1, -1, -1, -1,
109 -1, -1, -1, -1,
110 -1, -1, -1, -1,
111 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
112 PT_CTR * 4, PT_XER * 4, -1,
113 PT_ORIG_R3 * 4, PT_TRAP * 4
114 };
115 #endif
116
117 static int
118 ppc_cannot_store_register (int regno)
119 {
120 const struct target_desc *tdesc = current_process ()->tdesc;
121
122 #ifndef __powerpc64__
123 /* Some kernels do not allow us to store fpscr. */
124 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
125 && regno == find_regno (tdesc, "fpscr"))
126 return 2;
127 #endif
128
129 /* Some kernels do not allow us to store orig_r3 or trap. */
130 if (regno == find_regno (tdesc, "orig_r3")
131 || regno == find_regno (tdesc, "trap"))
132 return 2;
133
134 return 0;
135 }
136
137 static int
138 ppc_cannot_fetch_register (int regno)
139 {
140 return 0;
141 }
142
143 static void
144 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
145 {
146 memset (buf, 0, sizeof (long));
147
148 if (__BYTE_ORDER == __LITTLE_ENDIAN)
149 {
150 /* Little-endian values always sit at the left end of the buffer. */
151 collect_register (regcache, regno, buf);
152 }
153 else if (__BYTE_ORDER == __BIG_ENDIAN)
154 {
155 /* Big-endian values sit at the right end of the buffer. In case of
156 registers whose sizes are smaller than sizeof (long), we must use
157 padding to access them correctly. */
158 int size = register_size (regcache->tdesc, regno);
159
160 if (size < sizeof (long))
161 collect_register (regcache, regno, buf + sizeof (long) - size);
162 else
163 collect_register (regcache, regno, buf);
164 }
165 else
166 perror_with_name ("Unexpected byte order");
167 }
168
169 static void
170 ppc_supply_ptrace_register (struct regcache *regcache,
171 int regno, const char *buf)
172 {
173 if (__BYTE_ORDER == __LITTLE_ENDIAN)
174 {
175 /* Little-endian values always sit at the left end of the buffer. */
176 supply_register (regcache, regno, buf);
177 }
178 else if (__BYTE_ORDER == __BIG_ENDIAN)
179 {
180 /* Big-endian values sit at the right end of the buffer. In case of
181 registers whose sizes are smaller than sizeof (long), we must use
182 padding to access them correctly. */
183 int size = register_size (regcache->tdesc, regno);
184
185 if (size < sizeof (long))
186 supply_register (regcache, regno, buf + sizeof (long) - size);
187 else
188 supply_register (regcache, regno, buf);
189 }
190 else
191 perror_with_name ("Unexpected byte order");
192 }
193
194
195 #define INSTR_SC 0x44000002
196 #define NR_spu_run 0x0116
197
198 /* If the PPU thread is currently stopped on a spu_run system call,
199 return in FD and ADDR the file handle and NPC parameter address
200 used with the system call. Return non-zero if successful. */
201 static int
202 parse_spufs_run (struct regcache *regcache, int *fd, CORE_ADDR *addr)
203 {
204 CORE_ADDR curr_pc;
205 int curr_insn;
206 int curr_r0;
207
208 if (register_size (regcache->tdesc, 0) == 4)
209 {
210 unsigned int pc, r0, r3, r4;
211 collect_register_by_name (regcache, "pc", &pc);
212 collect_register_by_name (regcache, "r0", &r0);
213 collect_register_by_name (regcache, "orig_r3", &r3);
214 collect_register_by_name (regcache, "r4", &r4);
215 curr_pc = (CORE_ADDR) pc;
216 curr_r0 = (int) r0;
217 *fd = (int) r3;
218 *addr = (CORE_ADDR) r4;
219 }
220 else
221 {
222 unsigned long pc, r0, r3, r4;
223 collect_register_by_name (regcache, "pc", &pc);
224 collect_register_by_name (regcache, "r0", &r0);
225 collect_register_by_name (regcache, "orig_r3", &r3);
226 collect_register_by_name (regcache, "r4", &r4);
227 curr_pc = (CORE_ADDR) pc;
228 curr_r0 = (int) r0;
229 *fd = (int) r3;
230 *addr = (CORE_ADDR) r4;
231 }
232
233 /* Fetch instruction preceding current NIP. */
234 if ((*the_target->read_memory) (curr_pc - 4,
235 (unsigned char *) &curr_insn, 4) != 0)
236 return 0;
237 /* It should be a "sc" instruction. */
238 if (curr_insn != INSTR_SC)
239 return 0;
240 /* System call number should be NR_spu_run. */
241 if (curr_r0 != NR_spu_run)
242 return 0;
243
244 return 1;
245 }
246
247 static CORE_ADDR
248 ppc_get_pc (struct regcache *regcache)
249 {
250 CORE_ADDR addr;
251 int fd;
252
253 if (parse_spufs_run (regcache, &fd, &addr))
254 {
255 unsigned int pc;
256 (*the_target->read_memory) (addr, (unsigned char *) &pc, 4);
257 return ((CORE_ADDR)1 << 63)
258 | ((CORE_ADDR)fd << 32) | (CORE_ADDR) (pc - 4);
259 }
260 else if (register_size (regcache->tdesc, 0) == 4)
261 {
262 unsigned int pc;
263 collect_register_by_name (regcache, "pc", &pc);
264 return (CORE_ADDR) pc;
265 }
266 else
267 {
268 unsigned long pc;
269 collect_register_by_name (regcache, "pc", &pc);
270 return (CORE_ADDR) pc;
271 }
272 }
273
274 static void
275 ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
276 {
277 CORE_ADDR addr;
278 int fd;
279
280 if (parse_spufs_run (regcache, &fd, &addr))
281 {
282 unsigned int newpc = pc;
283 (*the_target->write_memory) (addr, (unsigned char *) &newpc, 4);
284 }
285 else if (register_size (regcache->tdesc, 0) == 4)
286 {
287 unsigned int newpc = pc;
288 supply_register_by_name (regcache, "pc", &newpc);
289 }
290 else
291 {
292 unsigned long newpc = pc;
293 supply_register_by_name (regcache, "pc", &newpc);
294 }
295 }
296
297
298 static int
299 ppc_get_auxv (unsigned long type, unsigned long *valp)
300 {
301 const struct target_desc *tdesc = current_process ()->tdesc;
302 int wordsize = register_size (tdesc, 0);
303 unsigned char *data = (unsigned char *) alloca (2 * wordsize);
304 int offset = 0;
305
306 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
307 {
308 if (wordsize == 4)
309 {
310 unsigned int *data_p = (unsigned int *)data;
311 if (data_p[0] == type)
312 {
313 *valp = data_p[1];
314 return 1;
315 }
316 }
317 else
318 {
319 unsigned long *data_p = (unsigned long *)data;
320 if (data_p[0] == type)
321 {
322 *valp = data_p[1];
323 return 1;
324 }
325 }
326
327 offset += 2 * wordsize;
328 }
329
330 *valp = 0;
331 return 0;
332 }
333
334 #ifndef __powerpc64__
335 static int ppc_regmap_adjusted;
336 #endif
337
338
339 /* Correct in either endianness.
340 This instruction is "twge r2, r2", which GDB uses as a software
341 breakpoint. */
342 static const unsigned int ppc_breakpoint = 0x7d821008;
343 #define ppc_breakpoint_len 4
344
345 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
346
347 static const gdb_byte *
348 ppc_sw_breakpoint_from_kind (int kind, int *size)
349 {
350 *size = ppc_breakpoint_len;
351 return (const gdb_byte *) &ppc_breakpoint;
352 }
353
354 static int
355 ppc_breakpoint_at (CORE_ADDR where)
356 {
357 unsigned int insn;
358
359 if (where & ((CORE_ADDR)1 << 63))
360 {
361 char mem_annex[32];
362 sprintf (mem_annex, "%d/mem", (int)((where >> 32) & 0x7fffffff));
363 (*the_target->qxfer_spu) (mem_annex, (unsigned char *) &insn,
364 NULL, where & 0xffffffff, 4);
365 if (insn == 0x3fff)
366 return 1;
367 }
368 else
369 {
370 (*the_target->read_memory) (where, (unsigned char *) &insn, 4);
371 if (insn == ppc_breakpoint)
372 return 1;
373 /* If necessary, recognize more trap instructions here. GDB only uses
374 the one. */
375 }
376
377 return 0;
378 }
379
380 /* Implement supports_z_point_type target-ops.
381 Returns true if type Z_TYPE breakpoint is supported.
382
383 Software breakpoints are handled on the server side, so tracepoints
384 and breakpoints can be inserted at the same location. */
385
386 static int
387 ppc_supports_z_point_type (char z_type)
388 {
389 switch (z_type)
390 {
391 case Z_PACKET_SW_BP:
392 return 1;
393 case Z_PACKET_HW_BP:
394 case Z_PACKET_WRITE_WP:
395 case Z_PACKET_ACCESS_WP:
396 default:
397 return 0;
398 }
399 }
400
401 /* Implement insert_point target-ops.
402 Returns 0 on success, -1 on failure and 1 on unsupported. */
403
404 static int
405 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
406 int size, struct raw_breakpoint *bp)
407 {
408 switch (type)
409 {
410 case raw_bkpt_type_sw:
411 return insert_memory_breakpoint (bp);
412
413 case raw_bkpt_type_hw:
414 case raw_bkpt_type_write_wp:
415 case raw_bkpt_type_access_wp:
416 default:
417 /* Unsupported. */
418 return 1;
419 }
420 }
421
422 /* Implement remove_point target-ops.
423 Returns 0 on success, -1 on failure and 1 on unsupported. */
424
425 static int
426 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
427 int size, struct raw_breakpoint *bp)
428 {
429 switch (type)
430 {
431 case raw_bkpt_type_sw:
432 return remove_memory_breakpoint (bp);
433
434 case raw_bkpt_type_hw:
435 case raw_bkpt_type_write_wp:
436 case raw_bkpt_type_access_wp:
437 default:
438 /* Unsupported. */
439 return 1;
440 }
441 }
442
443 /* Provide only a fill function for the general register set. ps_lgetregs
444 will use this for NPTL support. */
445
446 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
447 {
448 int i;
449
450 for (i = 0; i < 32; i++)
451 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
452
453 for (i = 64; i < 70; i++)
454 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
455
456 for (i = 71; i < 73; i++)
457 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
458 }
459
460 #define SIZEOF_VSXREGS 32*8
461
462 static void
463 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
464 {
465 int i, base;
466 char *regset = (char *) buf;
467
468 if (!(ppc_hwcap & PPC_FEATURE_HAS_VSX))
469 return;
470
471 base = find_regno (regcache->tdesc, "vs0h");
472 for (i = 0; i < 32; i++)
473 collect_register (regcache, base + i, &regset[i * 8]);
474 }
475
476 static void
477 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
478 {
479 int i, base;
480 const char *regset = (const char *) buf;
481
482 if (!(ppc_hwcap & PPC_FEATURE_HAS_VSX))
483 return;
484
485 base = find_regno (regcache->tdesc, "vs0h");
486 for (i = 0; i < 32; i++)
487 supply_register (regcache, base + i, &regset[i * 8]);
488 }
489
490 #define SIZEOF_VRREGS 33*16+4
491
492 static void
493 ppc_fill_vrregset (struct regcache *regcache, void *buf)
494 {
495 int i, base;
496 char *regset = (char *) buf;
497
498 if (!(ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC))
499 return;
500
501 base = find_regno (regcache->tdesc, "vr0");
502 for (i = 0; i < 32; i++)
503 collect_register (regcache, base + i, &regset[i * 16]);
504
505 collect_register_by_name (regcache, "vscr", &regset[32 * 16 + 12]);
506 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
507 }
508
509 static void
510 ppc_store_vrregset (struct regcache *regcache, const void *buf)
511 {
512 int i, base;
513 const char *regset = (const char *) buf;
514
515 if (!(ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC))
516 return;
517
518 base = find_regno (regcache->tdesc, "vr0");
519 for (i = 0; i < 32; i++)
520 supply_register (regcache, base + i, &regset[i * 16]);
521
522 supply_register_by_name (regcache, "vscr", &regset[32 * 16 + 12]);
523 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
524 }
525
526 struct gdb_evrregset_t
527 {
528 unsigned long evr[32];
529 unsigned long long acc;
530 unsigned long spefscr;
531 };
532
533 static void
534 ppc_fill_evrregset (struct regcache *regcache, void *buf)
535 {
536 int i, ev0;
537 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
538
539 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE))
540 return;
541
542 ev0 = find_regno (regcache->tdesc, "ev0h");
543 for (i = 0; i < 32; i++)
544 collect_register (regcache, ev0 + i, &regset->evr[i]);
545
546 collect_register_by_name (regcache, "acc", &regset->acc);
547 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
548 }
549
550 static void
551 ppc_store_evrregset (struct regcache *regcache, const void *buf)
552 {
553 int i, ev0;
554 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
555
556 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE))
557 return;
558
559 ev0 = find_regno (regcache->tdesc, "ev0h");
560 for (i = 0; i < 32; i++)
561 supply_register (regcache, ev0 + i, &regset->evr[i]);
562
563 supply_register_by_name (regcache, "acc", &regset->acc);
564 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
565 }
566
567 /* Support for hardware single step. */
568
569 static int
570 ppc_supports_hardware_single_step (void)
571 {
572 return 1;
573 }
574
575 static struct regset_info ppc_regsets[] = {
576 /* List the extra register sets before GENERAL_REGS. That way we will
577 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
578 general registers. Some kernels support these, but not the newer
579 PPC_PTRACE_GETREGS. */
580 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, SIZEOF_VSXREGS, EXTENDED_REGS,
581 ppc_fill_vsxregset, ppc_store_vsxregset },
582 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, SIZEOF_VRREGS, EXTENDED_REGS,
583 ppc_fill_vrregset, ppc_store_vrregset },
584 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 32 * 4 + 8 + 4, EXTENDED_REGS,
585 ppc_fill_evrregset, ppc_store_evrregset },
586 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
587 NULL_REGSET
588 };
589
590 static struct usrregs_info ppc_usrregs_info =
591 {
592 ppc_num_regs,
593 ppc_regmap,
594 };
595
596 static struct regsets_info ppc_regsets_info =
597 {
598 ppc_regsets, /* regsets */
599 0, /* num_regsets */
600 NULL, /* disabled_regsets */
601 };
602
603 static struct regs_info regs_info =
604 {
605 NULL, /* regset_bitmap */
606 &ppc_usrregs_info,
607 &ppc_regsets_info
608 };
609
610 static const struct regs_info *
611 ppc_regs_info (void)
612 {
613 return &regs_info;
614 }
615
616 static void
617 ppc_arch_setup (void)
618 {
619 const struct target_desc *tdesc;
620 #ifdef __powerpc64__
621 long msr;
622 struct regcache *regcache;
623
624 /* On a 64-bit host, assume 64-bit inferior process with no
625 AltiVec registers. Reset ppc_hwcap to ensure that the
626 collect_register call below does not fail. */
627 tdesc = tdesc_powerpc_64l;
628 current_process ()->tdesc = tdesc;
629 ppc_hwcap = 0;
630
631 regcache = new_register_cache (tdesc);
632 fetch_inferior_registers (regcache, find_regno (tdesc, "msr"));
633 collect_register_by_name (regcache, "msr", &msr);
634 free_register_cache (regcache);
635 if (ppc64_64bit_inferior_p (msr))
636 {
637 ppc_get_auxv (AT_HWCAP, &ppc_hwcap);
638 if (ppc_hwcap & PPC_FEATURE_CELL)
639 tdesc = tdesc_powerpc_cell64l;
640 else if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
641 {
642 /* Power ISA 2.05 (implemented by Power 6 and newer processors)
643 increases the FPSCR from 32 bits to 64 bits. Even though Power 7
644 supports this ISA version, it doesn't have PPC_FEATURE_ARCH_2_05
645 set, only PPC_FEATURE_ARCH_2_06. Since for now the only bits
646 used in the higher half of the register are for Decimal Floating
647 Point, we check if that feature is available to decide the size
648 of the FPSCR. */
649 if (ppc_hwcap & PPC_FEATURE_HAS_DFP)
650 tdesc = tdesc_powerpc_isa205_vsx64l;
651 else
652 tdesc = tdesc_powerpc_vsx64l;
653 }
654 else if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
655 {
656 if (ppc_hwcap & PPC_FEATURE_HAS_DFP)
657 tdesc = tdesc_powerpc_isa205_altivec64l;
658 else
659 tdesc = tdesc_powerpc_altivec64l;
660 }
661
662 current_process ()->tdesc = tdesc;
663 return;
664 }
665 #endif
666
667 /* OK, we have a 32-bit inferior. */
668 tdesc = tdesc_powerpc_32l;
669 current_process ()->tdesc = tdesc;
670
671 ppc_get_auxv (AT_HWCAP, &ppc_hwcap);
672 if (ppc_hwcap & PPC_FEATURE_CELL)
673 tdesc = tdesc_powerpc_cell32l;
674 else if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
675 {
676 if (ppc_hwcap & PPC_FEATURE_HAS_DFP)
677 tdesc = tdesc_powerpc_isa205_vsx32l;
678 else
679 tdesc = tdesc_powerpc_vsx32l;
680 }
681 else if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
682 {
683 if (ppc_hwcap & PPC_FEATURE_HAS_DFP)
684 tdesc = tdesc_powerpc_isa205_altivec32l;
685 else
686 tdesc = tdesc_powerpc_altivec32l;
687 }
688
689 /* On 32-bit machines, check for SPE registers.
690 Set the low target's regmap field as appropriate. */
691 #ifndef __powerpc64__
692 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
693 tdesc = tdesc_powerpc_e500l;
694
695 if (!ppc_regmap_adjusted)
696 {
697 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
698 ppc_usrregs_info.regmap = ppc_regmap_e500;
699
700 /* If the FPSCR is 64-bit wide, we need to fetch the whole
701 64-bit slot and not just its second word. The PT_FPSCR
702 supplied in a 32-bit GDB compilation doesn't reflect
703 this. */
704 if (register_size (tdesc, 70) == 8)
705 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
706
707 ppc_regmap_adjusted = 1;
708 }
709 #endif
710 current_process ()->tdesc = tdesc;
711 }
712
713 /* Implementation of linux_target_ops method "supports_tracepoints". */
714
715 static int
716 ppc_supports_tracepoints (void)
717 {
718 return 1;
719 }
720
721 /* Get the thread area address. This is used to recognize which
722 thread is which when tracing with the in-process agent library. We
723 don't read anything from the address, and treat it as opaque; it's
724 the address itself that we assume is unique per-thread. */
725
726 static int
727 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
728 {
729 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
730 struct thread_info *thr = get_lwp_thread (lwp);
731 struct regcache *regcache = get_thread_regcache (thr, 1);
732 ULONGEST tp = 0;
733
734 #ifdef __powerpc64__
735 if (register_size (regcache->tdesc, 0) == 8)
736 collect_register_by_name (regcache, "r13", &tp);
737 else
738 #endif
739 collect_register_by_name (regcache, "r2", &tp);
740
741 *addr = tp;
742
743 return 0;
744 }
745
746 #ifdef __powerpc64__
747
748 /* Older glibc doesn't provide this. */
749
750 #ifndef EF_PPC64_ABI
751 #define EF_PPC64_ABI 3
752 #endif
753
754 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
755 inferiors. */
756
757 static int
758 is_elfv2_inferior (void)
759 {
760 /* To be used as a fallback if we're unable to determine the right result -
761 assume the inferior uses the same ABI as gdbserver. */
762 #if _CALL_ELF == 2
763 const int def_res = 1;
764 #else
765 const int def_res = 0;
766 #endif
767 unsigned long phdr;
768 Elf64_Ehdr ehdr;
769
770 if (!ppc_get_auxv (AT_PHDR, &phdr))
771 return def_res;
772
773 /* Assume ELF header is at the beginning of the page where program headers
774 are located. If it doesn't look like one, bail. */
775
776 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
777 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
778 return def_res;
779
780 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
781 }
782
783 #endif
784
785 /* Generate a ds-form instruction in BUF and return the number of bytes written.
786
787 0 6 11 16 30 32
788 | OPCD | RST | RA | DS |XO| */
789
790 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
791 static int
792 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
793 {
794 uint32_t insn;
795
796 gdb_assert ((opcd & ~0x3f) == 0);
797 gdb_assert ((rst & ~0x1f) == 0);
798 gdb_assert ((ra & ~0x1f) == 0);
799 gdb_assert ((xo & ~0x3) == 0);
800
801 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
802 *buf = (opcd << 26) | insn;
803 return 1;
804 }
805
806 /* The following are frequently used ds-form instructions. */
807
808 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
809 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
810 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
811 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
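/* Illustrative example (values hand-checked): GEN_STD (buf, 31, 1, 16)
   emits "std r31, 16(r1)", i.e. 0xfbe10010, and GEN_LD (buf, 2, 12, 8)
   emits "ld r2, 8(r12)", i.e. 0xe84c0008.  Because the DS field keeps
   only bits 0xfffc, the offset must be a multiple of 4.  */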
812
813 /* Generate a d-form instruction in BUF.
814
815 0 6 11 16 32
816 | OPCD | RST | RA | D | */
817
818 static int
819 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
820 {
821 uint32_t insn;
822
823 gdb_assert ((opcd & ~0x3f) == 0);
824 gdb_assert ((rst & ~0x1f) == 0);
825 gdb_assert ((ra & ~0x1f) == 0);
826
827 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
828 *buf = (opcd << 26) | insn;
829 return 1;
830 }
831
832 /* The following are frequently used d-form instructions. */
833
834 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
835 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
836 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
837 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
838 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
839 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
840 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
841 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
842 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
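/* Illustrative example (values hand-checked): GEN_ADDI (buf, 1, 1, 96)
   emits "addi r1, r1, 96" (0x38210060) and GEN_LI (buf, 3, 0) emits
   "li r3, 0" (0x38600000).  SI is a signed 16-bit immediate; larger
   constants are built with gen_limm further below.  */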
843
844 /* Generate a xfx-form instruction in BUF and return the number of bytes
845 written.
846
847 0 6 11 21 31 32
848 | OPCD | RST | RI | XO |/| */
849
850 static int
851 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
852 {
853 uint32_t insn;
854 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
855
856 gdb_assert ((opcd & ~0x3f) == 0);
857 gdb_assert ((rst & ~0x1f) == 0);
858 gdb_assert ((xo & ~0x3ff) == 0);
859
860 insn = (rst << 21) | (n << 11) | (xo << 1);
861 *buf = (opcd << 26) | insn;
862 return 1;
863 }
864
865 /* The following are frequently used xfx-form instructions. */
866
867 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
868 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
869 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
870 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
871 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
872 E & 0xf, 598)
873 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
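/* Illustrative example (values hand-checked): GEN_MFSPR (buf, 5, 8)
   emits "mflr r5" (0x7ca802a6) and GEN_MTSPR (buf, 12, 9) emits
   "mtctr r12" (0x7d8903a6).  gen_xfx_form swaps the two 5-bit halves
   of the SPR number, as the mfspr/mtspr encoding requires.  */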
874
875
876 /* Generate a x-form instruction in BUF and return the number of bytes written.
877
878 0 6 11 16 21 31 32
879 | OPCD | RST | RA | RB | XO |RC| */
880
881 static int
882 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
883 {
884 uint32_t insn;
885
886 gdb_assert ((opcd & ~0x3f) == 0);
887 gdb_assert ((rst & ~0x1f) == 0);
888 gdb_assert ((ra & ~0x1f) == 0);
889 gdb_assert ((rb & ~0x1f) == 0);
890 gdb_assert ((xo & ~0x3ff) == 0);
891 gdb_assert ((rc & ~1) == 0);
892
893 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
894 *buf = (opcd << 26) | insn;
895 return 1;
896 }
897
898 /* The following are frequently used x-form instructions. */
899
900 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
901 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
902 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
903 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
904 /* Assume bf = cr7. */
905 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
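/* Illustrative example (values hand-checked): GEN_MR (buf, 3, 4) emits
   "mr r3, r4", i.e. "or r3, r4, r4" (0x7c832378), and
   GEN_STWCX (buf, 5, 0, 6) emits "stwcx. r5, 0, r6" with the record
   bit set, as used by the atomic-exchange sequence below.  */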
906
907
908 /* Generate an md-form instruction in BUF and return the number of bytes written.
909
910 0 6 11 16 21 27 30 31 32
911 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
912
913 static int
914 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
915 int xo, int rc)
916 {
917 uint32_t insn;
918 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
919 unsigned int sh0_4 = sh & 0x1f;
920 unsigned int sh5 = (sh >> 5) & 1;
921
922 gdb_assert ((opcd & ~0x3f) == 0);
923 gdb_assert ((rs & ~0x1f) == 0);
924 gdb_assert ((ra & ~0x1f) == 0);
925 gdb_assert ((sh & ~0x3f) == 0);
926 gdb_assert ((mb & ~0x3f) == 0);
927 gdb_assert ((xo & ~0x7) == 0);
928 gdb_assert ((rc & ~0x1) == 0);
929
930 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
931 | (sh5 << 1) | (xo << 2) | (rc & 1);
932 *buf = (opcd << 26) | insn;
933 return 1;
934 }
935
936 /* The following are frequently used md-form instructions. */
937
938 #define GEN_RLDICL(buf, ra, rs, sh, mb) \
939 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
940 #define GEN_RLDICR(buf, ra, rs, sh, mb) \
941 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
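/* Illustrative example (value hand-checked): GEN_RLDICL (buf, 3, 3, 0, 32)
   emits "rldicl r3, r3, 0, 32" (0x78630020), the canonical
   "clrldi r3, r3, 32" that gen_limm uses to clear the upper 32 bits of
   a register.  */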
942
943 /* Generate an i-form instruction in BUF and return the number of bytes written.
944
945 0 6 30 31 32
946 | OPCD | LI |AA|LK| */
947
948 static int
949 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
950 {
951 uint32_t insn;
952
953 gdb_assert ((opcd & ~0x3f) == 0);
954
955 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
956 *buf = (opcd << 26) | insn;
957 return 1;
958 }
959
960 /* The following are frequently used i-form instructions. */
961
962 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
963 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
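/* Illustrative example (value hand-checked): GEN_B (buf, 8) emits
   "b .+8" (0x48000008).  With AA = 0 the LI operand is a byte offset
   relative to the branch itself and must be a multiple of 4.  */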
964
965 /* Generate a b-form instruction in BUF and return the number of bytes written.
966
967 0 6 11 16 30 31 32
968 | OPCD | BO | BI | BD |AA|LK| */
969
970 static int
971 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
972 int aa, int lk)
973 {
974 uint32_t insn;
975
976 gdb_assert ((opcd & ~0x3f) == 0);
977 gdb_assert ((bo & ~0x1f) == 0);
978 gdb_assert ((bi & ~0x1f) == 0);
979
980 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
981 *buf = (opcd << 26) | insn;
982 return 1;
983 }
984
985 /* The following are frequently used b-form instructions. */
986 /* Assume bi = cr7. */
987 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
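/* Illustrative example (value hand-checked): GEN_BNE (buf, -8) emits
   "bne cr7, .-8" (0x409efff8), branching back two instructions when the
   EQ bit of cr7 is clear; this backs the retry loop in gen_atomic_xchg
   below.  */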
988
989 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
990 respectively. They are primarily used to save/restore GPRs in the jump pad,
991 not for bytecode compilation. */
992
993 #ifdef __powerpc64__
994 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
995 GEN_LD (buf, rt, ra, si) : \
996 GEN_LWZ (buf, rt, ra, si))
997 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
998 GEN_STD (buf, rt, ra, si) : \
999 GEN_STW (buf, rt, ra, si))
1000 #else
1001 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1002 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1003 #endif
1004
1005 /* Generate a sequence of instructions to load IMM in the register REG.
1006 Write the instructions in BUF and return the number of bytes written. */
1007
1008 static int
1009 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1010 {
1011 uint32_t *p = buf;
1012
1013 if ((imm + 32768) < 65536)
1014 {
1015 /* li reg, imm[15:0] */
1016 p += GEN_LI (p, reg, imm);
1017 }
1018 else if ((imm >> 32) == 0)
1019 {
1020 /* lis reg, imm[31:16]
1021 ori reg, reg, imm[15:0]
1022 rldicl reg, reg, 0, 32 */
1023 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1024 if ((imm & 0xffff) != 0)
1025 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1026 /* Clear upper 32-bit if sign-bit is set. */
1027 if (imm & (1u << 31) && is_64)
1028 p += GEN_RLDICL (p, reg, reg, 0, 32);
1029 }
1030 else
1031 {
1032 gdb_assert (is_64);
1033 /* lis reg, <imm[63:48]>
1034 ori reg, reg, <imm[48:32]>
1035 rldicr reg, reg, 32, 31
1036 oris reg, reg, <imm[31:16]>
1037 ori reg, reg, <imm[15:0]> */
1038 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1039 if (((imm >> 32) & 0xffff) != 0)
1040 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1041 p += GEN_RLDICR (p, reg, reg, 32, 31);
1042 if (((imm >> 16) & 0xffff) != 0)
1043 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1044 if ((imm & 0xffff) != 0)
1045 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1046 }
1047
1048 return p - buf;
1049 }
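/* Worked example of the cases above (illustrative values): with is_64
   set, gen_limm (p, 3, 0xdeadbeef, 1) expands to

     lis    r3, 0xdead
     ori    r3, r3, 0xbeef
     rldicl r3, r3, 0, 32     (clear the sign-extended upper half)

   while a full 64-bit constant such as 0x123456789abcdef0 takes the
   five-instruction lis/ori/rldicr/oris/ori sequence described in the
   comment inside the else branch.  */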
1050
1051 /* Generate a sequence for an atomic exchange at location LOCK.
1052 This code sequence clobbers r6, r7, r8. LOCK is the location for
1053 the atomic exchange, OLD_VALUE is the expected old value stored in the
1054 location, and R_NEW is the register holding the new value. */
1055
1056 static int
1057 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1058 int is_64)
1059 {
1060 const int r_lock = 6;
1061 const int r_old = 7;
1062 const int r_tmp = 8;
1063 uint32_t *p = buf;
1064
1065 /*
1066 1: lwarx TMP, 0, LOCK
1067 cmpwi TMP, OLD
1068 bne 1b
1069 stwcx. NEW, 0, LOCK
1070 bne 1b */
1071
1072 p += gen_limm (p, r_lock, lock, is_64);
1073 p += gen_limm (p, r_old, old_value, is_64);
1074
1075 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1076 p += GEN_CMPW (p, r_tmp, r_old);
1077 p += GEN_BNE (p, -8);
1078 p += GEN_STWCX (p, r_new, 0, r_lock);
1079 p += GEN_BNE (p, -16);
1080
1081 return p - buf;
1082 }
1083
1084 /* Generate a sequence of instructions for calling a function
1085 at address FN. Return the number of bytes written in BUF. */
1086
1087 static int
1088 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1089 {
1090 uint32_t *p = buf;
1091
1092 /* The call must go through r12 so the callee can compute its TOC address. */
1093 p += gen_limm (p, 12, fn, is_64);
1094 if (is_opd)
1095 {
1096 p += GEN_LOAD (p, 11, 12, 16, is_64);
1097 p += GEN_LOAD (p, 2, 12, 8, is_64);
1098 p += GEN_LOAD (p, 12, 12, 0, is_64);
1099 }
1100 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1101 *p++ = 0x4e800421; /* bctrl */
1102
1103 return p - buf;
1104 }
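/* For example (illustrative, assuming an ELFv1 inferior where FN is the
   address of a function descriptor), gen_call (p, fn, 1, 1) emits
   roughly

     <gen_limm sequence loading FN into r12>
     ld    r11, 16(r12)       (environment pointer)
     ld    r2, 8(r12)         (callee's TOC pointer)
     ld    r12, 0(r12)        (actual entry point)
     mtctr r12
     bctrl

   whereas with is_opd == 0 (ELFv2, or 32-bit) FN is the entry point
   itself and only the gen_limm/mtctr/bctrl part is emitted.  */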
1105
1106 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1107 of instruction. This function is used to adjust pc-relative instructions
1108 when copying. */
1109
1110 static void
1111 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1112 {
1113 uint32_t insn, op6;
1114 long rel, newrel;
1115
1116 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1117 op6 = PPC_OP6 (insn);
1118
1119 if (op6 == 18 && (insn & 2) == 0)
1120 {
1121 /* branch && AA = 0 */
1122 rel = PPC_LI (insn);
1123 newrel = (oldloc - *to) + rel;
1124
1125 /* Out of range. Cannot relocate instruction. */
1126 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1127 return;
1128
1129 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1130 }
1131 else if (op6 == 16 && (insn & 2) == 0)
1132 {
1133 /* conditional branch && AA = 0 */
1134
1135 /* If the new relocation is too big for even a 26-bit unconditional
1136 branch, there is nothing we can do. Just abort.
1137
1138 Otherwise, if it fits in a 16-bit conditional branch, just
1139 copy the instruction and relocate the address.
1140
1141 If it's too big for a 16-bit conditional branch, try to invert the
1142 condition and jump with a 26-bit branch. For example,
1143
1144 beq .Lgoto
1145 INSN1
1146
1147 =>
1148
1149 bne 1f (+8)
1150 b .Lgoto
1151 1:INSN1
1152
1153 After this transform, we actually jump from *TO+4 instead of *TO,
1154 so check the relocation again because it will be one instruction farther
1155 than before if *TO is after OLDLOC.
1156
1157
1158 A BDNZT (or similar) is transformed from
1159
1160 bdnzt eq, .Lgoto
1161 INSN1
1162
1163 =>
1164
1165 bdz 1f (+12)
1166 bf eq, 1f (+8)
1167 b .Lgoto
1168 1:INSN1
1169
1170 See also "BO field encodings". */
1171
1172 rel = PPC_BD (insn);
1173 newrel = (oldloc - *to) + rel;
1174
1175 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1176 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1177 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1178 {
1179 newrel -= 4;
1180
1181 /* Out of range. Cannot relocate instruction. */
1182 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1183 return;
1184
1185 if ((PPC_BO (insn) & 0x14) == 0x4)
1186 insn ^= (1 << 24);
1187 else if ((PPC_BO (insn) & 0x14) == 0x10)
1188 insn ^= (1 << 22);
1189
1190 /* Jump over the unconditional branch. */
1191 insn = (insn & ~0xfffc) | 0x8;
1192 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1193 *to += 4;
1194
1195 /* Build an unconditional branch and copy the LK bit. */
1196 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1197 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1198 *to += 4;
1199
1200 return;
1201 }
1202 else if ((PPC_BO (insn) & 0x14) == 0)
1203 {
1204 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1205 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1206
1207 newrel -= 8;
1208
1209 /* Out of range. Cannot relocate instruction. */
1210 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1211 return;
1212
1213 /* Copy BI field. */
1214 bf_insn |= (insn & 0x1f0000);
1215
1216 /* Invert condition. */
1217 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1218 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1219
1220 write_inferior_memory (*to, (unsigned char *) &bdnz_insn, 4);
1221 *to += 4;
1222 write_inferior_memory (*to, (unsigned char *) &bf_insn, 4);
1223 *to += 4;
1224
1225 /* Build an unconditional branch and copy the LK bit. */
1226 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1227 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1228 *to += 4;
1229
1230 return;
1231 }
1232 else /* (BO & 0x14) == 0x14, branch always. */
1233 {
1234 /* Out of range. Cannot relocate instruction. */
1235 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1236 return;
1237
1238 /* Build an unconditional branch and copy the LK bit. */
1239 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1240 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1241 *to += 4;
1242
1243 return;
1244 }
1245 }
1246
1247 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1248 *to += 4;
1249 }
1250
1251 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1252 See target.h for details. */
1253
1254 static int
1255 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1256 CORE_ADDR collector,
1257 CORE_ADDR lockaddr,
1258 ULONGEST orig_size,
1259 CORE_ADDR *jump_entry,
1260 CORE_ADDR *trampoline,
1261 ULONGEST *trampoline_size,
1262 unsigned char *jjump_pad_insn,
1263 ULONGEST *jjump_pad_insn_size,
1264 CORE_ADDR *adjusted_insn_addr,
1265 CORE_ADDR *adjusted_insn_addr_end,
1266 char *err)
1267 {
1268 uint32_t buf[256];
1269 uint32_t *p = buf;
1270 int j, offset;
1271 CORE_ADDR buildaddr = *jump_entry;
1272 const CORE_ADDR entryaddr = *jump_entry;
1273 int rsz, min_frame, frame_size, tp_reg;
1274 #ifdef __powerpc64__
1275 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1276 int is_64 = register_size (regcache->tdesc, 0) == 8;
1277 int is_opd = is_64 && !is_elfv2_inferior ();
1278 #else
1279 int is_64 = 0, is_opd = 0;
1280 #endif
1281
1282 #ifdef __powerpc64__
1283 if (is_64)
1284 {
1285 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1286 rsz = 8;
1287 min_frame = 112;
1288 frame_size = (40 * rsz) + min_frame;
1289 tp_reg = 13;
1290 }
1291 else
1292 {
1293 #endif
1294 rsz = 4;
1295 min_frame = 16;
1296 frame_size = (40 * rsz) + min_frame;
1297 tp_reg = 2;
1298 #ifdef __powerpc64__
1299 }
1300 #endif
1301
1302 /* Stack frame layout for this jump pad,
1303
1304 High thread_area (r13/r2) |
1305 tpoint - collecting_t obj
1306 PC/<tpaddr> | +36
1307 CTR | +35
1308 LR | +34
1309 XER | +33
1310 CR | +32
1311 R31 |
1312 R29 |
1313 ... |
1314 R1 | +1
1315 R0 - collected registers
1316 ... |
1317 ... |
1318 Low Back-chain -
1319
1320
1321 The code flow of this jump pad,
1322
1323 1. Adjust SP
1324 2. Save GPR and SPR
1325 3. Prepare argument
1326 4. Call gdb_collector
1327 5. Restore GPR and SPR
1328 6. Restore SP
1329 7. Build a jump back to the program
1330 8. Copy/relocate original instruction
1331 9. Build a jump to replace the original instruction. */
1332
1333 /* Adjust stack pointer. */
1334 if (is_64)
1335 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1336 else
1337 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1338
1339 /* Store GPRs. Save R1 later, because it had just been modified, but
1340 we want the original value. */
1341 for (j = 2; j < 32; j++)
1342 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1343 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1344 /* Set r0 to the original value of r1 before adjusting stack frame,
1345 and then save it. */
1346 p += GEN_ADDI (p, 0, 1, frame_size);
1347 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1348
1349 /* Save CR, XER, LR, and CTR. */
1350 p += GEN_MFCR (p, 3); /* mfcr r3 */
1351 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1352 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1353 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1354 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1355 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1356 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1357 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1358
1359 /* Save PC<tpaddr> */
1360 p += gen_limm (p, 3, tpaddr, is_64);
1361 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1362
1363
1364 /* Setup arguments to collector. */
1365 /* Set r4 to collected registers. */
1366 p += GEN_ADDI (p, 4, 1, min_frame);
1367 /* Set r3 to TPOINT. */
1368 p += gen_limm (p, 3, tpoint, is_64);
1369
1370 /* Prepare collecting_t object for lock. */
1371 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1372 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1373 /* Set R5 to collecting object. */
1374 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1375
1376 p += GEN_LWSYNC (p);
1377 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1378 p += GEN_LWSYNC (p);
1379
1380 /* Call to collector. */
1381 p += gen_call (p, collector, is_64, is_opd);
1382
1383 /* Simply write 0 to release the lock. */
1384 p += gen_limm (p, 3, lockaddr, is_64);
1385 p += gen_limm (p, 4, 0, is_64);
1386 p += GEN_LWSYNC (p);
1387 p += GEN_STORE (p, 4, 3, 0, is_64);
1388
1389 /* Restore stack and registers. */
1390 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1391 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1392 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1393 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1394 p += GEN_MTCR (p, 3); /* mtcr r3 */
1395 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1396 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1397 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1398
1399 /* Restore GPRs. */
1400 for (j = 2; j < 32; j++)
1401 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1402 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1403 /* Restore SP. */
1404 p += GEN_ADDI (p, 1, 1, frame_size);
1405
1406 /* Flush instructions to inferior memory. */
1407 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1408
1409 /* Now, insert the original instruction to execute in the jump pad. */
1410 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1411 *adjusted_insn_addr_end = *adjusted_insn_addr;
1412 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1413
1414 /* Verify the relocation size. It should be 4 for a normal copy, or
1415 8 or 12 for some conditional branches. */
1416 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1417 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1418 {
1419 sprintf (err, "E.Unexpected instruction length = %d "
1420 "when relocating instruction.",
1421 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1422 return 1;
1423 }
1424
1425 buildaddr = *adjusted_insn_addr_end;
1426 p = buf;
1427 /* Finally, write a jump back to the program. */
1428 offset = (tpaddr + 4) - buildaddr;
1429 if (offset >= (1 << 25) || offset < -(1 << 25))
1430 {
1431 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1432 "(offset 0x%x > 26-bit).", offset);
1433 return 1;
1434 }
1435 /* b <tpaddr+4> */
1436 p += GEN_B (p, offset);
1437 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1438 *jump_entry = buildaddr + (p - buf) * 4;
1439
1440 /* The jump pad is now built. Wire in a jump to our jump pad. This
1441 is always done last (by our caller actually), so that we can
1442 install fast tracepoints with threads running. This relies on
1443 the agent's atomic write support. */
1444 offset = entryaddr - tpaddr;
1445 if (offset >= (1 << 25) || offset < -(1 << 25))
1446 {
1447 sprintf (err, "E.Jump to jump pad too far from tracepoint "
1448 "(offset 0x%x > 26-bit).", offset);
1449 return 1;
1450 }
1451 /* b <jentry> */
1452 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1453 *jjump_pad_insn_size = 4;
1454
1455 return 0;
1456 }
1457
1458 /* Returns the minimum instruction length for installing a tracepoint. */
1459
1460 static int
1461 ppc_get_min_fast_tracepoint_insn_len (void)
1462 {
1463 return 4;
1464 }
1465
1466 /* Emits a given buffer into the target at current_insn_ptr. Length
1467 is in units of 32-bit words. */
1468
1469 static void
1470 emit_insns (uint32_t *buf, int n)
1471 {
1472 n = n * sizeof (uint32_t);
1473 write_inferior_memory (current_insn_ptr, (unsigned char *) buf, n);
1474 current_insn_ptr += n;
1475 }
1476
1477 #define __EMIT_ASM(NAME, INSNS) \
1478 do \
1479 { \
1480 extern uint32_t start_bcax_ ## NAME []; \
1481 extern uint32_t end_bcax_ ## NAME []; \
1482 emit_insns (start_bcax_ ## NAME, \
1483 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1484 __asm__ (".section .text.__ppcbcax\n\t" \
1485 "start_bcax_" #NAME ":\n\t" \
1486 INSNS "\n\t" \
1487 "end_bcax_" #NAME ":\n\t" \
1488 ".previous\n\t"); \
1489 } while (0)
1490
1491 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1492 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
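/* Sketch of how EMIT_ASM is used (hypothetical expansion for
   illustration): writing

     EMIT_ASM ("li 3, 0 \n"
               "li 4, 0 \n");

   at source line 1500 expands to __EMIT_ASM (1500, ...), which assembles
   the two instructions into gdbserver's own .text.__ppcbcax section,
   brackets them with start_bcax_1500/end_bcax_1500 symbols, and then
   copies that range into the inferior at current_insn_ptr via
   emit_insns.  The emitted code is only ever executed in the inferior,
   never inside gdbserver.  */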
1493
1494 /*
1495
1496 Bytecode execution stack frame - 32-bit
1497
1498 | LR save area (SP + 4)
1499 SP' -> +- Back chain (SP + 0)
1500 | Save r31 for accessing saved arguments
1501 | Save r30 for bytecode stack pointer
1502 | Save r4 for incoming argument *value
1503 | Save r3 for incoming argument regs
1504 r30 -> +- Bytecode execution stack
1505 |
1506 | 64 bytes (8 doublewords) initially.
1507 | Expand stack as needed.
1508 |
1509 +-
1510 | Some padding for minimum stack frame and 16-byte alignment.
1511 | 16 bytes.
1512 SP +- Back-chain (SP')
1513
1514 initial frame size
1515 = 16 + (4 * 4) + 64
1516 = 96
1517
1518 r30 is the stack pointer for the bytecode machine.
1519 It should point to the next empty slot, so we can use a load-with-update (lwzu) for pop.
1520 r3 caches the high part of the TOP value.
1521 It was the first argument, the pointer to regs.
1522 r4 caches the low part of the TOP value.
1523 It was the second argument, the pointer to the result.
1524 We should set *result = TOP before returning from this function.
1525
1526 Note:
1527 * To restore stack at epilogue
1528 => sp = r31
1529 * To check stack is big enough for bytecode execution.
1530 => r30 - 8 > SP + 8
1531 * To return execution result.
1532 => 0(r4) = TOP
1533
1534 */
1535
1536 /* Regardless of endianness, register 3 is always the high part and 4 the low part.
1537 These defines are used when the register pair is stored/loaded.
1538 Likewise, to simplify the code, there is a similar define for 5:6. */
1539
1540 #if __BYTE_ORDER == __LITTLE_ENDIAN
1541 #define TOP_FIRST "4"
1542 #define TOP_SECOND "3"
1543 #define TMP_FIRST "6"
1544 #define TMP_SECOND "5"
1545 #else
1546 #define TOP_FIRST "3"
1547 #define TOP_SECOND "4"
1548 #define TMP_FIRST "5"
1549 #define TMP_SECOND "6"
1550 #endif
1551
1552 /* Emit prologue in inferior memory. See above comments. */
1553
1554 static void
1555 ppc_emit_prologue (void)
1556 {
1557 EMIT_ASM (/* Save return address. */
1558 "mflr 0 \n"
1559 "stw 0, 4(1) \n"
1560 /* Adjust SP. 96 is the initial frame size. */
1561 "stwu 1, -96(1) \n"
1562 /* Save r31, r30 and the incoming arguments. */
1563 "stw 31, 96-4(1) \n"
1564 "stw 30, 96-8(1) \n"
1565 "stw 4, 96-12(1) \n"
1566 "stw 3, 96-16(1) \n"
1567 /* Point r31 to the original r1 for accessing arguments. */
1568 "addi 31, 1, 96 \n"
1569 /* Set r30 to point to the stack top. */
1570 "addi 30, 1, 64 \n"
1571 /* Initialize TOP (r3:r4) to 0. */
1572 "li 3, 0 \n"
1573 "li 4, 0 \n");
1574 }
1575
1576 /* Emit epilogue in inferior memory. See above comments. */
1577
1578 static void
1579 ppc_emit_epilogue (void)
1580 {
1581 EMIT_ASM (/* *result = TOP */
1582 "lwz 5, -12(31) \n"
1583 "stw " TOP_FIRST ", 0(5) \n"
1584 "stw " TOP_SECOND ", 4(5) \n"
1585 /* Restore registers. */
1586 "lwz 31, -4(31) \n"
1587 "lwz 30, -8(31) \n"
1588 /* Restore SP. */
1589 "lwz 1, 0(1) \n"
1590 /* Restore LR. */
1591 "lwz 0, 4(1) \n"
1592 /* Return 0 for no-error. */
1593 "li 3, 0 \n"
1594 "mtlr 0 \n"
1595 "blr \n");
1596 }
1597
1598 /* TOP = stack[--sp] + TOP */
1599
1600 static void
1601 ppc_emit_add (void)
1602 {
1603 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1604 "lwz " TMP_SECOND ", 4(30)\n"
1605 "addc 4, 6, 4 \n"
1606 "adde 3, 5, 3 \n");
1607 }
1608
1609 /* TOP = stack[--sp] - TOP */
1610
1611 static void
1612 ppc_emit_sub (void)
1613 {
1614 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1615 "lwz " TMP_SECOND ", 4(30) \n"
1616 "subfc 4, 4, 6 \n"
1617 "subfe 3, 3, 5 \n");
1618 }
1619
1620 /* TOP = stack[--sp] * TOP */
1621
1622 static void
1623 ppc_emit_mul (void)
1624 {
1625 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1626 "lwz " TMP_SECOND ", 4(30) \n"
1627 "mulhwu 7, 6, 4 \n"
1628 "mullw 3, 6, 3 \n"
1629 "mullw 5, 4, 5 \n"
1630 "mullw 4, 6, 4 \n"
1631 "add 3, 5, 3 \n"
1632 "add 3, 7, 3 \n");
1633 }
1634
1635 /* TOP = stack[--sp] << TOP */
1636
1637 static void
1638 ppc_emit_lsh (void)
1639 {
1640 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1641 "lwz " TMP_SECOND ", 4(30) \n"
1642 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1643 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1644 "slw 5, 5, 4\n" /* Shift high part left */
1645 "slw 4, 6, 4\n" /* Shift low part left */
1646 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1647 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1648 "or 3, 5, 3\n"
1649 "or 3, 7, 3\n"); /* Assemble high part */
1650 }
1651
1652 /* TOP = stack[--sp] >> TOP
1653 (Arithmetic shift right) */
1654
1655 static void
1656 ppc_emit_rsh_signed (void)
1657 {
1658 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1659 "lwz " TMP_SECOND ", 4(30) \n"
1660 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1661 "sraw 3, 5, 4\n" /* Shift high part right */
1662 "cmpwi 7, 1\n"
1663 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1664 "sraw 4, 5, 7\n" /* Shift high to low */
1665 "b 2f\n"
1666 "1:\n"
1667 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1668 "srw 4, 6, 4\n" /* Shift low part right */
1669 "slw 5, 5, 7\n" /* Shift high to low */
1670 "or 4, 4, 5\n" /* Assemble low part */
1671 "2:\n");
1672 }
1673
1674 /* TOP = stack[--sp] >> TOP
1675 (Logical shift right) */
1676
1677 static void
1678 ppc_emit_rsh_unsigned (void)
1679 {
1680 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1681 "lwz " TMP_SECOND ", 4(30) \n"
1682 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1683 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1684 "srw 6, 6, 4\n" /* Shift low part right */
1685 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1686 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1687 "or 6, 6, 3\n"
1688 "srw 3, 5, 4\n" /* Shift high part right */
1689 "or 4, 6, 7\n"); /* Assemble low part */
1690 }
1691
1692 /* Emit code for sign-extension specified by ARG. */
1693
1694 static void
1695 ppc_emit_ext (int arg)
1696 {
1697 switch (arg)
1698 {
1699 case 8:
1700 EMIT_ASM ("extsb 4, 4\n"
1701 "srawi 3, 4, 31");
1702 break;
1703 case 16:
1704 EMIT_ASM ("extsh 4, 4\n"
1705 "srawi 3, 4, 31");
1706 break;
1707 case 32:
1708 EMIT_ASM ("srawi 3, 4, 31");
1709 break;
1710 default:
1711 emit_error = 1;
1712 }
1713 }
1714
1715 /* Emit code for zero-extension specified by ARG. */
1716
1717 static void
1718 ppc_emit_zero_ext (int arg)
1719 {
1720 switch (arg)
1721 {
1722 case 8:
1723 EMIT_ASM ("clrlwi 4,4,24\n"
1724 "li 3, 0\n");
1725 break;
1726 case 16:
1727 EMIT_ASM ("clrlwi 4,4,16\n"
1728 "li 3, 0\n");
1729 break;
1730 case 32:
1731 EMIT_ASM ("li 3, 0");
1732 break;
1733 default:
1734 emit_error = 1;
1735 }
1736 }
1737
1738 /* TOP = !TOP
1739 i.e., TOP = (TOP == 0) ? 1 : 0; */
1740
1741 static void
1742 ppc_emit_log_not (void)
1743 {
1744 EMIT_ASM ("or 4, 3, 4 \n"
1745 "cntlzw 4, 4 \n"
1746 "srwi 4, 4, 5 \n"
1747 "li 3, 0 \n");
1748 }
1749
1750 /* TOP = stack[--sp] & TOP */
1751
1752 static void
1753 ppc_emit_bit_and (void)
1754 {
1755 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1756 "lwz " TMP_SECOND ", 4(30) \n"
1757 "and 4, 6, 4 \n"
1758 "and 3, 5, 3 \n");
1759 }
1760
1761 /* TOP = stack[--sp] | TOP */
1762
1763 static void
1764 ppc_emit_bit_or (void)
1765 {
1766 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1767 "lwz " TMP_SECOND ", 4(30) \n"
1768 "or 4, 6, 4 \n"
1769 "or 3, 5, 3 \n");
1770 }
1771
1772 /* TOP = stack[--sp] ^ TOP */
1773
1774 static void
1775 ppc_emit_bit_xor (void)
1776 {
1777 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1778 "lwz " TMP_SECOND ", 4(30) \n"
1779 "xor 4, 6, 4 \n"
1780 "xor 3, 5, 3 \n");
1781 }
1782
1783 /* TOP = ~TOP
1784 i.e., TOP = ~(TOP | TOP) */
1785
1786 static void
1787 ppc_emit_bit_not (void)
1788 {
1789 EMIT_ASM ("nor 3, 3, 3 \n"
1790 "nor 4, 4, 4 \n");
1791 }
1792
1793 /* TOP = stack[--sp] == TOP */
1794
1795 static void
1796 ppc_emit_equal (void)
1797 {
1798 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1799 "lwz " TMP_SECOND ", 4(30) \n"
1800 "xor 4, 6, 4 \n"
1801 "xor 3, 5, 3 \n"
1802 "or 4, 3, 4 \n"
1803 "cntlzw 4, 4 \n"
1804 "srwi 4, 4, 5 \n"
1805 "li 3, 0 \n");
1806 }
1807
1808 /* TOP = stack[--sp] < TOP
1809 (Signed comparison) */
1810
1811 static void
1812 ppc_emit_less_signed (void)
1813 {
1814 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1815 "lwz " TMP_SECOND ", 4(30) \n"
1816 "cmplw 6, 6, 4 \n"
1817 "cmpw 7, 5, 3 \n"
1818 /* CR6 bit 0 = low less and high equal */
1819 "crand 6*4+0, 6*4+0, 7*4+2\n"
1820 /* CR7 bit 0 = (low less and high equal) or high less */
1821 "cror 7*4+0, 7*4+0, 6*4+0\n"
1822 "mfcr 4 \n"
1823 "rlwinm 4, 4, 29, 31, 31 \n"
1824 "li 3, 0 \n");
1825 }
1826
1827 /* TOP = stack[--sp] < TOP
1828 (Unsigned comparison) */
1829
1830 static void
1831 ppc_emit_less_unsigned (void)
1832 {
1833 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1834 "lwz " TMP_SECOND ", 4(30) \n"
1835 "cmplw 6, 6, 4 \n"
1836 "cmplw 7, 5, 3 \n"
1837 /* CR6 bit 0 = low less and high equal */
1838 "crand 6*4+0, 6*4+0, 7*4+2\n"
1839 /* CR7 bit 0 = (low less and high equal) or high less */
1840 "cror 7*4+0, 7*4+0, 6*4+0\n"
1841 "mfcr 4 \n"
1842 "rlwinm 4, 4, 29, 31, 31 \n"
1843 "li 3, 0 \n");
1844 }
1845
1846 /* Dereference the memory address in TOP, reading SIZE bytes.
1847 Zero-extend the value read. */
1848
1849 static void
1850 ppc_emit_ref (int size)
1851 {
1852 switch (size)
1853 {
1854 case 1:
1855 EMIT_ASM ("lbz 4, 0(4)\n"
1856 "li 3, 0");
1857 break;
1858 case 2:
1859 EMIT_ASM ("lhz 4, 0(4)\n"
1860 "li 3, 0");
1861 break;
1862 case 4:
1863 EMIT_ASM ("lwz 4, 0(4)\n"
1864 "li 3, 0");
1865 break;
1866 case 8:
1867 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1868 EMIT_ASM ("lwz 3, 4(4)\n"
1869 "lwz 4, 0(4)");
1870 else
1871 EMIT_ASM ("lwz 3, 0(4)\n"
1872 "lwz 4, 4(4)");
1873 break;
1874 }
1875 }
1876
1877 /* TOP = NUM */
1878
1879 static void
1880 ppc_emit_const (LONGEST num)
1881 {
1882 uint32_t buf[10];
1883 uint32_t *p = buf;
1884
1885 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
1886 p += gen_limm (p, 4, num & 0xffffffff, 0);
1887
1888 emit_insns (buf, p - buf);
1889 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1890 }
1891
1892 /* Set TOP to the value of register REG by calling the get_raw_reg function
1893 with two arguments: the collected-registers buffer and the register number. */
1894
1895 static void
1896 ppc_emit_reg (int reg)
1897 {
1898 uint32_t buf[13];
1899 uint32_t *p = buf;
1900
1901 /* fctx->regs is passed in r3 and then saved in -16(31). */
1902 p += GEN_LWZ (p, 3, 31, -16);
1903 p += GEN_LI (p, 4, reg); /* li r4, reg */
1904 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
1905
1906 emit_insns (buf, p - buf);
1907 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1908
1909 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1910 {
1911 EMIT_ASM ("mr 5, 4\n"
1912 "mr 4, 3\n"
1913 "mr 3, 5\n");
1914 }
1915 }
1916
1917 /* TOP = stack[--sp] */
1918
1919 static void
1920 ppc_emit_pop (void)
1921 {
1922 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
1923 "lwz " TOP_SECOND ", 4(30) \n");
1924 }
1925
1926 /* stack[sp++] = TOP
1927
1928 Because we may use up the bytecode stack, expand it by 8 more doublewords
1929 if needed. */
1930
1931 static void
1932 ppc_emit_stack_flush (void)
1933 {
1934 /* Make sure the bytecode stack is big enough before pushing.
1935 Otherwise, expand it by another 64 bytes. */
1936
1937 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
1938 " stw " TOP_SECOND ", 4(30)\n"
1939 " addi 5, 30, -(8 + 8) \n"
1940 " cmpw 7, 5, 1 \n"
1941 " bgt 7, 1f \n"
1942 " stwu 31, -64(1) \n"
1943 "1:addi 30, 30, -8 \n");
1944 }
1945
1946 /* Swap TOP and stack[sp-1] */
1947
1948 static void
1949 ppc_emit_swap (void)
1950 {
1951 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
1952 "lwz " TMP_SECOND ", 12(30) \n"
1953 "stw " TOP_FIRST ", 8(30) \n"
1954 "stw " TOP_SECOND ", 12(30) \n"
1955 "mr 3, 5 \n"
1956 "mr 4, 6 \n");
1957 }
1958
1959 /* Discard N elements from the stack. Also used for ppc64. */
1960
1961 static void
1962 ppc_emit_stack_adjust (int n)
1963 {
1964 uint32_t buf[6];
1965 uint32_t *p = buf;
1966
1967 n = n << 3;
1968 if ((n >> 15) != 0)
1969 {
1970 emit_error = 1;
1971 return;
1972 }
1973
1974 p += GEN_ADDI (p, 30, 30, n);
1975
1976 emit_insns (buf, p - buf);
1977 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1978 }
1979
1980 /* Call function FN. */
1981
1982 static void
1983 ppc_emit_call (CORE_ADDR fn)
1984 {
1985 uint32_t buf[11];
1986 uint32_t *p = buf;
1987
1988 p += gen_call (p, fn, 0, 0);
1989
1990 emit_insns (buf, p - buf);
1991 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1992 }
1993
1994 /* FN's prototype is `LONGEST(*fn)(int)'.
1995 TOP = fn (arg1)
1996 */
1997
1998 static void
1999 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2000 {
2001 uint32_t buf[15];
2002 uint32_t *p = buf;
2003
2004 /* Set up the argument. arg1 is a 16-bit value. */
2005 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2006 p += gen_call (p, fn, 0, 0);
2007
2008 emit_insns (buf, p - buf);
2009 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2010
2011 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2012 {
2013 EMIT_ASM ("mr 5, 4\n"
2014 "mr 4, 3\n"
2015 "mr 3, 5\n");
2016 }
2017 }
2018
2019 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2020 fn (arg1, TOP)
2021
2022 TOP should be preserved/restored before/after the call. */
2023
2024 static void
2025 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2026 {
2027 uint32_t buf[21];
2028 uint32_t *p = buf;
2029
2030 /* Save TOP. 0(30) is the next empty slot. */
2031 p += GEN_STW (p, 3, 30, 0);
2032 p += GEN_STW (p, 4, 30, 4);
2033
2034 /* Set up the argument. arg1 is a 16-bit value. */
2035 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2036 {
2037 p += GEN_MR (p, 5, 4);
2038 p += GEN_MR (p, 6, 3);
2039 }
2040 else
2041 {
2042 p += GEN_MR (p, 5, 3);
2043 p += GEN_MR (p, 6, 4);
2044 }
2045 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2046 p += gen_call (p, fn, 0, 0);
2047
2048 /* Restore TOP */
2049 p += GEN_LWZ (p, 3, 30, 0);
2050 p += GEN_LWZ (p, 4, 30, 4);
2051
2052 emit_insns (buf, p - buf);
2053 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2054 }
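/* Added note, based on the 32-bit SVR4 calling convention: the LONGEST
second argument is passed in the aligned register pair r5:r6, which
appears to be why, in ppc_emit_void_call_2 above, r4 is skipped and the
two words of TOP are moved into r5/r6 in an endian-dependent order
before arg1 is loaded into r3.  */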
2055
2056 /* A note on the following goto ops:
2057
2058 When emitting a goto, the target address is later relocated by
2059 write_goto_address. OFFSET_P is the offset of the branch instruction
2060 in the code sequence, and SIZE_P says how to relocate the instruction,
2061 as recognized by ppc_write_goto_address. In the current implementation,
2062 SIZE is either 24 or 14, for a branch or a conditional-branch instruction.
2063 */
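/* Added example: ppc_emit_if_goto below emits four instructions and
reports *OFFSET_P = 12, the byte offset of its conditional branch, and
*SIZE_P = 14, meaning the 14-bit BD displacement field of a B-form
branch is what has to be patched.  The caller is expected to add OFFSET
to the address where the sequence was emitted and later pass that
address to ppc_write_goto_address together with the real target.  */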
2064
2065 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2066
2067 static void
2068 ppc_emit_if_goto (int *offset_p, int *size_p)
2069 {
2070 EMIT_ASM ("or. 3, 3, 4 \n"
2071 "lwzu " TOP_FIRST ", 8(30) \n"
2072 "lwz " TOP_SECOND ", 4(30) \n"
2073 "1:bne 0, 1b \n");
2074
2075 if (offset_p)
2076 *offset_p = 12;
2077 if (size_p)
2078 *size_p = 14;
2079 }
2080
2081 /* Unconditional goto. Also used for ppc64. */
2082
2083 static void
2084 ppc_emit_goto (int *offset_p, int *size_p)
2085 {
2086 EMIT_ASM ("1:b 1b");
2087
2088 if (offset_p)
2089 *offset_p = 0;
2090 if (size_p)
2091 *size_p = 24;
2092 }
2093
2094 /* Goto if stack[--sp] == TOP */
2095
2096 static void
2097 ppc_emit_eq_goto (int *offset_p, int *size_p)
2098 {
2099 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2100 "lwz " TMP_SECOND ", 4(30) \n"
2101 "xor 4, 6, 4 \n"
2102 "xor 3, 5, 3 \n"
2103 "or. 3, 3, 4 \n"
2104 "lwzu " TOP_FIRST ", 8(30) \n"
2105 "lwz " TOP_SECOND ", 4(30) \n"
2106 "1:beq 0, 1b \n");
2107
2108 if (offset_p)
2109 *offset_p = 28;
2110 if (size_p)
2111 *size_p = 14;
2112 }
2113
2114 /* Goto if stack[--sp] != TOP */
2115
2116 static void
2117 ppc_emit_ne_goto (int *offset_p, int *size_p)
2118 {
2119 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2120 "lwz " TMP_SECOND ", 4(30) \n"
2121 "xor 4, 6, 4 \n"
2122 "xor 3, 5, 3 \n"
2123 "or. 3, 3, 4 \n"
2124 "lwzu " TOP_FIRST ", 8(30) \n"
2125 "lwz " TOP_SECOND ", 4(30) \n"
2126 "1:bne 0, 1b \n");
2127
2128 if (offset_p)
2129 *offset_p = 28;
2130 if (size_p)
2131 *size_p = 14;
2132 }
2133
2134 /* Goto if stack[--sp] < TOP */
2135
2136 static void
2137 ppc_emit_lt_goto (int *offset_p, int *size_p)
2138 {
2139 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2140 "lwz " TMP_SECOND ", 4(30) \n"
2141 "cmplw 6, 6, 4 \n"
2142 "cmpw 7, 5, 3 \n"
2143 /* CR6 bit 0 = low less and high equal */
2144 "crand 6*4+0, 6*4+0, 7*4+2\n"
2145 /* CR7 bit 0 = (low less and high equal) or high less */
2146 "cror 7*4+0, 7*4+0, 6*4+0\n"
2147 "lwzu " TOP_FIRST ", 8(30) \n"
2148 "lwz " TOP_SECOND ", 4(30)\n"
2149 "1:blt 7, 1b \n");
2150
2151 if (offset_p)
2152 *offset_p = 32;
2153 if (size_p)
2154 *size_p = 14;
2155 }
2156
2157 /* Goto if stack[--sp] <= TOP */
2158
2159 static void
2160 ppc_emit_le_goto (int *offset_p, int *size_p)
2161 {
2162 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2163 "lwz " TMP_SECOND ", 4(30) \n"
2164 "cmplw 6, 6, 4 \n"
2165 "cmpw 7, 5, 3 \n"
2166 /* CR6 bit 0 = low less/equal and high equal */
2167 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2168 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2169 "cror 7*4+0, 7*4+0, 6*4+0\n"
2170 "lwzu " TOP_FIRST ", 8(30) \n"
2171 "lwz " TOP_SECOND ", 4(30)\n"
2172 "1:blt 7, 1b \n");
2173
2174 if (offset_p)
2175 *offset_p = 32;
2176 if (size_p)
2177 *size_p = 14;
2178 }
2179
2180 /* Goto if stack[--sp] > TOP */
2181
2182 static void
2183 ppc_emit_gt_goto (int *offset_p, int *size_p)
2184 {
2185 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2186 "lwz " TMP_SECOND ", 4(30) \n"
2187 "cmplw 6, 6, 4 \n"
2188 "cmpw 7, 5, 3 \n"
2189 /* CR6 bit 0 = low greater and high equal */
2190 "crand 6*4+0, 6*4+1, 7*4+2\n"
2191 /* CR7 bit 0 = (low greater and high equal) or high greater */
2192 "cror 7*4+0, 7*4+1, 6*4+0\n"
2193 "lwzu " TOP_FIRST ", 8(30) \n"
2194 "lwz " TOP_SECOND ", 4(30)\n"
2195 "1:blt 7, 1b \n");
2196
2197 if (offset_p)
2198 *offset_p = 32;
2199 if (size_p)
2200 *size_p = 14;
2201 }
2202
2203 /* Goto if stack[--sp] >= TOP */
2204
2205 static void
2206 ppc_emit_ge_goto (int *offset_p, int *size_p)
2207 {
2208 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2209 "lwz " TMP_SECOND ", 4(30) \n"
2210 "cmplw 6, 6, 4 \n"
2211 "cmpw 7, 5, 3 \n"
2212 /* CR6 bit 0 = low ge and high equal */
2213 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2214 /* CR7 bit 0 = (low ge and high equal) or high greater */
2215 "cror 7*4+0, 7*4+1, 6*4+0\n"
2216 "lwzu " TOP_FIRST ", 8(30)\n"
2217 "lwz " TOP_SECOND ", 4(30)\n"
2218 "1:blt 7, 1b \n");
2219
2220 if (offset_p)
2221 *offset_p = 32;
2222 if (size_p)
2223 *size_p = 14;
2224 }
2225
2226 /* Relocate a previously emitted branch instruction. FROM is the address
2227 of the branch instruction, TO is the goto target address, and SIZE
2228 is the value we stored through *SIZE_P before. Currently it is either
2229 24 or 14, for a branch or a conditional-branch instruction.
2230 Also used for ppc64. */
2231
2232 static void
2233 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2234 {
2235 long rel = to - from;
2236 uint32_t insn;
2237 int opcd;
2238
2239 read_inferior_memory (from, (unsigned char *) &insn, 4);
2240 opcd = (insn >> 26) & 0x3f;
2241
2242 switch (size)
2243 {
2244 case 14:
2245 if (opcd != 16
2246 || (rel >= (1 << 15) || rel < -(1 << 15)))
2247 emit_error = 1;
2248 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2249 break;
2250 case 24:
2251 if (opcd != 18
2252 || (rel >= (1 << 25) || rel < -(1 << 25)))
2253 emit_error = 1;
2254 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2255 break;
2256 default:
2257 emit_error = 1;
2258 }
2259
2260 if (!emit_error)
2261 write_inferior_memory (from, (unsigned char *) &insn, 4);
2262 }
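/* Added worked example of the patching above: a "beq cr7" emitted as
0x419e0000 (primary opcode 16) with TO - FROM == 0x28 becomes

     (0x419e0000 & ~0xfffc) | (0x28 & 0xfffc) == 0x419e0028

while an unconditional "b" (primary opcode 18) would instead have its
24-bit LI field rewritten through the 0x3fffffc mask.  */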
2263
2264 /* Table of emit ops for 32-bit. */
2265
2266 static struct emit_ops ppc_emit_ops_impl =
2267 {
2268 ppc_emit_prologue,
2269 ppc_emit_epilogue,
2270 ppc_emit_add,
2271 ppc_emit_sub,
2272 ppc_emit_mul,
2273 ppc_emit_lsh,
2274 ppc_emit_rsh_signed,
2275 ppc_emit_rsh_unsigned,
2276 ppc_emit_ext,
2277 ppc_emit_log_not,
2278 ppc_emit_bit_and,
2279 ppc_emit_bit_or,
2280 ppc_emit_bit_xor,
2281 ppc_emit_bit_not,
2282 ppc_emit_equal,
2283 ppc_emit_less_signed,
2284 ppc_emit_less_unsigned,
2285 ppc_emit_ref,
2286 ppc_emit_if_goto,
2287 ppc_emit_goto,
2288 ppc_write_goto_address,
2289 ppc_emit_const,
2290 ppc_emit_call,
2291 ppc_emit_reg,
2292 ppc_emit_pop,
2293 ppc_emit_stack_flush,
2294 ppc_emit_zero_ext,
2295 ppc_emit_swap,
2296 ppc_emit_stack_adjust,
2297 ppc_emit_int_call_1,
2298 ppc_emit_void_call_2,
2299 ppc_emit_eq_goto,
2300 ppc_emit_ne_goto,
2301 ppc_emit_lt_goto,
2302 ppc_emit_le_goto,
2303 ppc_emit_gt_goto,
2304 ppc_emit_ge_goto
2305 };
2306
2307 #ifdef __powerpc64__
2308
2309 /*
2310
2311 Bytecode execution stack frame - 64-bit
2312
2313 | LR save area (SP + 16)
2314 | CR save area (SP + 8)
2315 SP' -> +- Back chain (SP + 0)
2316 | Save r31 for access saved arguments
2317 | Save r30 for bytecode stack pointer
2318 | Save r4 for incoming argument *value
2319 | Save r3 for incoming argument regs
2320 r30 -> +- Bytecode execution stack
2321 |
2322 | 64 bytes (8 doublewords) initially.
2323 | Expand stack as needed.
2324 |
2325 +-
2326 | Some padding for minimum stack frame.
2327 | 112 for ELFv1.
2328 SP +- Back-chain (SP')
2329
2330 initial frame size
2331 = 112 + (4 * 8) + 64
2332 = 208
2333
2334 r30 is the stack pointer for the bytecode machine.
2335 It should point to the next empty slot, so we can use LDU for pop.
2336 r3 is used as a cache of the TOP value.
2337 It was the first argument, the pointer to regs.
2338 r4 is the second argument, the pointer to the result.
2339 We must set *result = TOP before returning from this function.
2340
2341 Note:
2342 * To restore the stack in the epilogue
2343 => sp = r31
2344 * To check that the stack is big enough for bytecode execution
2345 => r30 - 8 > SP + 112
2346 * To return the execution result
2347 => 0(r4) = TOP
2348
2349 */
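/* Added note; the prototype below is an assumption made for orientation
only and is not taken from these sources.  The code emitted by the
prologue/epilogue pair behaves roughly like a function

     int64_t compiled_bytecode (unsigned char *regs, uint64_t *value);

where REGS arrives in r3, VALUE in r4, the final TOP is stored through
VALUE, and 0 is returned on success.  */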
2350
2351 /* Emit prologue in inferior memory. See above comments. */
2352
2353 static void
2354 ppc64v1_emit_prologue (void)
2355 {
2356 /* On ELFv1, function pointers really point to a function descriptor,
2357 so emit one here. We don't care about the contents of words 1 and 2,
2358 so let them just overlap our code. */
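/* Added note: an ELFv1 function descriptor consists of three
doublewords, namely the entry address, the TOC pointer and the
environment pointer.  Only the entry address emitted just below is
meaningful here; the TOC and environment slots simply alias the
instructions that follow it.  */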
2359 uint64_t opd = current_insn_ptr + 8;
2360 uint32_t buf[2];
2361
2362 /* Mind the strict aliasing rules. */
2363 memcpy (buf, &opd, sizeof buf);
2364 emit_insns (buf, 2);
2365 EMIT_ASM (/* Save return address. */
2366 "mflr 0 \n"
2367 "std 0, 16(1) \n"
2368 /* Save r30 and incoming arguments. */
2369 "std 31, -8(1) \n"
2370 "std 30, -16(1) \n"
2371 "std 4, -24(1) \n"
2372 "std 3, -32(1) \n"
2373 /* Point r31 to the current r1 for accessing arguments. */
2374 "mr 31, 1 \n"
2375 /* Adjust SP. 208 is the initial frame size. */
2376 "stdu 1, -208(1) \n"
2377 /* Set r30 to point to the stack top. */
2378 "addi 30, 1, 168 \n"
2379 /* Initialize r3/TOP to 0. */
2380 "li 3, 0 \n");
2381 }
2382
2383 /* Emit prologue in inferior memory. See above comments. */
2384
2385 static void
2386 ppc64v2_emit_prologue (void)
2387 {
2388 EMIT_ASM (/* Save return address. */
2389 "mflr 0 \n"
2390 "std 0, 16(1) \n"
2391 /* Save r30 and incoming arguments. */
2392 "std 31, -8(1) \n"
2393 "std 30, -16(1) \n"
2394 "std 4, -24(1) \n"
2395 "std 3, -32(1) \n"
2396 /* Point r31 to the current r1 for accessing arguments. */
2397 "mr 31, 1 \n"
2398 /* Adjust SP. 208 is the initial frame size. */
2399 "stdu 1, -208(1) \n"
2400 /* Set r30 to point to the stack top. */
2401 "addi 30, 1, 168 \n"
2402 /* Initialize r3/TOP to 0. */
2403 "li 3, 0 \n");
2404 }
2405
2406 /* Emit epilogue in inferior memory. See above comments. */
2407
2408 static void
2409 ppc64_emit_epilogue (void)
2410 {
2411 EMIT_ASM (/* Restore SP. */
2412 "ld 1, 0(1) \n"
2413 /* *result = TOP */
2414 "ld 4, -24(1) \n"
2415 "std 3, 0(4) \n"
2416 /* Restore registers. */
2417 "ld 31, -8(1) \n"
2418 "ld 30, -16(1) \n"
2419 /* Restore LR. */
2420 "ld 0, 16(1) \n"
2421 /* Return 0 for no-error. */
2422 "li 3, 0 \n"
2423 "mtlr 0 \n"
2424 "blr \n");
2425 }
2426
2427 /* TOP = stack[--sp] + TOP */
2428
2429 static void
2430 ppc64_emit_add (void)
2431 {
2432 EMIT_ASM ("ldu 4, 8(30) \n"
2433 "add 3, 4, 3 \n");
2434 }
2435
2436 /* TOP = stack[--sp] - TOP */
2437
2438 static void
2439 ppc64_emit_sub (void)
2440 {
2441 EMIT_ASM ("ldu 4, 8(30) \n"
2442 "sub 3, 4, 3 \n");
2443 }
2444
2445 /* TOP = stack[--sp] * TOP */
2446
2447 static void
2448 ppc64_emit_mul (void)
2449 {
2450 EMIT_ASM ("ldu 4, 8(30) \n"
2451 "mulld 3, 4, 3 \n");
2452 }
2453
2454 /* TOP = stack[--sp] << TOP */
2455
2456 static void
2457 ppc64_emit_lsh (void)
2458 {
2459 EMIT_ASM ("ldu 4, 8(30) \n"
2460 "sld 3, 4, 3 \n");
2461 }
2462
2463 /* TOP = stack[--sp] >> TOP
2464 (Arithmetic shift right) */
2465
2466 static void
2467 ppc64_emit_rsh_signed (void)
2468 {
2469 EMIT_ASM ("ldu 4, 8(30) \n"
2470 "srad 3, 4, 3 \n");
2471 }
2472
2473 /* TOP = stack[--sp] >> TOP
2474 (Logical shift right) */
2475
2476 static void
2477 ppc64_emit_rsh_unsigned (void)
2478 {
2479 EMIT_ASM ("ldu 4, 8(30) \n"
2480 "srd 3, 4, 3 \n");
2481 }
2482
2483 /* Emit code to sign-extend TOP from the bit width specified by ARG. */
2484
2485 static void
2486 ppc64_emit_ext (int arg)
2487 {
2488 switch (arg)
2489 {
2490 case 8:
2491 EMIT_ASM ("extsb 3, 3");
2492 break;
2493 case 16:
2494 EMIT_ASM ("extsh 3, 3");
2495 break;
2496 case 32:
2497 EMIT_ASM ("extsw 3, 3");
2498 break;
2499 default:
2500 emit_error = 1;
2501 }
2502 }
2503
2504 /* Emit code to zero-extend TOP from the bit width specified by ARG. */
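/* Added note: "rldicl rD,rS,0,MB" rotates by zero and clears the MB
high-order bits, keeping the low 64 - MB bits, so the mask values 56,
48 and 32 used below keep 8, 16 and 32 bits respectively.  */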
2505
2506 static void
2507 ppc64_emit_zero_ext (int arg)
2508 {
2509 switch (arg)
2510 {
2511 case 8:
2512 EMIT_ASM ("rldicl 3,3,0,56");
2513 break;
2514 case 16:
2515 EMIT_ASM ("rldicl 3,3,0,48");
2516 break;
2517 case 32:
2518 EMIT_ASM ("rldicl 3,3,0,32");
2519 break;
2520 default:
2521 emit_error = 1;
2522 }
2523 }
2524
2525 /* TOP = !TOP
2526 i.e., TOP = (TOP == 0) ? 1 : 0; */
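/* Added note: cntlzd produces 64 only when its operand is zero and at
most 63 otherwise, so shifting the count right by 6 yields exactly the
0/1 result described above.  ppc64_emit_equal below reuses the same
trick after XORing the two operands.  */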
2527
2528 static void
2529 ppc64_emit_log_not (void)
2530 {
2531 EMIT_ASM ("cntlzd 3, 3 \n"
2532 "srdi 3, 3, 6 \n");
2533 }
2534
2535 /* TOP = stack[--sp] & TOP */
2536
2537 static void
2538 ppc64_emit_bit_and (void)
2539 {
2540 EMIT_ASM ("ldu 4, 8(30) \n"
2541 "and 3, 4, 3 \n");
2542 }
2543
2544 /* TOP = stack[--sp] | TOP */
2545
2546 static void
2547 ppc64_emit_bit_or (void)
2548 {
2549 EMIT_ASM ("ldu 4, 8(30) \n"
2550 "or 3, 4, 3 \n");
2551 }
2552
2553 /* TOP = stack[--sp] ^ TOP */
2554
2555 static void
2556 ppc64_emit_bit_xor (void)
2557 {
2558 EMIT_ASM ("ldu 4, 8(30) \n"
2559 "xor 3, 4, 3 \n");
2560 }
2561
2562 /* TOP = ~TOP
2563 i.e., TOP = ~(TOP | TOP) */
2564
2565 static void
2566 ppc64_emit_bit_not (void)
2567 {
2568 EMIT_ASM ("nor 3, 3, 3 \n");
2569 }
2570
2571 /* TOP = stack[--sp] == TOP */
2572
2573 static void
2574 ppc64_emit_equal (void)
2575 {
2576 EMIT_ASM ("ldu 4, 8(30) \n"
2577 "xor 3, 3, 4 \n"
2578 "cntlzd 3, 3 \n"
2579 "srdi 3, 3, 6 \n");
2580 }
2581
2582 /* TOP = stack[--sp] < TOP
2583 (Signed comparison) */
2584
2585 static void
2586 ppc64_emit_less_signed (void)
2587 {
2588 EMIT_ASM ("ldu 4, 8(30) \n"
2589 "cmpd 7, 4, 3 \n"
2590 "mfcr 3 \n"
2591 "rlwinm 3, 3, 29, 31, 31 \n");
2592 }
2593
2594 /* TOP = stack[--sp] < TOP
2595 (Unsigned comparison) */
2596
2597 static void
2598 ppc64_emit_less_unsigned (void)
2599 {
2600 EMIT_ASM ("ldu 4, 8(30) \n"
2601 "cmpld 7, 4, 3 \n"
2602 "mfcr 3 \n"
2603 "rlwinm 3, 3, 29, 31, 31 \n");
2604 }
2605
2606 /* Read SIZE bytes from the memory address in TOP.
2607 Zero-extend the value that was read. */
2608
2609 static void
2610 ppc64_emit_ref (int size)
2611 {
2612 switch (size)
2613 {
2614 case 1:
2615 EMIT_ASM ("lbz 3, 0(3)");
2616 break;
2617 case 2:
2618 EMIT_ASM ("lhz 3, 0(3)");
2619 break;
2620 case 4:
2621 EMIT_ASM ("lwz 3, 0(3)");
2622 break;
2623 case 8:
2624 EMIT_ASM ("ld 3, 0(3)");
2625 break;
2626 }
2627 }
2628
2629 /* TOP = NUM */
2630
2631 static void
2632 ppc64_emit_const (LONGEST num)
2633 {
2634 uint32_t buf[5];
2635 uint32_t *p = buf;
2636
2637 p += gen_limm (p, 3, num, 1);
2638
2639 emit_insns (buf, p - buf);
2640 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2641 }
2642
2643 /* Set TOP to the value of register REG by calling the get_raw_reg function
2644 with two arguments: the collected register buffer and the register number. */
2645
2646 static void
2647 ppc64v1_emit_reg (int reg)
2648 {
2649 uint32_t buf[15];
2650 uint32_t *p = buf;
2651
2652 /* fctx->regs is passed in r3 and then saved at -32(r31), i.e. at 176(1). */
2653 p += GEN_LD (p, 3, 31, -32);
2654 p += GEN_LI (p, 4, reg);
2655 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2656 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2657 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2658
2659 emit_insns (buf, p - buf);
2660 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2661 }
2662
2663 /* Likewise, for ELFv2. */
2664
2665 static void
2666 ppc64v2_emit_reg (int reg)
2667 {
2668 uint32_t buf[12];
2669 uint32_t *p = buf;
2670
2671 /* fctx->regs is passed in r3 and then saved at -32(r31), i.e. at 176(1). */
2672 p += GEN_LD (p, 3, 31, -32);
2673 p += GEN_LI (p, 4, reg);
2674 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2675 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2676 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2677
2678 emit_insns (buf, p - buf);
2679 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2680 }
2681
2682 /* TOP = stack[--sp] */
2683
2684 static void
2685 ppc64_emit_pop (void)
2686 {
2687 EMIT_ASM ("ldu 3, 8(30)");
2688 }
2689
2690 /* stack[sp++] = TOP
2691
2692 Because we may run out of bytecode stack space, expand it by 8 more
2693 doublewords if needed. */
2694
2695 static void
2696 ppc64_emit_stack_flush (void)
2697 {
2698 /* Make sure the bytecode stack is big enough before the push.
2699 Otherwise, expand it by 64 more bytes. */
2700
2701 EMIT_ASM (" std 3, 0(30) \n"
2702 " addi 4, 30, -(112 + 8) \n"
2703 " cmpd 7, 4, 1 \n"
2704 " bgt 7, 1f \n"
2705 " stdu 31, -64(1) \n"
2706 "1:addi 30, 30, -8 \n");
2707 }
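/* Added sketch, not part of the original code: this implements the
"r30 - 8 > SP + 112" condition from the frame description above.  With
BC_SP standing for r30 and SP for r1 the sequence behaves roughly like:

     if (!(bc_sp - (112 + 8) > sp))
       sp -= 64;
     bc_sp -= 8;

so the frame is extended by another 64 bytes before the bytecode stack
could run into the 112-byte minimum-frame area at the bottom.  */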
2708
2709 /* Swap TOP and stack[sp-1] */
2710
2711 static void
2712 ppc64_emit_swap (void)
2713 {
2714 EMIT_ASM ("ld 4, 8(30) \n"
2715 "std 3, 8(30) \n"
2716 "mr 3, 4 \n");
2717 }
2718
2719 /* Call function FN - ELFv1. */
2720
2721 static void
2722 ppc64v1_emit_call (CORE_ADDR fn)
2723 {
2724 uint32_t buf[13];
2725 uint32_t *p = buf;
2726
2727 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2728 p += gen_call (p, fn, 1, 1);
2729 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2730
2731 emit_insns (buf, p - buf);
2732 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2733 }
2734
2735 /* Call function FN - ELFv2. */
2736
2737 static void
2738 ppc64v2_emit_call (CORE_ADDR fn)
2739 {
2740 uint32_t buf[10];
2741 uint32_t *p = buf;
2742
2743 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2744 p += gen_call (p, fn, 1, 0);
2745 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2746
2747 emit_insns (buf, p - buf);
2748 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2749 }
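/* Added note: r2 (the TOC pointer) is saved around the call in the
frame's TOC save doubleword, which the 64-bit ELF ABIs place at SP + 40
for ELFv1 and at SP + 24 for ELFv2; that is why the v1 and v2 variants
above use different offsets.  */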
2750
2751 /* FN's prototype is `LONGEST(*fn)(int)'.
2752 TOP = fn (arg1)
2753 */
2754
2755 static void
2756 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
2757 {
2758 uint32_t buf[13];
2759 uint32_t *p = buf;
2760
2761 /* Set up the argument. arg1 is a 16-bit value. */
2762 p += gen_limm (p, 3, arg1, 1);
2763 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2764 p += gen_call (p, fn, 1, 1);
2765 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2766
2767 emit_insns (buf, p - buf);
2768 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2769 }
2770
2771 /* Likewise for ELFv2. */
2772
2773 static void
2774 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
2775 {
2776 uint32_t buf[10];
2777 uint32_t *p = buf;
2778
2779 /* Set up the argument. arg1 is a 16-bit value. */
2780 p += gen_limm (p, 3, arg1, 1);
2781 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2782 p += gen_call (p, fn, 1, 0);
2783 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2784
2785 emit_insns (buf, p - buf);
2786 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2787 }
2788
2789 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2790 fn (arg1, TOP)
2791
2792 TOP should be preserved/restored before/after the call. */
2793
2794 static void
2795 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
2796 {
2797 uint32_t buf[17];
2798 uint32_t *p = buf;
2799
2800 /* Save TOP. 0(30) is the next empty slot. */
2801 p += GEN_STD (p, 3, 30, 0);
2802
2803 /* Set up the argument. arg1 is a 16-bit value. */
2804 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
2805 p += gen_limm (p, 3, arg1, 1);
2806 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2807 p += gen_call (p, fn, 1, 1);
2808 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2809
2810 /* Restore TOP */
2811 p += GEN_LD (p, 3, 30, 0);
2812
2813 emit_insns (buf, p - buf);
2814 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2815 }
2816
2817 /* Likewise for ELFv2. */
2818
2819 static void
2820 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
2821 {
2822 uint32_t buf[14];
2823 uint32_t *p = buf;
2824
2825 /* Save TOP. 0(30) is the next empty slot. */
2826 p += GEN_STD (p, 3, 30, 0);
2827
2828 /* Set up the argument. arg1 is a 16-bit value. */
2829 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
2830 p += gen_limm (p, 3, arg1, 1);
2831 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2832 p += gen_call (p, fn, 1, 0);
2833 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2834
2835 /* Restore TOP */
2836 p += GEN_LD (p, 3, 30, 0);
2837
2838 emit_insns (buf, p - buf);
2839 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2840 }
2841
2842 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2843
2844 static void
2845 ppc64_emit_if_goto (int *offset_p, int *size_p)
2846 {
2847 EMIT_ASM ("cmpdi 7, 3, 0 \n"
2848 "ldu 3, 8(30) \n"
2849 "1:bne 7, 1b \n");
2850
2851 if (offset_p)
2852 *offset_p = 8;
2853 if (size_p)
2854 *size_p = 14;
2855 }
2856
2857 /* Goto if stack[--sp] == TOP */
2858
2859 static void
2860 ppc64_emit_eq_goto (int *offset_p, int *size_p)
2861 {
2862 EMIT_ASM ("ldu 4, 8(30) \n"
2863 "cmpd 7, 4, 3 \n"
2864 "ldu 3, 8(30) \n"
2865 "1:beq 7, 1b \n");
2866
2867 if (offset_p)
2868 *offset_p = 12;
2869 if (size_p)
2870 *size_p = 14;
2871 }
2872
2873 /* Goto if stack[--sp] != TOP */
2874
2875 static void
2876 ppc64_emit_ne_goto (int *offset_p, int *size_p)
2877 {
2878 EMIT_ASM ("ldu 4, 8(30) \n"
2879 "cmpd 7, 4, 3 \n"
2880 "ldu 3, 8(30) \n"
2881 "1:bne 7, 1b \n");
2882
2883 if (offset_p)
2884 *offset_p = 12;
2885 if (size_p)
2886 *size_p = 14;
2887 }
2888
2889 /* Goto if stack[--sp] < TOP */
2890
2891 static void
2892 ppc64_emit_lt_goto (int *offset_p, int *size_p)
2893 {
2894 EMIT_ASM ("ldu 4, 8(30) \n"
2895 "cmpd 7, 4, 3 \n"
2896 "ldu 3, 8(30) \n"
2897 "1:blt 7, 1b \n");
2898
2899 if (offset_p)
2900 *offset_p = 12;
2901 if (size_p)
2902 *size_p = 14;
2903 }
2904
2905 /* Goto if stack[--sp] <= TOP */
2906
2907 static void
2908 ppc64_emit_le_goto (int *offset_p, int *size_p)
2909 {
2910 EMIT_ASM ("ldu 4, 8(30) \n"
2911 "cmpd 7, 4, 3 \n"
2912 "ldu 3, 8(30) \n"
2913 "1:ble 7, 1b \n");
2914
2915 if (offset_p)
2916 *offset_p = 12;
2917 if (size_p)
2918 *size_p = 14;
2919 }
2920
2921 /* Goto if stack[--sp] > TOP */
2922
2923 static void
2924 ppc64_emit_gt_goto (int *offset_p, int *size_p)
2925 {
2926 EMIT_ASM ("ldu 4, 8(30) \n"
2927 "cmpd 7, 4, 3 \n"
2928 "ldu 3, 8(30) \n"
2929 "1:bgt 7, 1b \n");
2930
2931 if (offset_p)
2932 *offset_p = 12;
2933 if (size_p)
2934 *size_p = 14;
2935 }
2936
2937 /* Goto if stack[--sp] >= TOP */
2938
2939 static void
2940 ppc64_emit_ge_goto (int *offset_p, int *size_p)
2941 {
2942 EMIT_ASM ("ldu 4, 8(30) \n"
2943 "cmpd 7, 4, 3 \n"
2944 "ldu 3, 8(30) \n"
2945 "1:bge 7, 1b \n");
2946
2947 if (offset_p)
2948 *offset_p = 12;
2949 if (size_p)
2950 *size_p = 14;
2951 }
2952
2953 /* Table of emit ops for 64-bit ELFv1. */
2954
2955 static struct emit_ops ppc64v1_emit_ops_impl =
2956 {
2957 ppc64v1_emit_prologue,
2958 ppc64_emit_epilogue,
2959 ppc64_emit_add,
2960 ppc64_emit_sub,
2961 ppc64_emit_mul,
2962 ppc64_emit_lsh,
2963 ppc64_emit_rsh_signed,
2964 ppc64_emit_rsh_unsigned,
2965 ppc64_emit_ext,
2966 ppc64_emit_log_not,
2967 ppc64_emit_bit_and,
2968 ppc64_emit_bit_or,
2969 ppc64_emit_bit_xor,
2970 ppc64_emit_bit_not,
2971 ppc64_emit_equal,
2972 ppc64_emit_less_signed,
2973 ppc64_emit_less_unsigned,
2974 ppc64_emit_ref,
2975 ppc64_emit_if_goto,
2976 ppc_emit_goto,
2977 ppc_write_goto_address,
2978 ppc64_emit_const,
2979 ppc64v1_emit_call,
2980 ppc64v1_emit_reg,
2981 ppc64_emit_pop,
2982 ppc64_emit_stack_flush,
2983 ppc64_emit_zero_ext,
2984 ppc64_emit_swap,
2985 ppc_emit_stack_adjust,
2986 ppc64v1_emit_int_call_1,
2987 ppc64v1_emit_void_call_2,
2988 ppc64_emit_eq_goto,
2989 ppc64_emit_ne_goto,
2990 ppc64_emit_lt_goto,
2991 ppc64_emit_le_goto,
2992 ppc64_emit_gt_goto,
2993 ppc64_emit_ge_goto
2994 };
2995
2996 /* Table of emit ops for 64-bit ELFv2. */
2997
2998 static struct emit_ops ppc64v2_emit_ops_impl =
2999 {
3000 ppc64v2_emit_prologue,
3001 ppc64_emit_epilogue,
3002 ppc64_emit_add,
3003 ppc64_emit_sub,
3004 ppc64_emit_mul,
3005 ppc64_emit_lsh,
3006 ppc64_emit_rsh_signed,
3007 ppc64_emit_rsh_unsigned,
3008 ppc64_emit_ext,
3009 ppc64_emit_log_not,
3010 ppc64_emit_bit_and,
3011 ppc64_emit_bit_or,
3012 ppc64_emit_bit_xor,
3013 ppc64_emit_bit_not,
3014 ppc64_emit_equal,
3015 ppc64_emit_less_signed,
3016 ppc64_emit_less_unsigned,
3017 ppc64_emit_ref,
3018 ppc64_emit_if_goto,
3019 ppc_emit_goto,
3020 ppc_write_goto_address,
3021 ppc64_emit_const,
3022 ppc64v2_emit_call,
3023 ppc64v2_emit_reg,
3024 ppc64_emit_pop,
3025 ppc64_emit_stack_flush,
3026 ppc64_emit_zero_ext,
3027 ppc64_emit_swap,
3028 ppc_emit_stack_adjust,
3029 ppc64v2_emit_int_call_1,
3030 ppc64v2_emit_void_call_2,
3031 ppc64_emit_eq_goto,
3032 ppc64_emit_ne_goto,
3033 ppc64_emit_lt_goto,
3034 ppc64_emit_le_goto,
3035 ppc64_emit_gt_goto,
3036 ppc64_emit_ge_goto
3037 };
3038
3039 #endif
3040
3041 /* Implementation of linux_target_ops method "emit_ops". */
3042
3043 static struct emit_ops *
3044 ppc_emit_ops (void)
3045 {
3046 #ifdef __powerpc64__
3047 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3048
3049 if (register_size (regcache->tdesc, 0) == 8)
3050 {
3051 if (is_elfv2_inferior ())
3052 return &ppc64v2_emit_ops_impl;
3053 else
3054 return &ppc64v1_emit_ops_impl;
3055 }
3056 #endif
3057 return &ppc_emit_ops_impl;
3058 }
3059
3060 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3061
3062 static int
3063 ppc_get_ipa_tdesc_idx (void)
3064 {
3065 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3066 const struct target_desc *tdesc = regcache->tdesc;
3067
3068 #ifdef __powerpc64__
3069 if (tdesc == tdesc_powerpc_64l)
3070 return PPC_TDESC_BASE;
3071 if (tdesc == tdesc_powerpc_altivec64l)
3072 return PPC_TDESC_ALTIVEC;
3073 if (tdesc == tdesc_powerpc_cell64l)
3074 return PPC_TDESC_CELL;
3075 if (tdesc == tdesc_powerpc_vsx64l)
3076 return PPC_TDESC_VSX;
3077 if (tdesc == tdesc_powerpc_isa205_64l)
3078 return PPC_TDESC_ISA205;
3079 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3080 return PPC_TDESC_ISA205_ALTIVEC;
3081 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3082 return PPC_TDESC_ISA205_VSX;
3083 #endif
3084
3085 if (tdesc == tdesc_powerpc_32l)
3086 return PPC_TDESC_BASE;
3087 if (tdesc == tdesc_powerpc_altivec32l)
3088 return PPC_TDESC_ALTIVEC;
3089 if (tdesc == tdesc_powerpc_cell32l)
3090 return PPC_TDESC_CELL;
3091 if (tdesc == tdesc_powerpc_vsx32l)
3092 return PPC_TDESC_VSX;
3093 if (tdesc == tdesc_powerpc_isa205_32l)
3094 return PPC_TDESC_ISA205;
3095 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3096 return PPC_TDESC_ISA205_ALTIVEC;
3097 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3098 return PPC_TDESC_ISA205_VSX;
3099 if (tdesc == tdesc_powerpc_e500l)
3100 return PPC_TDESC_E500;
3101
3102 return 0;
3103 }
3104
3105 struct linux_target_ops the_low_target = {
3106 ppc_arch_setup,
3107 ppc_regs_info,
3108 ppc_cannot_fetch_register,
3109 ppc_cannot_store_register,
3110 NULL, /* fetch_register */
3111 ppc_get_pc,
3112 ppc_set_pc,
3113 NULL, /* breakpoint_kind_from_pc */
3114 ppc_sw_breakpoint_from_kind,
3115 NULL,
3116 0,
3117 ppc_breakpoint_at,
3118 ppc_supports_z_point_type,
3119 ppc_insert_point,
3120 ppc_remove_point,
3121 NULL,
3122 NULL,
3123 ppc_collect_ptrace_register,
3124 ppc_supply_ptrace_register,
3125 NULL, /* siginfo_fixup */
3126 NULL, /* new_process */
3127 NULL, /* new_thread */
3128 NULL, /* new_fork */
3129 NULL, /* prepare_to_resume */
3130 NULL, /* process_qsupported */
3131 ppc_supports_tracepoints,
3132 ppc_get_thread_area,
3133 ppc_install_fast_tracepoint_jump_pad,
3134 ppc_emit_ops,
3135 ppc_get_min_fast_tracepoint_insn_len,
3136 NULL, /* supports_range_stepping */
3137 NULL, /* breakpoint_kind_from_current_state */
3138 ppc_supports_hardware_single_step,
3139 NULL, /* get_syscall_trapinfo */
3140 ppc_get_ipa_tdesc_idx,
3141 };
3142
3143 void
3144 initialize_low_arch (void)
3145 {
3146 /* Initialize the Linux target descriptions. */
3147
3148 init_registers_powerpc_32l ();
3149 init_registers_powerpc_altivec32l ();
3150 init_registers_powerpc_cell32l ();
3151 init_registers_powerpc_vsx32l ();
3152 init_registers_powerpc_isa205_32l ();
3153 init_registers_powerpc_isa205_altivec32l ();
3154 init_registers_powerpc_isa205_vsx32l ();
3155 init_registers_powerpc_e500l ();
3156 #ifdef __powerpc64__
3157 init_registers_powerpc_64l ();
3158 init_registers_powerpc_altivec64l ();
3159 init_registers_powerpc_cell64l ();
3160 init_registers_powerpc_vsx64l ();
3161 init_registers_powerpc_isa205_64l ();
3162 init_registers_powerpc_isa205_altivec64l ();
3163 init_registers_powerpc_isa205_vsx64l ();
3164 #endif
3165
3166 initialize_regsets_info (&ppc_regsets_info);
3167 }