[PowerPC] Consolidate linux target description selection
gdb/gdbserver/linux-ppc-low.c
1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <elf.h>
24 #include <asm/ptrace.h>
25
26 #include "arch/ppc-linux-common.h"
27 #include "arch/ppc-linux-tdesc.h"
28 #include "nat/ppc-linux.h"
29 #include "linux-ppc-tdesc-init.h"
30 #include "ax.h"
31 #include "tracepoint.h"
32
33 #define PPC_FIELD(value, from, len) \
34 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
35 #define PPC_SEXT(v, bs) \
36 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
37 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
38 - ((CORE_ADDR) 1 << ((bs) - 1)))
39 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
40 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
41 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
42 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
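/* For illustration (example values chosen here, not from the original
   code): with INSN = 0x48000008, i.e. "b .+8", PPC_OP6 (INSN) extracts
   the major opcode 18 from the top six bits, and PPC_LI (INSN)
   sign-extends the 24-bit LI field and shifts it left by two, yielding
   the byte offset 8.  */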
43
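/* AT_HWCAP bits for the inferior, read from its auxiliary vector in
   ppc_arch_setup and consulted when choosing the target description and
   when filling/storing the optional register sets.  */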
44 static unsigned long ppc_hwcap;
45
46
47 #define ppc_num_regs 73
48
49 #ifdef __powerpc64__
50 /* We use a constant for FPSCR instead of PT_FPSCR, because
51 many shipped PPC64 kernels had the wrong value in ptrace.h. */
52 static int ppc_regmap[] =
53 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
54 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
55 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
56 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
57 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
58 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
59 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
60 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
61 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
62 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
63 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
64 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
65 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
66 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
67 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
68 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
69 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
70 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
71 PT_ORIG_R3 * 8, PT_TRAP * 8 };
72 #else
73 /* Currently, don't check/send MQ. */
74 static int ppc_regmap[] =
75 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
76 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
77 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
78 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
79 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
80 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
81 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
82 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
83 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
84 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
85 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
86 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
87 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
88 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
89 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
90 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
91 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
92 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
93 PT_ORIG_R3 * 4, PT_TRAP * 4
94 };
95
96 static int ppc_regmap_e500[] =
97 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
98 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
99 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
100 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
101 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
102 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
103 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
104 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
105 -1, -1, -1, -1,
106 -1, -1, -1, -1,
107 -1, -1, -1, -1,
108 -1, -1, -1, -1,
109 -1, -1, -1, -1,
110 -1, -1, -1, -1,
111 -1, -1, -1, -1,
112 -1, -1, -1, -1,
113 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
114 PT_CTR * 4, PT_XER * 4, -1,
115 PT_ORIG_R3 * 4, PT_TRAP * 4
116 };
117 #endif
118
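/* Return non-zero if gdbserver should not try to write register REGNO
   with ptrace; some kernels reject writes to these registers (see the
   comments below).  */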
119 static int
120 ppc_cannot_store_register (int regno)
121 {
122 const struct target_desc *tdesc = current_process ()->tdesc;
123
124 #ifndef __powerpc64__
125 /* Some kernels do not allow us to store fpscr. */
126 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
127 && regno == find_regno (tdesc, "fpscr"))
128 return 2;
129 #endif
130
131 /* Some kernels do not allow us to store orig_r3 or trap. */
132 if (regno == find_regno (tdesc, "orig_r3")
133 || regno == find_regno (tdesc, "trap"))
134 return 2;
135
136 return 0;
137 }
138
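/* Return non-zero if register REGNO cannot be fetched individually.
   All registers in the map can be fetched, so always return 0.  */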
139 static int
140 ppc_cannot_fetch_register (int regno)
141 {
142 return 0;
143 }
144
145 static void
146 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
147 {
148 memset (buf, 0, sizeof (long));
149
150 if (__BYTE_ORDER == __LITTLE_ENDIAN)
151 {
152 /* Little-endian values always sit at the left end of the buffer. */
153 collect_register (regcache, regno, buf);
154 }
155 else if (__BYTE_ORDER == __BIG_ENDIAN)
156 {
157 /* Big-endian values sit at the right end of the buffer. For
158 registers whose sizes are smaller than sizeof (long), we must apply
159 padding to access them correctly. */
160 int size = register_size (regcache->tdesc, regno);
161
162 if (size < sizeof (long))
163 collect_register (regcache, regno, buf + sizeof (long) - size);
164 else
165 collect_register (regcache, regno, buf);
166 }
167 else
168 perror_with_name ("Unexpected byte order");
169 }
170
171 static void
172 ppc_supply_ptrace_register (struct regcache *regcache,
173 int regno, const char *buf)
174 {
175 if (__BYTE_ORDER == __LITTLE_ENDIAN)
176 {
177 /* Little-endian values always sit at the left end of the buffer. */
178 supply_register (regcache, regno, buf);
179 }
180 else if (__BYTE_ORDER == __BIG_ENDIAN)
181 {
182 /* Big-endian values sit at the right end of the buffer. For
183 registers whose sizes are smaller than sizeof (long), we must apply
184 padding to access them correctly. */
185 int size = register_size (regcache->tdesc, regno);
186
187 if (size < sizeof (long))
188 supply_register (regcache, regno, buf + sizeof (long) - size);
189 else
190 supply_register (regcache, regno, buf);
191 }
192 else
193 perror_with_name ("Unexpected byte order");
194 }
195
196
197 #define INSTR_SC 0x44000002
198 #define NR_spu_run 0x0116
199
200 /* If the PPU thread is currently stopped on a spu_run system call,
201 return to FD and ADDR the file handle and NPC parameter address
202 used with the system call. Return non-zero if successful. */
203 static int
204 parse_spufs_run (struct regcache *regcache, int *fd, CORE_ADDR *addr)
205 {
206 CORE_ADDR curr_pc;
207 int curr_insn;
208 int curr_r0;
209
210 if (register_size (regcache->tdesc, 0) == 4)
211 {
212 unsigned int pc, r0, r3, r4;
213 collect_register_by_name (regcache, "pc", &pc);
214 collect_register_by_name (regcache, "r0", &r0);
215 collect_register_by_name (regcache, "orig_r3", &r3);
216 collect_register_by_name (regcache, "r4", &r4);
217 curr_pc = (CORE_ADDR) pc;
218 curr_r0 = (int) r0;
219 *fd = (int) r3;
220 *addr = (CORE_ADDR) r4;
221 }
222 else
223 {
224 unsigned long pc, r0, r3, r4;
225 collect_register_by_name (regcache, "pc", &pc);
226 collect_register_by_name (regcache, "r0", &r0);
227 collect_register_by_name (regcache, "orig_r3", &r3);
228 collect_register_by_name (regcache, "r4", &r4);
229 curr_pc = (CORE_ADDR) pc;
230 curr_r0 = (int) r0;
231 *fd = (int) r3;
232 *addr = (CORE_ADDR) r4;
233 }
234
235 /* Fetch instruction preceding current NIP. */
236 if ((*the_target->read_memory) (curr_pc - 4,
237 (unsigned char *) &curr_insn, 4) != 0)
238 return 0;
239 /* It should be a "sc" instruction. */
240 if (curr_insn != INSTR_SC)
241 return 0;
242 /* System call number should be NR_spu_run. */
243 if (curr_r0 != NR_spu_run)
244 return 0;
245
246 return 1;
247 }
248
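/* Fetch the program counter. If the thread is stopped inside an
   spu_run system call, return a synthetic address that encodes the SPU
   context: bit 63 set, the spufs file descriptor in bits 32-62, and the
   SPU-local NPC minus 4 in the low 32 bits. Otherwise return the value
   of the "pc" register.  */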
249 static CORE_ADDR
250 ppc_get_pc (struct regcache *regcache)
251 {
252 CORE_ADDR addr;
253 int fd;
254
255 if (parse_spufs_run (regcache, &fd, &addr))
256 {
257 unsigned int pc;
258 (*the_target->read_memory) (addr, (unsigned char *) &pc, 4);
259 return ((CORE_ADDR)1 << 63)
260 | ((CORE_ADDR)fd << 32) | (CORE_ADDR) (pc - 4);
261 }
262 else if (register_size (regcache->tdesc, 0) == 4)
263 {
264 unsigned int pc;
265 collect_register_by_name (regcache, "pc", &pc);
266 return (CORE_ADDR) pc;
267 }
268 else
269 {
270 unsigned long pc;
271 collect_register_by_name (regcache, "pc", &pc);
272 return (CORE_ADDR) pc;
273 }
274 }
275
276 static void
277 ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
278 {
279 CORE_ADDR addr;
280 int fd;
281
282 if (parse_spufs_run (regcache, &fd, &addr))
283 {
284 unsigned int newpc = pc;
285 (*the_target->write_memory) (addr, (unsigned char *) &newpc, 4);
286 }
287 else if (register_size (regcache->tdesc, 0) == 4)
288 {
289 unsigned int newpc = pc;
290 supply_register_by_name (regcache, "pc", &newpc);
291 }
292 else
293 {
294 unsigned long newpc = pc;
295 supply_register_by_name (regcache, "pc", &newpc);
296 }
297 }
298
299
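/* Scan the inferior's auxiliary vector for an entry of the given TYPE.
   On success store its value in *VALP and return 1; otherwise set *VALP
   to 0 and return 0. The auxv entry size follows the inferior's
   wordsize, taken from the current target description.  */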
300 static int
301 ppc_get_auxv (unsigned long type, unsigned long *valp)
302 {
303 const struct target_desc *tdesc = current_process ()->tdesc;
304 int wordsize = register_size (tdesc, 0);
305 unsigned char *data = (unsigned char *) alloca (2 * wordsize);
306 int offset = 0;
307
308 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
309 {
310 if (wordsize == 4)
311 {
312 unsigned int *data_p = (unsigned int *)data;
313 if (data_p[0] == type)
314 {
315 *valp = data_p[1];
316 return 1;
317 }
318 }
319 else
320 {
321 unsigned long *data_p = (unsigned long *)data;
322 if (data_p[0] == type)
323 {
324 *valp = data_p[1];
325 return 1;
326 }
327 }
328
329 offset += 2 * wordsize;
330 }
331
332 *valp = 0;
333 return 0;
334 }
335
336 #ifndef __powerpc64__
337 static int ppc_regmap_adjusted;
338 #endif
339
340
341 /* Correct in either endianness.
342 This instruction is "twge r2, r2", which GDB uses as a software
343 breakpoint. */
344 static const unsigned int ppc_breakpoint = 0x7d821008;
345 #define ppc_breakpoint_len 4
346
347 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
348
349 static const gdb_byte *
350 ppc_sw_breakpoint_from_kind (int kind, int *size)
351 {
352 *size = ppc_breakpoint_len;
353 return (const gdb_byte *) &ppc_breakpoint;
354 }
355
356 static int
357 ppc_breakpoint_at (CORE_ADDR where)
358 {
359 unsigned int insn;
360
361 if (where & ((CORE_ADDR)1 << 63))
362 {
363 char mem_annex[32];
364 sprintf (mem_annex, "%d/mem", (int)((where >> 32) & 0x7fffffff));
365 (*the_target->qxfer_spu) (mem_annex, (unsigned char *) &insn,
366 NULL, where & 0xffffffff, 4);
367 if (insn == 0x3fff)
368 return 1;
369 }
370 else
371 {
372 (*the_target->read_memory) (where, (unsigned char *) &insn, 4);
373 if (insn == ppc_breakpoint)
374 return 1;
375 /* If necessary, recognize more trap instructions here. GDB only uses
376 the one. */
377 }
378
379 return 0;
380 }
381
382 /* Implement supports_z_point_type target-ops.
383 Returns true if type Z_TYPE breakpoint is supported.
384
385 Software breakpoints are handled on the server side, so tracepoints
386 and breakpoints can be inserted at the same location. */
387
388 static int
389 ppc_supports_z_point_type (char z_type)
390 {
391 switch (z_type)
392 {
393 case Z_PACKET_SW_BP:
394 return 1;
395 case Z_PACKET_HW_BP:
396 case Z_PACKET_WRITE_WP:
397 case Z_PACKET_ACCESS_WP:
398 default:
399 return 0;
400 }
401 }
402
403 /* Implement insert_point target-ops.
404 Returns 0 on success, -1 on failure and 1 on unsupported. */
405
406 static int
407 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
408 int size, struct raw_breakpoint *bp)
409 {
410 switch (type)
411 {
412 case raw_bkpt_type_sw:
413 return insert_memory_breakpoint (bp);
414
415 case raw_bkpt_type_hw:
416 case raw_bkpt_type_write_wp:
417 case raw_bkpt_type_access_wp:
418 default:
419 /* Unsupported. */
420 return 1;
421 }
422 }
423
424 /* Implement remove_point target-ops.
425 Returns 0 on success, -1 on failure and 1 on unsupported. */
426
427 static int
428 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
429 int size, struct raw_breakpoint *bp)
430 {
431 switch (type)
432 {
433 case raw_bkpt_type_sw:
434 return remove_memory_breakpoint (bp);
435
436 case raw_bkpt_type_hw:
437 case raw_bkpt_type_write_wp:
438 case raw_bkpt_type_access_wp:
439 default:
440 /* Unsupported. */
441 return 1;
442 }
443 }
444
445 /* Provide only a fill function for the general register set. ps_lgetregs
446 will use this for NPTL support. */
447
448 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
449 {
450 int i;
451
452 for (i = 0; i < 32; i++)
453 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
454
455 for (i = 64; i < 70; i++)
456 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
457
458 for (i = 71; i < 73; i++)
459 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
460 }
461
462 #define SIZEOF_VSXREGS 32*8
463
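/* Fill BUF (a PTRACE_GETVSXREGS-style buffer of 32 doublewords) with
   the vs0h..vs31h registers from REGCACHE. Does nothing if the target
   has no VSX.  */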
464 static void
465 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
466 {
467 int i, base;
468 char *regset = (char *) buf;
469
470 if (!(ppc_hwcap & PPC_FEATURE_HAS_VSX))
471 return;
472
473 base = find_regno (regcache->tdesc, "vs0h");
474 for (i = 0; i < 32; i++)
475 collect_register (regcache, base + i, &regset[i * 8]);
476 }
477
478 static void
479 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
480 {
481 int i, base;
482 const char *regset = (const char *) buf;
483
484 if (!(ppc_hwcap & PPC_FEATURE_HAS_VSX))
485 return;
486
487 base = find_regno (regcache->tdesc, "vs0h");
488 for (i = 0; i < 32; i++)
489 supply_register (regcache, base + i, &regset[i * 8]);
490 }
491
492 #define SIZEOF_VRREGS 33*16+4
493
494 static void
495 ppc_fill_vrregset (struct regcache *regcache, void *buf)
496 {
497 int i, base;
498 char *regset = (char *) buf;
499
500 if (!(ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC))
501 return;
502
503 base = find_regno (regcache->tdesc, "vr0");
504 for (i = 0; i < 32; i++)
505 collect_register (regcache, base + i, &regset[i * 16]);
506
507 collect_register_by_name (regcache, "vscr", &regset[32 * 16 + 12]);
508 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
509 }
510
511 static void
512 ppc_store_vrregset (struct regcache *regcache, const void *buf)
513 {
514 int i, base;
515 const char *regset = (const char *) buf;
516
517 if (!(ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC))
518 return;
519
520 base = find_regno (regcache->tdesc, "vr0");
521 for (i = 0; i < 32; i++)
522 supply_register (regcache, base + i, &regset[i * 16]);
523
524 supply_register_by_name (regcache, "vscr", &regset[32 * 16 + 12]);
525 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
526 }
527
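/* Layout of the SPE register set as transferred with
   PTRACE_GETEVRREGS / PTRACE_SETEVRREGS: the ev0h-ev31h registers, the
   64-bit accumulator, and SPEFSCR.  */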
528 struct gdb_evrregset_t
529 {
530 unsigned long evr[32];
531 unsigned long long acc;
532 unsigned long spefscr;
533 };
534
535 static void
536 ppc_fill_evrregset (struct regcache *regcache, void *buf)
537 {
538 int i, ev0;
539 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
540
541 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE))
542 return;
543
544 ev0 = find_regno (regcache->tdesc, "ev0h");
545 for (i = 0; i < 32; i++)
546 collect_register (regcache, ev0 + i, &regset->evr[i]);
547
548 collect_register_by_name (regcache, "acc", &regset->acc);
549 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
550 }
551
552 static void
553 ppc_store_evrregset (struct regcache *regcache, const void *buf)
554 {
555 int i, ev0;
556 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
557
558 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE))
559 return;
560
561 ev0 = find_regno (regcache->tdesc, "ev0h");
562 for (i = 0; i < 32; i++)
563 supply_register (regcache, ev0 + i, &regset->evr[i]);
564
565 supply_register_by_name (regcache, "acc", &regset->acc);
566 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
567 }
568
569 /* Support for hardware single step. */
570
571 static int
572 ppc_supports_hardware_single_step (void)
573 {
574 return 1;
575 }
576
577 static struct regset_info ppc_regsets[] = {
578 /* List the extra register sets before GENERAL_REGS. That way we will
579 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
580 general registers. Some kernels support these, but not the newer
581 PPC_PTRACE_GETREGS. */
582 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, SIZEOF_VSXREGS, EXTENDED_REGS,
583 ppc_fill_vsxregset, ppc_store_vsxregset },
584 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, SIZEOF_VRREGS, EXTENDED_REGS,
585 ppc_fill_vrregset, ppc_store_vrregset },
586 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 32 * 4 + 8 + 4, EXTENDED_REGS,
587 ppc_fill_evrregset, ppc_store_evrregset },
588 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
589 NULL_REGSET
590 };
591
592 static struct usrregs_info ppc_usrregs_info =
593 {
594 ppc_num_regs,
595 ppc_regmap,
596 };
597
598 static struct regsets_info ppc_regsets_info =
599 {
600 ppc_regsets, /* regsets */
601 0, /* num_regsets */
602 NULL, /* disabled_regsets */
603 };
604
605 static struct regs_info regs_info =
606 {
607 NULL, /* regset_bitmap */
608 &ppc_usrregs_info,
609 &ppc_regsets_info
610 };
611
612 static const struct regs_info *
613 ppc_regs_info (void)
614 {
615 return &regs_info;
616 }
617
618 static void
619 ppc_arch_setup (void)
620 {
621 const struct target_desc *tdesc;
622 struct ppc_linux_features features = ppc_linux_no_features;
623
624 features.wordsize = 4;
625
626 #ifdef __powerpc64__
627 long msr;
628 struct regcache *regcache;
629
630 /* On a 64-bit host, assume 64-bit inferior process with no
631 AltiVec registers. Reset ppc_hwcap to ensure that the
632 collect_register call below does not fail. */
633 tdesc = tdesc_powerpc_64l;
634 current_process ()->tdesc = tdesc;
635 ppc_hwcap = 0;
636
637 regcache = new_register_cache (tdesc);
638 fetch_inferior_registers (regcache, find_regno (tdesc, "msr"));
639 collect_register_by_name (regcache, "msr", &msr);
640 free_register_cache (regcache);
641 if (ppc64_64bit_inferior_p (msr))
642 {
643 features.wordsize = 8;
644 }
645 #endif
646
647 if (features.wordsize == 4)
648 {
649 /* OK, we have a 32-bit inferior. */
650 tdesc = tdesc_powerpc_32l;
651 current_process ()->tdesc = tdesc;
652 }
653
654 /* The value of current_process ()->tdesc needs to be set for this
655 call. */
656 ppc_get_auxv (AT_HWCAP, &ppc_hwcap);
657
658 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
659
660 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
661 features.vsx = true;
662
663 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
664 features.altivec = true;
665
666 if (ppc_hwcap & PPC_FEATURE_CELL)
667 features.cell = true;
668
669 tdesc = ppc_linux_match_description (features);
670
671 /* On 32-bit machines, check for SPE registers.
672 Set the low target's regmap field appropriately. */
673 #ifndef __powerpc64__
674 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
675 tdesc = tdesc_powerpc_e500l;
676
677 if (!ppc_regmap_adjusted)
678 {
679 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
680 ppc_usrregs_info.regmap = ppc_regmap_e500;
681
682 /* If the FPSCR is 64-bit wide, we need to fetch the whole
683 64-bit slot and not just its second word. The PT_FPSCR
684 supplied in a 32-bit GDB compilation doesn't reflect
685 this. */
686 if (register_size (tdesc, 70) == 8)
687 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
688
689 ppc_regmap_adjusted = 1;
690 }
691 #endif
692
693 current_process ()->tdesc = tdesc;
694 }
695
696 /* Implementation of linux_target_ops method "supports_tracepoints". */
697
698 static int
699 ppc_supports_tracepoints (void)
700 {
701 return 1;
702 }
703
704 /* Get the thread area address. This is used to recognize which
705 thread is which when tracing with the in-process agent library. We
706 don't read anything from the address, and treat it as opaque; it's
707 the address itself that we assume is unique per-thread. */
708
709 static int
710 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
711 {
712 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
713 struct thread_info *thr = get_lwp_thread (lwp);
714 struct regcache *regcache = get_thread_regcache (thr, 1);
715 ULONGEST tp = 0;
716
717 #ifdef __powerpc64__
718 if (register_size (regcache->tdesc, 0) == 8)
719 collect_register_by_name (regcache, "r13", &tp);
720 else
721 #endif
722 collect_register_by_name (regcache, "r2", &tp);
723
724 *addr = tp;
725
726 return 0;
727 }
728
729 #ifdef __powerpc64__
730
731 /* Older glibc doesn't provide this. */
732
733 #ifndef EF_PPC64_ABI
734 #define EF_PPC64_ABI 3
735 #endif
736
737 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
738 inferiors. */
739
740 static int
741 is_elfv2_inferior (void)
742 {
743 /* To be used as fallback if we're unable to determine the right result -
744 assume inferior uses the same ABI as gdbserver. */
745 #if _CALL_ELF == 2
746 const int def_res = 1;
747 #else
748 const int def_res = 0;
749 #endif
750 unsigned long phdr;
751 Elf64_Ehdr ehdr;
752
753 if (!ppc_get_auxv (AT_PHDR, &phdr))
754 return def_res;
755
756 /* Assume ELF header is at the beginning of the page where program headers
757 are located. If it doesn't look like one, bail. */
758
759 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
760 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
761 return def_res;
762
763 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
764 }
765
766 #endif
767
768 /* Generate a ds-form instruction in BUF and return the number of instruction words written.
769
770 0 6 11 16 30 32
771 | OPCD | RST | RA | DS |XO| */
772
773 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
774 static int
775 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
776 {
777 uint32_t insn;
778
779 gdb_assert ((opcd & ~0x3f) == 0);
780 gdb_assert ((rst & ~0x1f) == 0);
781 gdb_assert ((ra & ~0x1f) == 0);
782 gdb_assert ((xo & ~0x3) == 0);
783
784 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
785 *buf = (opcd << 26) | insn;
786 return 1;
787 }
788
789 /* The following are frequently used ds-form instructions. */
790
791 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
792 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
793 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
794 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
795
796 /* Generate a d-form instruction in BUF.
797
798 0 6 11 16 32
799 | OPCD | RST | RA | D | */
800
801 static int
802 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
803 {
804 uint32_t insn;
805
806 gdb_assert ((opcd & ~0x3f) == 0);
807 gdb_assert ((rst & ~0x1f) == 0);
808 gdb_assert ((ra & ~0x1f) == 0);
809
810 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
811 *buf = (opcd << 26) | insn;
812 return 1;
813 }
814
815 /* The following are frequently used d-form instructions. */
816
817 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
818 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
819 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
820 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
821 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
822 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
823 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
824 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
825 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
826
827 /* Generate an xfx-form instruction in BUF and return the number of
828 instruction words written.
829
830 0 6 11 21 31 32
831 | OPCD | RST | RI | XO |/| */
832
833 static int
834 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
835 {
836 uint32_t insn;
837 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
838
839 gdb_assert ((opcd & ~0x3f) == 0);
840 gdb_assert ((rst & ~0x1f) == 0);
841 gdb_assert ((xo & ~0x3ff) == 0);
842
843 insn = (rst << 21) | (n << 11) | (xo << 1);
844 *buf = (opcd << 26) | insn;
845 return 1;
846 }
847
848 /* The following are frequently used xfx-form instructions. */
849
850 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
851 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
852 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
853 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
854 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
855 E & 0xf, 598)
856 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
857
858
859 /* Generate an x-form instruction in BUF and return the number of instruction words written.
860
861 0 6 11 16 21 31 32
862 | OPCD | RST | RA | RB | XO |RC| */
863
864 static int
865 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
866 {
867 uint32_t insn;
868
869 gdb_assert ((opcd & ~0x3f) == 0);
870 gdb_assert ((rst & ~0x1f) == 0);
871 gdb_assert ((ra & ~0x1f) == 0);
872 gdb_assert ((rb & ~0x1f) == 0);
873 gdb_assert ((xo & ~0x3ff) == 0);
874 gdb_assert ((rc & ~1) == 0);
875
876 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
877 *buf = (opcd << 26) | insn;
878 return 1;
879 }
880
881 /* The following are frequently used x-form instructions. */
882
883 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
884 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
885 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
886 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
887 /* Assume bf = cr7. */
888 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
889
890
891 /* Generate an md-form instruction in BUF and return the number of instruction words written.
892
893 0 6 11 16 21 27 30 31 32
894 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
895
896 static int
897 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
898 int xo, int rc)
899 {
900 uint32_t insn;
901 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
902 unsigned int sh0_4 = sh & 0x1f;
903 unsigned int sh5 = (sh >> 5) & 1;
904
905 gdb_assert ((opcd & ~0x3f) == 0);
906 gdb_assert ((rs & ~0x1f) == 0);
907 gdb_assert ((ra & ~0x1f) == 0);
908 gdb_assert ((sh & ~0x3f) == 0);
909 gdb_assert ((mb & ~0x3f) == 0);
910 gdb_assert ((xo & ~0x7) == 0);
911 gdb_assert ((rc & ~0x1) == 0);
912
913 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
914 | (sh5 << 1) | (xo << 2) | (rc & 1);
915 *buf = (opcd << 26) | insn;
916 return 1;
917 }
918
919 /* The following are frequently used md-form instructions. */
920
921 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
922 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
923 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
924 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
925
926 /* Generate an i-form instruction in BUF and return the number of instruction words written.
927
928 0 6 30 31 32
929 | OPCD | LI |AA|LK| */
930
931 static int
932 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
933 {
934 uint32_t insn;
935
936 gdb_assert ((opcd & ~0x3f) == 0);
937
938 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
939 *buf = (opcd << 26) | insn;
940 return 1;
941 }
942
943 /* The following are frequently used i-form instructions. */
944
945 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
946 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
947
948 /* Generate a b-form instruction in BUF and return the number of instruction words written.
949
950 0 6 11 16 30 31 32
951 | OPCD | BO | BI | BD |AA|LK| */
952
953 static int
954 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
955 int aa, int lk)
956 {
957 uint32_t insn;
958
959 gdb_assert ((opcd & ~0x3f) == 0);
960 gdb_assert ((bo & ~0x1f) == 0);
961 gdb_assert ((bi & ~0x1f) == 0);
962
963 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
964 *buf = (opcd << 26) | insn;
965 return 1;
966 }
967
968 /* The following are frequently used b-form instructions. */
969 /* Assume bi = cr7. */
970 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
971
972 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
973 respectively. They are primarily used to save/restore GPRs in the jump
974 pad, not for bytecode compilation. */
975
976 #ifdef __powerpc64__
977 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
978 GEN_LD (buf, rt, ra, si) : \
979 GEN_LWZ (buf, rt, ra, si))
980 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
981 GEN_STD (buf, rt, ra, si) : \
982 GEN_STW (buf, rt, ra, si))
983 #else
984 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
985 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
986 #endif
987
988 /* Generate a sequence of instructions to load IMM into register REG.
989 Write the instructions to BUF and return the number of instruction words written. */
990
991 static int
992 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
993 {
994 uint32_t *p = buf;
995
996 if ((imm + 32768) < 65536)
997 {
998 /* li reg, imm[15:0] */
999 p += GEN_LI (p, reg, imm);
1000 }
1001 else if ((imm >> 32) == 0)
1002 {
1003 /* lis reg, imm[31:16]
1004 ori reg, reg, imm[15:0]
1005 rldicl reg, reg, 0, 32 */
1006 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1007 if ((imm & 0xffff) != 0)
1008 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1009 /* Clear upper 32-bit if sign-bit is set. */
1010 if (imm & (1u << 31) && is_64)
1011 p += GEN_RLDICL (p, reg, reg, 0, 32);
1012 }
1013 else
1014 {
1015 gdb_assert (is_64);
1016 /* lis reg, <imm[63:48]>
1017 ori reg, reg, <imm[48:32]>
1018 rldicr reg, reg, 32, 31
1019 oris reg, reg, <imm[31:16]>
1020 ori reg, reg, <imm[15:0]> */
1021 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1022 if (((imm >> 32) & 0xffff) != 0)
1023 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1024 p += GEN_RLDICR (p, reg, reg, 32, 31);
1025 if (((imm >> 16) & 0xffff) != 0)
1026 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1027 if ((imm & 0xffff) != 0)
1028 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1029 }
1030
1031 return p - buf;
1032 }
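/* As an illustrative example (values chosen here, not from the original
   code): gen_limm for IMM = 0x1122334455667788 with IS_64 set expands to
   "lis reg, 0x1122; ori reg, reg, 0x3344; rldicr reg, reg, 32, 31;
   oris reg, reg, 0x5566; ori reg, reg, 0x7788", i.e. five instructions.  */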
1033
1034 /* Generate a sequence for an atomic exchange at location LOCK.
1035 This code sequence clobbers r6, r7, r8. LOCK is the location for
1036 the atomic exchange, OLD_VALUE is the expected old value stored in
1037 the location, and R_NEW is a register holding the new value. */
1038
1039 static int
1040 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1041 int is_64)
1042 {
1043 const int r_lock = 6;
1044 const int r_old = 7;
1045 const int r_tmp = 8;
1046 uint32_t *p = buf;
1047
1048 /*
1049 1: lwarx TMP, 0, LOCK
1050 cmpw TMP, OLD
1051 bne 1b
1052 stwcx. NEW, 0, LOCK
1053 bne 1b */
1054
1055 p += gen_limm (p, r_lock, lock, is_64);
1056 p += gen_limm (p, r_old, old_value, is_64);
1057
1058 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1059 p += GEN_CMPW (p, r_tmp, r_old);
1060 p += GEN_BNE (p, -8);
1061 p += GEN_STWCX (p, r_new, 0, r_lock);
1062 p += GEN_BNE (p, -16);
1063
1064 return p - buf;
1065 }
1066
1067 /* Generate a sequence of instructions for calling a function
1068 at address FN. Return the number of instruction words written to BUF. */
1069
1070 static int
1071 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1072 {
1073 uint32_t *p = buf;
1074
1075 /* The function must be entered via r12 so that the callee can calculate its TOC address. */
1076 p += gen_limm (p, 12, fn, is_64);
1077 if (is_opd)
1078 {
1079 p += GEN_LOAD (p, 11, 12, 16, is_64);
1080 p += GEN_LOAD (p, 2, 12, 8, is_64);
1081 p += GEN_LOAD (p, 12, 12, 0, is_64);
1082 }
1083 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1084 *p++ = 0x4e800421; /* bctrl */
1085
1086 return p - buf;
1087 }
1088
1089 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1090 of instruction. This function is used to adjust pc-relative instructions
1091 when copying. */
1092
1093 static void
1094 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1095 {
1096 uint32_t insn, op6;
1097 long rel, newrel;
1098
1099 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1100 op6 = PPC_OP6 (insn);
1101
1102 if (op6 == 18 && (insn & 2) == 0)
1103 {
1104 /* branch && AA = 0 */
1105 rel = PPC_LI (insn);
1106 newrel = (oldloc - *to) + rel;
1107
1108 /* Out of range. Cannot relocate instruction. */
1109 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1110 return;
1111
1112 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1113 }
1114 else if (op6 == 16 && (insn & 2) == 0)
1115 {
1116 /* conditional branch && AA = 0 */
1117
1118 /* If the new relocation is too big for even a 26-bit unconditional
1119 branch, there is nothing we can do. Just abort.
1120
1121 Otherwise, if it can fit in a 16-bit conditional branch, just
1122 copy the instruction and relocate the address.
1123 
1124 If it's too big for a 16-bit conditional branch, try to invert the
1125 condition and jump with a 26-bit branch. For example,
1126
1127 beq .Lgoto
1128 INSN1
1129
1130 =>
1131
1132 bne 1f (+8)
1133 b .Lgoto
1134 1:INSN1
1135
1136 After this transform, we actually jump from *TO+4 instead of *TO,
1137 so check the relocation again, because it will be one instruction
1138 farther than before if *TO is after OLDLOC.
1139
1140
1141 A BDNZT (or similar) instruction is transformed from
1142
1143 bdnzt eq, .Lgoto
1144 INSN1
1145
1146 =>
1147
1148 bdz 1f (+12)
1149 bf eq, 1f (+8)
1150 b .Lgoto
1151 1:INSN1
1152
1153 See also "BO field encodings". */
1154
1155 rel = PPC_BD (insn);
1156 newrel = (oldloc - *to) + rel;
1157
1158 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1159 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1160 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1161 {
1162 newrel -= 4;
1163
1164 /* Out of range. Cannot relocate instruction. */
1165 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1166 return;
1167
1168 if ((PPC_BO (insn) & 0x14) == 0x4)
1169 insn ^= (1 << 24);
1170 else if ((PPC_BO (insn) & 0x14) == 0x10)
1171 insn ^= (1 << 22);
1172
1173 /* Jump over the unconditional branch. */
1174 insn = (insn & ~0xfffc) | 0x8;
1175 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1176 *to += 4;
1177
1178 /* Build an unconditional branch and copy the LK bit. */
1179 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1180 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1181 *to += 4;
1182
1183 return;
1184 }
1185 else if ((PPC_BO (insn) & 0x14) == 0)
1186 {
1187 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1188 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1189
1190 newrel -= 8;
1191
1192 /* Out of range. Cannot relocate instruction. */
1193 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1194 return;
1195
1196 /* Copy BI field. */
1197 bf_insn |= (insn & 0x1f0000);
1198
1199 /* Invert condition. */
1200 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1201 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1202
1203 write_inferior_memory (*to, (unsigned char *) &bdnz_insn, 4);
1204 *to += 4;
1205 write_inferior_memory (*to, (unsigned char *) &bf_insn, 4);
1206 *to += 4;
1207
1208 /* Build an unconditional branch and copy the LK bit. */
1209 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1210 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1211 *to += 4;
1212
1213 return;
1214 }
1215 else /* (BO & 0x14) == 0x14, branch always. */
1216 {
1217 /* Out of range. Cannot relocate instruction. */
1218 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1219 return;
1220
1221 /* Build an unconditional branch and copy the LK bit. */
1222 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1223 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1224 *to += 4;
1225
1226 return;
1227 }
1228 }
1229
1230 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1231 *to += 4;
1232 }
1233
1234 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1235 See target.h for details. */
1236
1237 static int
1238 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1239 CORE_ADDR collector,
1240 CORE_ADDR lockaddr,
1241 ULONGEST orig_size,
1242 CORE_ADDR *jump_entry,
1243 CORE_ADDR *trampoline,
1244 ULONGEST *trampoline_size,
1245 unsigned char *jjump_pad_insn,
1246 ULONGEST *jjump_pad_insn_size,
1247 CORE_ADDR *adjusted_insn_addr,
1248 CORE_ADDR *adjusted_insn_addr_end,
1249 char *err)
1250 {
1251 uint32_t buf[256];
1252 uint32_t *p = buf;
1253 int j, offset;
1254 CORE_ADDR buildaddr = *jump_entry;
1255 const CORE_ADDR entryaddr = *jump_entry;
1256 int rsz, min_frame, frame_size, tp_reg;
1257 #ifdef __powerpc64__
1258 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1259 int is_64 = register_size (regcache->tdesc, 0) == 8;
1260 int is_opd = is_64 && !is_elfv2_inferior ();
1261 #else
1262 int is_64 = 0, is_opd = 0;
1263 #endif
1264
1265 #ifdef __powerpc64__
1266 if (is_64)
1267 {
1268 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1269 rsz = 8;
1270 min_frame = 112;
1271 frame_size = (40 * rsz) + min_frame;
1272 tp_reg = 13;
1273 }
1274 else
1275 {
1276 #endif
1277 rsz = 4;
1278 min_frame = 16;
1279 frame_size = (40 * rsz) + min_frame;
1280 tp_reg = 2;
1281 #ifdef __powerpc64__
1282 }
1283 #endif
1284
1285 /* Stack frame layout for this jump pad,
1286
1287 High thread_area (r13/r2) |
1288 tpoint - collecting_t obj
1289 PC/<tpaddr> | +36
1290 CTR | +35
1291 LR | +34
1292 XER | +33
1293 CR | +32
1294 R31 |
1295 R29 |
1296 ... |
1297 R1 | +1
1298 R0 - collected registers
1299 ... |
1300 ... |
1301 Low Back-chain -
1302
1303
1304 The code flow of this jump pad,
1305
1306 1. Adjust SP
1307 2. Save GPR and SPR
1308 3. Prepare argument
1309 4. Call gdb_collector
1310 5. Restore GPR and SPR
1311 6. Restore SP
1312 7. Build a jump back to the program
1313 8. Copy/relocate the original instruction
1314 9. Build a jump to replace the original instruction. */
1315
1316 /* Adjust stack pointer. */
1317 if (is_64)
1318 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1319 else
1320 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1321
1322 /* Store GPRs. Save R1 later, because it had just been modified, but
1323 we want the original value. */
1324 for (j = 2; j < 32; j++)
1325 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1326 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1327 /* Set r0 to the original value of r1 before adjusting stack frame,
1328 and then save it. */
1329 p += GEN_ADDI (p, 0, 1, frame_size);
1330 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1331
1332 /* Save CR, XER, LR, and CTR. */
1333 p += GEN_MFCR (p, 3); /* mfcr r3 */
1334 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1335 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1336 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1337 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1338 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1339 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1340 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1341
1342 /* Save PC<tpaddr> */
1343 p += gen_limm (p, 3, tpaddr, is_64);
1344 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1345
1346
1347 /* Setup arguments to collector. */
1348 /* Set r4 to collected registers. */
1349 p += GEN_ADDI (p, 4, 1, min_frame);
1350 /* Set r3 to TPOINT. */
1351 p += gen_limm (p, 3, tpoint, is_64);
1352
1353 /* Prepare collecting_t object for lock. */
1354 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1355 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1356 /* Set R5 to collecting object. */
1357 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1358
1359 p += GEN_LWSYNC (p);
1360 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1361 p += GEN_LWSYNC (p);
1362
1363 /* Call to collector. */
1364 p += gen_call (p, collector, is_64, is_opd);
1365
1366 /* Simply write 0 to release the lock. */
1367 p += gen_limm (p, 3, lockaddr, is_64);
1368 p += gen_limm (p, 4, 0, is_64);
1369 p += GEN_LWSYNC (p);
1370 p += GEN_STORE (p, 4, 3, 0, is_64);
1371
1372 /* Restore stack and registers. */
1373 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1374 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1375 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1376 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1377 p += GEN_MTCR (p, 3); /* mtcr r3 */
1378 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1379 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1380 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1381
1382 /* Restore GPRs. */
1383 for (j = 2; j < 32; j++)
1384 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1385 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1386 /* Restore SP. */
1387 p += GEN_ADDI (p, 1, 1, frame_size);
1388
1389 /* Flush instructions to inferior memory. */
1390 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1391
1392 /* Now, insert the original instruction to execute in the jump pad. */
1393 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1394 *adjusted_insn_addr_end = *adjusted_insn_addr;
1395 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1396
1397 /* Verify the relocation size. It should be 4 for a normal copy,
1398 or 8 or 12 for some conditional branches. */
1399 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1400 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1401 {
1402 sprintf (err, "E.Unexpected instruction length = %d "
1403 "when relocating instruction.",
1404 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1405 return 1;
1406 }
1407
1408 buildaddr = *adjusted_insn_addr_end;
1409 p = buf;
1410 /* Finally, write a jump back to the program. */
1411 offset = (tpaddr + 4) - buildaddr;
1412 if (offset >= (1 << 25) || offset < -(1 << 25))
1413 {
1414 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1415 "(offset 0x%x > 26-bit).", offset);
1416 return 1;
1417 }
1418 /* b <tpaddr+4> */
1419 p += GEN_B (p, offset);
1420 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1421 *jump_entry = buildaddr + (p - buf) * 4;
1422
1423 /* The jump pad is now built. Wire in a jump to our jump pad. This
1424 is always done last (by our caller actually), so that we can
1425 install fast tracepoints with threads running. This relies on
1426 the agent's atomic write support. */
1427 offset = entryaddr - tpaddr;
1428 if (offset >= (1 << 25) || offset < -(1 << 25))
1429 {
1430 sprintf (err, "E.Jump to jump pad too far from tracepoint "
1431 "(offset 0x%x > 26-bit).", offset);
1432 return 1;
1433 }
1434 /* b <jentry> */
1435 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1436 *jjump_pad_insn_size = 4;
1437
1438 return 0;
1439 }
1440
1441 /* Returns the minimum instruction length for installing a tracepoint. */
1442
1443 static int
1444 ppc_get_min_fast_tracepoint_insn_len (void)
1445 {
1446 return 4;
1447 }
1448
1449 /* Emits a given buffer into the target at current_insn_ptr. Length
1450 is in units of 32-bit words. */
1451
1452 static void
1453 emit_insns (uint32_t *buf, int n)
1454 {
1455 n = n * sizeof (uint32_t);
1456 write_inferior_memory (current_insn_ptr, (unsigned char *) buf, n);
1457 current_insn_ptr += n;
1458 }
1459
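/* __EMIT_ASM assembles INSNS into gdbserver's own image, in the
   .text.__ppcbcax section bracketed by the start_bcax_/end_bcax_
   labels, and then emit_insns copies those instruction words into the
   inferior at current_insn_ptr.  */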
1460 #define __EMIT_ASM(NAME, INSNS) \
1461 do \
1462 { \
1463 extern uint32_t start_bcax_ ## NAME []; \
1464 extern uint32_t end_bcax_ ## NAME []; \
1465 emit_insns (start_bcax_ ## NAME, \
1466 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1467 __asm__ (".section .text.__ppcbcax\n\t" \
1468 "start_bcax_" #NAME ":\n\t" \
1469 INSNS "\n\t" \
1470 "end_bcax_" #NAME ":\n\t" \
1471 ".previous\n\t"); \
1472 } while (0)
1473
1474 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1475 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1476
1477 /*
1478
1479 Bytecode execution stack frame - 32-bit
1480
1481 | LR save area (SP + 4)
1482 SP' -> +- Back chain (SP + 0)
1483 | Save r31 for access saved arguments
1484 | Save r30 for bytecode stack pointer
1485 | Save r4 for incoming argument *value
1486 | Save r3 for incoming argument regs
1487 r30 -> +- Bytecode execution stack
1488 |
1489 | 64 bytes (8 doublewords) initially.
1490 | Expand stack as needed.
1491 |
1492 +-
1493 | Some padding for minimum stack frame and 16-byte alignment.
1494 | 16 bytes.
1495 SP +- Back-chain (SP')
1496
1497 initial frame size
1498 = 16 + (4 * 4) + 64
1499 = 96
1500
1501 r30 is the stack pointer for the bytecode machine.
1502 It should point to the next empty slot, so we can use a load-with-update for pop.
1503 r3 is used for cache of the high part of TOP value.
1504 It was the first argument, pointer to regs.
1505 r4 is used for cache of the low part of TOP value.
1506 It was the second argument, pointer to the result.
1507 We should set *result = TOP after leaving this function.
1508
1509 Note:
1510 * To restore stack at epilogue
1511 => sp = r31
1512 * To check stack is big enough for bytecode execution.
1513 => r30 - 8 > SP + 8
1514 * To return execution result.
1515 => 0(r4) = TOP
1516
1517 */
1518
1519 /* Regardless of endianness, register 3 is always the high part and 4 the
1520 low part. These defines are used when the register pair is stored/loaded.
1521 Likewise, to simplify the code, there is a similar define for 5:6. */
1522
1523 #if __BYTE_ORDER == __LITTLE_ENDIAN
1524 #define TOP_FIRST "4"
1525 #define TOP_SECOND "3"
1526 #define TMP_FIRST "6"
1527 #define TMP_SECOND "5"
1528 #else
1529 #define TOP_FIRST "3"
1530 #define TOP_SECOND "4"
1531 #define TMP_FIRST "5"
1532 #define TMP_SECOND "6"
1533 #endif
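/* For example (an illustration, not taken from the original comments):
   if TOP is 0x1122334455667788, then r3 holds 0x11223344 and r4 holds
   0x55667788 on either endianness; the "stw TOP_FIRST, 0(30)" /
   "stw TOP_SECOND, 4(30)" pairs below then store the doubleword in the
   inferior's native byte order.  */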
1534
1535 /* Emit prologue in inferior memory. See above comments. */
1536
1537 static void
1538 ppc_emit_prologue (void)
1539 {
1540 EMIT_ASM (/* Save return address. */
1541 "mflr 0 \n"
1542 "stw 0, 4(1) \n"
1543 /* Adjust SP. 96 is the initial frame size. */
1544 "stwu 1, -96(1) \n"
1545 /* Save r31, r30, and the incoming arguments. */
1546 "stw 31, 96-4(1) \n"
1547 "stw 30, 96-8(1) \n"
1548 "stw 4, 96-12(1) \n"
1549 "stw 3, 96-16(1) \n"
1550 /* Point r31 at the original r1 for accessing arguments. */
1551 "addi 31, 1, 96 \n"
1552 /* Set r30 to point at the stack top. */
1553 "addi 30, 1, 64 \n"
1554 /* Initialize r3/TOP to 0. */
1555 "li 3, 0 \n"
1556 "li 4, 0 \n");
1557 }
1558
1559 /* Emit epilogue in inferior memory. See above comments. */
1560
1561 static void
1562 ppc_emit_epilogue (void)
1563 {
1564 EMIT_ASM (/* *result = TOP */
1565 "lwz 5, -12(31) \n"
1566 "stw " TOP_FIRST ", 0(5) \n"
1567 "stw " TOP_SECOND ", 4(5) \n"
1568 /* Restore registers. */
1569 "lwz 31, -4(31) \n"
1570 "lwz 30, -8(31) \n"
1571 /* Restore SP. */
1572 "lwz 1, 0(1) \n"
1573 /* Restore LR. */
1574 "lwz 0, 4(1) \n"
1575 /* Return 0 for no-error. */
1576 "li 3, 0 \n"
1577 "mtlr 0 \n"
1578 "blr \n");
1579 }
1580
1581 /* TOP = stack[--sp] + TOP */
1582
1583 static void
1584 ppc_emit_add (void)
1585 {
1586 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1587 "lwz " TMP_SECOND ", 4(30)\n"
1588 "addc 4, 6, 4 \n"
1589 "adde 3, 5, 3 \n");
1590 }
1591
1592 /* TOP = stack[--sp] - TOP */
1593
1594 static void
1595 ppc_emit_sub (void)
1596 {
1597 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1598 "lwz " TMP_SECOND ", 4(30) \n"
1599 "subfc 4, 4, 6 \n"
1600 "subfe 3, 3, 5 \n");
1601 }
1602
1603 /* TOP = stack[--sp] * TOP */
1604
1605 static void
1606 ppc_emit_mul (void)
1607 {
1608 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1609 "lwz " TMP_SECOND ", 4(30) \n"
1610 "mulhwu 7, 6, 4 \n"
1611 "mullw 3, 6, 3 \n"
1612 "mullw 5, 4, 5 \n"
1613 "mullw 4, 6, 4 \n"
1614 "add 3, 5, 3 \n"
1615 "add 3, 7, 3 \n");
1616 }
1617
1618 /* TOP = stack[--sp] << TOP */
1619
1620 static void
1621 ppc_emit_lsh (void)
1622 {
1623 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1624 "lwz " TMP_SECOND ", 4(30) \n"
1625 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1626 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1627 "slw 5, 5, 4\n" /* Shift high part left */
1628 "slw 4, 6, 4\n" /* Shift low part left */
1629 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1630 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1631 "or 3, 5, 3\n"
1632 "or 3, 7, 3\n"); /* Assemble high part */
1633 }
1634
1635 /* TOP = stack[--sp] >> TOP
1636 (Arithmetic shift right) */
1637
1638 static void
1639 ppc_emit_rsh_signed (void)
1640 {
1641 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1642 "lwz " TMP_SECOND ", 4(30) \n"
1643 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1644 "sraw 3, 5, 4\n" /* Shift high part right */
1645 "cmpwi 7, 1\n"
1646 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1647 "sraw 4, 5, 7\n" /* Shift high to low */
1648 "b 2f\n"
1649 "1:\n"
1650 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1651 "srw 4, 6, 4\n" /* Shift low part right */
1652 "slw 5, 5, 7\n" /* Shift high to low */
1653 "or 4, 4, 5\n" /* Assemble low part */
1654 "2:\n");
1655 }
1656
1657 /* TOP = stack[--sp] >> TOP
1658 (Logical shift right) */
1659
1660 static void
1661 ppc_emit_rsh_unsigned (void)
1662 {
1663 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1664 "lwz " TMP_SECOND ", 4(30) \n"
1665 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1666 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1667 "srw 6, 6, 4\n" /* Shift low part right */
1668 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1669 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1670 "or 6, 6, 3\n"
1671 "srw 3, 5, 4\n" /* Shift high part right */
1672 "or 4, 6, 7\n"); /* Assemble low part */
1673 }
1674
1675 /* Emit code for signed-extension specified by ARG. */
1676
1677 static void
1678 ppc_emit_ext (int arg)
1679 {
1680 switch (arg)
1681 {
1682 case 8:
1683 EMIT_ASM ("extsb 4, 4\n"
1684 "srawi 3, 4, 31");
1685 break;
1686 case 16:
1687 EMIT_ASM ("extsh 4, 4\n"
1688 "srawi 3, 4, 31");
1689 break;
1690 case 32:
1691 EMIT_ASM ("srawi 3, 4, 31");
1692 break;
1693 default:
1694 emit_error = 1;
1695 }
1696 }
1697
1698 /* Emit code for zero-extension specified by ARG. */
1699
1700 static void
1701 ppc_emit_zero_ext (int arg)
1702 {
1703 switch (arg)
1704 {
1705 case 8:
1706 EMIT_ASM ("clrlwi 4,4,24\n"
1707 "li 3, 0\n");
1708 break;
1709 case 16:
1710 EMIT_ASM ("clrlwi 4,4,16\n"
1711 "li 3, 0\n");
1712 break;
1713 case 32:
1714 EMIT_ASM ("li 3, 0");
1715 break;
1716 default:
1717 emit_error = 1;
1718 }
1719 }
1720
1721 /* TOP = !TOP
1722 i.e., TOP = (TOP == 0) ? 1 : 0; */
1723
1724 static void
1725 ppc_emit_log_not (void)
1726 {
1727 EMIT_ASM ("or 4, 3, 4 \n"
1728 "cntlzw 4, 4 \n"
1729 "srwi 4, 4, 5 \n"
1730 "li 3, 0 \n");
1731 }
1732
1733 /* TOP = stack[--sp] & TOP */
1734
1735 static void
1736 ppc_emit_bit_and (void)
1737 {
1738 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1739 "lwz " TMP_SECOND ", 4(30) \n"
1740 "and 4, 6, 4 \n"
1741 "and 3, 5, 3 \n");
1742 }
1743
1744 /* TOP = stack[--sp] | TOP */
1745
1746 static void
1747 ppc_emit_bit_or (void)
1748 {
1749 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1750 "lwz " TMP_SECOND ", 4(30) \n"
1751 "or 4, 6, 4 \n"
1752 "or 3, 5, 3 \n");
1753 }
1754
1755 /* TOP = stack[--sp] ^ TOP */
1756
1757 static void
1758 ppc_emit_bit_xor (void)
1759 {
1760 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1761 "lwz " TMP_SECOND ", 4(30) \n"
1762 "xor 4, 6, 4 \n"
1763 "xor 3, 5, 3 \n");
1764 }
1765
1766 /* TOP = ~TOP
1767 i.e., TOP = ~(TOP | TOP) */
1768
1769 static void
1770 ppc_emit_bit_not (void)
1771 {
1772 EMIT_ASM ("nor 3, 3, 3 \n"
1773 "nor 4, 4, 4 \n");
1774 }
1775
1776 /* TOP = stack[--sp] == TOP */
1777
1778 static void
1779 ppc_emit_equal (void)
1780 {
1781 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1782 "lwz " TMP_SECOND ", 4(30) \n"
1783 "xor 4, 6, 4 \n"
1784 "xor 3, 5, 3 \n"
1785 "or 4, 3, 4 \n"
1786 "cntlzw 4, 4 \n"
1787 "srwi 4, 4, 5 \n"
1788 "li 3, 0 \n");
1789 }
1790
1791 /* TOP = stack[--sp] < TOP
1792 (Signed comparison) */
1793
1794 static void
1795 ppc_emit_less_signed (void)
1796 {
1797 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1798 "lwz " TMP_SECOND ", 4(30) \n"
1799 "cmplw 6, 6, 4 \n"
1800 "cmpw 7, 5, 3 \n"
1801 /* CR6 bit 0 = low less and high equal */
1802 "crand 6*4+0, 6*4+0, 7*4+2\n"
1803 /* CR7 bit 0 = (low less and high equal) or high less */
1804 "cror 7*4+0, 7*4+0, 6*4+0\n"
1805 "mfcr 4 \n"
1806 "rlwinm 4, 4, 29, 31, 31 \n"
1807 "li 3, 0 \n");
1808 }
1809
1810 /* TOP = stack[--sp] < TOP
1811 (Unsigned comparison) */
1812
1813 static void
1814 ppc_emit_less_unsigned (void)
1815 {
1816 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1817 "lwz " TMP_SECOND ", 4(30) \n"
1818 "cmplw 6, 6, 4 \n"
1819 "cmplw 7, 5, 3 \n"
1820 /* CR6 bit 0 = low less and high equal */
1821 "crand 6*4+0, 6*4+0, 7*4+2\n"
1822 /* CR7 bit 0 = (low less and high equal) or high less */
1823 "cror 7*4+0, 7*4+0, 6*4+0\n"
1824 "mfcr 4 \n"
1825 "rlwinm 4, 4, 29, 31, 31 \n"
1826 "li 3, 0 \n");
1827 }
1828
1829 /* Access the memory address in TOP, reading SIZE bytes.
1830 Zero-extend the value read. */
1831
1832 static void
1833 ppc_emit_ref (int size)
1834 {
1835 switch (size)
1836 {
1837 case 1:
1838 EMIT_ASM ("lbz 4, 0(4)\n"
1839 "li 3, 0");
1840 break;
1841 case 2:
1842 EMIT_ASM ("lhz 4, 0(4)\n"
1843 "li 3, 0");
1844 break;
1845 case 4:
1846 EMIT_ASM ("lwz 4, 0(4)\n"
1847 "li 3, 0");
1848 break;
1849 case 8:
1850 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1851 EMIT_ASM ("lwz 3, 4(4)\n"
1852 "lwz 4, 0(4)");
1853 else
1854 EMIT_ASM ("lwz 3, 0(4)\n"
1855 "lwz 4, 4(4)");
1856 break;
1857 }
1858 }
1859
1860 /* TOP = NUM */
1861
1862 static void
1863 ppc_emit_const (LONGEST num)
1864 {
1865 uint32_t buf[10];
1866 uint32_t *p = buf;
1867
1868 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
1869 p += gen_limm (p, 4, num & 0xffffffff, 0);
1870
1871 emit_insns (buf, p - buf);
1872 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1873 }
1874
1875 /* Set TOP to the value of register REG by calling the get_raw_reg function
1876 with two arguments, the collected register buffer and the register number. */
1877
1878 static void
1879 ppc_emit_reg (int reg)
1880 {
1881 uint32_t buf[13];
1882 uint32_t *p = buf;
1883
1884 /* fctx->regs is passed in r3 and then saved in -16(31). */
1885 p += GEN_LWZ (p, 3, 31, -16);
1886 p += GEN_LI (p, 4, reg); /* li r4, reg */
1887 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
1888
1889 emit_insns (buf, p - buf);
1890 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1891
1892 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1893 {
1894 EMIT_ASM ("mr 5, 4\n"
1895 "mr 4, 3\n"
1896 "mr 3, 5\n");
1897 }
1898 }
1899
1900 /* TOP = stack[--sp] */
1901
1902 static void
1903 ppc_emit_pop (void)
1904 {
1905 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
1906 "lwz " TOP_SECOND ", 4(30) \n");
1907 }
1908
1909 /* stack[sp++] = TOP
1910
1911 Because we may use up the bytecode stack, expand it by 8 more
1912 doublewords if needed. */
1913
1914 static void
1915 ppc_emit_stack_flush (void)
1916 {
1917 /* Make sure the bytecode stack is big enough before the push.
1918 Otherwise, expand it by 64 more bytes. */
1919
1920 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
1921 " stw " TOP_SECOND ", 4(30)\n"
1922 " addi 5, 30, -(8 + 8) \n"
1923 " cmpw 7, 5, 1 \n"
1924 " bgt 7, 1f \n"
1925 " stwu 31, -64(1) \n"
1926 "1:addi 30, 30, -8 \n");
1927 }
1928
1929 /* Swap TOP and stack[sp-1] */
1930
1931 static void
1932 ppc_emit_swap (void)
1933 {
1934 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
1935 "lwz " TMP_SECOND ", 12(30) \n"
1936 "stw " TOP_FIRST ", 8(30) \n"
1937 "stw " TOP_SECOND ", 12(30) \n"
1938 "mr 3, 5 \n"
1939 "mr 4, 6 \n");
1940 }
1941
1942 /* Discard N elements in the stack. Also used for ppc64. */
1943
1944 static void
1945 ppc_emit_stack_adjust (int n)
1946 {
1947 uint32_t buf[6];
1948 uint32_t *p = buf;
1949
1950 n = n << 3;
1951 if ((n >> 15) != 0)
1952 {
1953 emit_error = 1;
1954 return;
1955 }
1956
1957 p += GEN_ADDI (p, 30, 30, n);
1958
1959 emit_insns (buf, p - buf);
1960 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1961 }
1962
1963 /* Call function FN. */
1964
1965 static void
1966 ppc_emit_call (CORE_ADDR fn)
1967 {
1968 uint32_t buf[11];
1969 uint32_t *p = buf;
1970
1971 p += gen_call (p, fn, 0, 0);
1972
1973 emit_insns (buf, p - buf);
1974 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1975 }
1976
1977 /* FN's prototype is `LONGEST(*fn)(int)'.
1978 TOP = fn (arg1)
1979 */
1980
1981 static void
1982 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
1983 {
1984 uint32_t buf[15];
1985 uint32_t *p = buf;
1986
1987 /* Set up the argument. arg1 is a 16-bit value. */
1988 p += gen_limm (p, 3, (uint32_t) arg1, 0);
1989 p += gen_call (p, fn, 0, 0);
1990
1991 emit_insns (buf, p - buf);
1992 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1993
1994 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1995 {
1996 EMIT_ASM ("mr 5, 4\n"
1997 "mr 4, 3\n"
1998 "mr 3, 5\n");
1999 }
2000 }
2001
2002 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2003 fn (arg1, TOP)
2004
2005 TOP should be preserved/restored before/after the call. */
2006
2007 static void
2008 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2009 {
2010 uint32_t buf[21];
2011 uint32_t *p = buf;
2012
2013 /* Save TOP. 0(30) is the next empty slot. */
2014 p += GEN_STW (p, 3, 30, 0);
2015 p += GEN_STW (p, 4, 30, 4);
2016
2017 /* Set up the argument. arg1 is a 16-bit value. */
2018 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2019 {
2020 p += GEN_MR (p, 5, 4);
2021 p += GEN_MR (p, 6, 3);
2022 }
2023 else
2024 {
2025 p += GEN_MR (p, 5, 3);
2026 p += GEN_MR (p, 6, 4);
2027 }
2028 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2029 p += gen_call (p, fn, 0, 0);
2030
2031 /* Restore TOP */
2032 p += GEN_LWZ (p, 3, 30, 0);
2033 p += GEN_LWZ (p, 4, 30, 4);
2034
2035 emit_insns (buf, p - buf);
2036 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2037 }
2038
2039 /* Note in the following goto ops:
2040
2041 When emitting goto, the target address is later relocated by
2042 write_goto_address. OFFSET_P is the offset of the branch instruction
2043 in the code sequence, and SIZE_P is how to relocate the instruction,
2044 recognized by ppc_write_goto_address. In the current implementation,
2045 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2046 */
2047
2048 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2049
2050 static void
2051 ppc_emit_if_goto (int *offset_p, int *size_p)
2052 {
2053 EMIT_ASM ("or. 3, 3, 4 \n"
2054 "lwzu " TOP_FIRST ", 8(30) \n"
2055 "lwz " TOP_SECOND ", 4(30) \n"
2056 "1:bne 0, 1b \n");
2057
2058 if (offset_p)
2059 *offset_p = 12;
2060 if (size_p)
2061 *size_p = 14;
2062 }
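
/* The final "bne" above is emitted as a branch to itself and is later
   patched by ppc_write_goto_address: *OFFSET_P is 12 because the branch
   is the fourth 4-byte instruction in the sequence, and *SIZE_P is 14 to
   select the conditional-branch (BD field) relocation.  */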
2063
2064 /* Unconditional goto. Also used for ppc64. */
2065
2066 static void
2067 ppc_emit_goto (int *offset_p, int *size_p)
2068 {
2069 EMIT_ASM ("1:b 1b");
2070
2071 if (offset_p)
2072 *offset_p = 0;
2073 if (size_p)
2074 *size_p = 24;
2075 }
2076
2077 /* Goto if stack[--sp] == TOP */
2078
2079 static void
2080 ppc_emit_eq_goto (int *offset_p, int *size_p)
2081 {
2082 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2083 "lwz " TMP_SECOND ", 4(30) \n"
2084 "xor 4, 6, 4 \n"
2085 "xor 3, 5, 3 \n"
2086 "or. 3, 3, 4 \n"
2087 "lwzu " TOP_FIRST ", 8(30) \n"
2088 "lwz " TOP_SECOND ", 4(30) \n"
2089 "1:beq 0, 1b \n");
2090
2091 if (offset_p)
2092 *offset_p = 28;
2093 if (size_p)
2094 *size_p = 14;
2095 }
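
/* The two "xor"s above compare the 64-bit pair word by word; the values
   are equal exactly when both results are zero, which the "or." folds
   into CR0.  The next stack entry is then popped into TOP before the
   relocatable "beq" decides whether to take the goto.  */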
2096
2097 /* Goto if stack[--sp] != TOP */
2098
2099 static void
2100 ppc_emit_ne_goto (int *offset_p, int *size_p)
2101 {
2102 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2103 "lwz " TMP_SECOND ", 4(30) \n"
2104 "xor 4, 6, 4 \n"
2105 "xor 3, 5, 3 \n"
2106 "or. 3, 3, 4 \n"
2107 "lwzu " TOP_FIRST ", 8(30) \n"
2108 "lwz " TOP_SECOND ", 4(30) \n"
2109 "1:bne 0, 1b \n");
2110
2111 if (offset_p)
2112 *offset_p = 28;
2113 if (size_p)
2114 *size_p = 14;
2115 }
2116
2117 /* Goto if stack[--sp] < TOP */
2118
2119 static void
2120 ppc_emit_lt_goto (int *offset_p, int *size_p)
2121 {
2122 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2123 "lwz " TMP_SECOND ", 4(30) \n"
2124 "cmplw 6, 6, 4 \n"
2125 "cmpw 7, 5, 3 \n"
2126 /* CR6 bit 0 = low less and high equal */
2127 "crand 6*4+0, 6*4+0, 7*4+2\n"
2128 /* CR7 bit 0 = (low less and high equal) or high less */
2129 "cror 7*4+0, 7*4+0, 6*4+0\n"
2130 "lwzu " TOP_FIRST ", 8(30) \n"
2131 "lwz " TOP_SECOND ", 4(30)\n"
2132 "1:blt 7, 1b \n");
2133
2134 if (offset_p)
2135 *offset_p = 32;
2136 if (size_p)
2137 *size_p = 14;
2138 }
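
/* The CR manipulation above builds the signed 64-bit comparison
   stack[--sp] < TOP out of two 32-bit compares, using the identity

     a < b  <=>  hi (a) < hi (b) (signed)
                 || (hi (a) == hi (b) && lo (a) < lo (b) (unsigned))

   cmplw/cmpw leave the low-word/high-word results in CR6/CR7, the
   crand/cror pair combines them into CR7 bit 0, and the relocatable
   "blt 7" tests that bit.  */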
2139
2140 /* Goto if stack[--sp] <= TOP */
2141
2142 static void
2143 ppc_emit_le_goto (int *offset_p, int *size_p)
2144 {
2145 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2146 "lwz " TMP_SECOND ", 4(30) \n"
2147 "cmplw 6, 6, 4 \n"
2148 "cmpw 7, 5, 3 \n"
2149 /* CR6 bit 0 = low less/equal and high equal */
2150 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2151 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2152 "cror 7*4+0, 7*4+0, 6*4+0\n"
2153 "lwzu " TOP_FIRST ", 8(30) \n"
2154 "lwz " TOP_SECOND ", 4(30)\n"
2155 "1:blt 7, 1b \n");
2156
2157 if (offset_p)
2158 *offset_p = 32;
2159 if (size_p)
2160 *size_p = 14;
2161 }
2162
2163 /* Goto if stack[--sp] > TOP */
2164
2165 static void
2166 ppc_emit_gt_goto (int *offset_p, int *size_p)
2167 {
2168 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2169 "lwz " TMP_SECOND ", 4(30) \n"
2170 "cmplw 6, 6, 4 \n"
2171 "cmpw 7, 5, 3 \n"
2172 /* CR6 bit 0 = low greater and high equal */
2173 "crand 6*4+0, 6*4+1, 7*4+2\n"
2174 /* CR7 bit 0 = (low greater and high equal) or high greater */
2175 "cror 7*4+0, 7*4+1, 6*4+0\n"
2176 "lwzu " TOP_FIRST ", 8(30) \n"
2177 "lwz " TOP_SECOND ", 4(30)\n"
2178 "1:blt 7, 1b \n");
2179
2180 if (offset_p)
2181 *offset_p = 32;
2182 if (size_p)
2183 *size_p = 14;
2184 }
2185
2186 /* Goto if stack[--sp] >= TOP */
2187
2188 static void
2189 ppc_emit_ge_goto (int *offset_p, int *size_p)
2190 {
2191 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2192 "lwz " TMP_SECOND ", 4(30) \n"
2193 "cmplw 6, 6, 4 \n"
2194 "cmpw 7, 5, 3 \n"
2195 /* CR6 bit 0 = low ge and high equal */
2196 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2197 /* CR7 bit 0 = (low ge and high equal) or high greater */
2198 "cror 7*4+0, 7*4+1, 6*4+0\n"
2199 "lwzu " TOP_FIRST ", 8(30)\n"
2200 "lwz " TOP_SECOND ", 4(30)\n"
2201 "1:blt 7, 1b \n");
2202
2203 if (offset_p)
2204 *offset_p = 32;
2205 if (size_p)
2206 *size_p = 14;
2207 }
2208
2209 /* Relocate a previously emitted branch instruction. FROM is the address
2210 of the branch instruction, TO is the goto target address, and SIZE
2211 is the value we set via *SIZE_P before. Currently, it is either
2212 24 or 14, for branch and conditional-branch instructions respectively.
2213 Also used for ppc64. */
2214
2215 static void
2216 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2217 {
2218 long rel = to - from;
2219 uint32_t insn;
2220 int opcd;
2221
2222 read_inferior_memory (from, (unsigned char *) &insn, 4);
2223 opcd = (insn >> 26) & 0x3f;
2224
2225 switch (size)
2226 {
2227 case 14:
2228 if (opcd != 16
2229 || (rel >= (1 << 15) || rel < -(1 << 15)))
2230 emit_error = 1;
2231 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2232 break;
2233 case 24:
2234 if (opcd != 18
2235 || (rel >= (1 << 25) || rel < -(1 << 25)))
2236 emit_error = 1;
2237 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2238 break;
2239 default:
2240 emit_error = 1;
2241 }
2242
2243 if (!emit_error)
2244 write_inferior_memory (from, (unsigned char *) &insn, 4);
2245 }
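
/* A minimal standalone sketch of the SIZE == 14 case, assuming INSN is
   the original instruction word and REL a byte displacement already
   checked to be in range:

     static uint32_t
     patch_bd_field (uint32_t insn, long rel)
     {
       return (insn & ~0xfffcu) | ((uint32_t) rel & 0xfffc);
     }

   Only the 14-bit BD field (bits 16-29) changes; the opcode, BO, BI and
   the AA/LK bits (the two low-order bits) are preserved.  */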
2246
2247 /* Table of emit ops for 32-bit. */
2248
2249 static struct emit_ops ppc_emit_ops_impl =
2250 {
2251 ppc_emit_prologue,
2252 ppc_emit_epilogue,
2253 ppc_emit_add,
2254 ppc_emit_sub,
2255 ppc_emit_mul,
2256 ppc_emit_lsh,
2257 ppc_emit_rsh_signed,
2258 ppc_emit_rsh_unsigned,
2259 ppc_emit_ext,
2260 ppc_emit_log_not,
2261 ppc_emit_bit_and,
2262 ppc_emit_bit_or,
2263 ppc_emit_bit_xor,
2264 ppc_emit_bit_not,
2265 ppc_emit_equal,
2266 ppc_emit_less_signed,
2267 ppc_emit_less_unsigned,
2268 ppc_emit_ref,
2269 ppc_emit_if_goto,
2270 ppc_emit_goto,
2271 ppc_write_goto_address,
2272 ppc_emit_const,
2273 ppc_emit_call,
2274 ppc_emit_reg,
2275 ppc_emit_pop,
2276 ppc_emit_stack_flush,
2277 ppc_emit_zero_ext,
2278 ppc_emit_swap,
2279 ppc_emit_stack_adjust,
2280 ppc_emit_int_call_1,
2281 ppc_emit_void_call_2,
2282 ppc_emit_eq_goto,
2283 ppc_emit_ne_goto,
2284 ppc_emit_lt_goto,
2285 ppc_emit_le_goto,
2286 ppc_emit_gt_goto,
2287 ppc_emit_ge_goto
2288 };
2289
2290 #ifdef __powerpc64__
2291
2292 /*
2293
2294 Bytecode execution stack frame - 64-bit
2295
2296 | LR save area (SP + 16)
2297 | CR save area (SP + 8)
2298 SP' -> +- Back chain (SP + 0)
2299 | Save r31 for accessing saved arguments
2300 | Save r30 for bytecode stack pointer
2301 | Save r4 for incoming argument *value
2302 | Save r3 for incoming argument regs
2303 r30 -> +- Bytecode execution stack
2304 |
2305 | 64 bytes (8 doublewords) initially.
2306 | Expand stack as needed.
2307 |
2308 +-
2309 | Some padding for minimum stack frame.
2310 | 112 for ELFv1.
2311 SP +- Back-chain (SP')
2312
2313 initial frame size
2314 = 112 + (4 * 8) + 64
2315 = 208
2316
2317 r30 is the stack pointer for the bytecode machine.
2318 It should point to the next empty slot, so we can use LDU for pop.
2319 r3 is used as a cache of the TOP value.
2320 It was the first argument, a pointer to regs.
2321 r4 is the second argument, a pointer to the result.
2322 We should set *result = TOP before returning from this function.
2323
2324 Note:
2325 * To restore stack at epilogue
2326 => sp = r31
2327 * To check stack is big enough for bytecode execution.
2328 => r30 - 8 > SP + 112
2329 * To return execution result.
2330 => 0(r4) = TOP
2331
2332 */
2333
2334 /* Emit prologue in inferior memory. See above comments. */
2335
2336 static void
2337 ppc64v1_emit_prologue (void)
2338 {
2339 /* On ELFv1, function pointers really point to a function descriptor,
2340 so emit one here. We don't care about the contents of words 1 and 2,
2341 so let them just overlap our code. */
2342 uint64_t opd = current_insn_ptr + 8;
2343 uint32_t buf[2];
2344
2345 /* Mind the strict aliasing rules. */
2346 memcpy (buf, &opd, sizeof buf);
2347 emit_insns (buf, 2);
2348 EMIT_ASM (/* Save return address. */
2349 "mflr 0 \n"
2350 "std 0, 16(1) \n"
2351 /* Save r30 and incoming arguments. */
2352 "std 31, -8(1) \n"
2353 "std 30, -16(1) \n"
2354 "std 4, -24(1) \n"
2355 "std 3, -32(1) \n"
2356 /* Point r31 to the current r1 for accessing arguments. */
2357 "mr 31, 1 \n"
2358 /* Adjust SP. 208 is the initial frame size. */
2359 "stdu 1, -208(1) \n"
2360 /* Set r30 to point to the stack top. */
2361 "addi 30, 1, 168 \n"
2362 /* Initialize r3/TOP to 0. */
2363 "li 3, 0 \n");
2364 }
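
/* For reference: an ELFv1 function descriptor is three doublewords --
   entry address, TOC pointer and environment pointer.  The two words
   emitted above form only the entry address, current_insn_ptr + 8, which
   is the first real instruction; the TOC and environment slots are never
   read here and simply overlap the following code.  */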
2365
2366 /* Emit prologue in inferior memory. See above comments. */
2367
2368 static void
2369 ppc64v2_emit_prologue (void)
2370 {
2371 EMIT_ASM (/* Save return address. */
2372 "mflr 0 \n"
2373 "std 0, 16(1) \n"
2374 /* Save r30 and incoming arguments. */
2375 "std 31, -8(1) \n"
2376 "std 30, -16(1) \n"
2377 "std 4, -24(1) \n"
2378 "std 3, -32(1) \n"
2379 /* Point r31 to the current r1 for accessing arguments. */
2380 "mr 31, 1 \n"
2381 /* Adjust SP. 208 is the initial frame size. */
2382 "stdu 1, -208(1) \n"
2383 /* Set r30 to point to the stack top. */
2384 "addi 30, 1, 168 \n"
2385 /* Initialize r3/TOP to 0. */
2386 "li 3, 0 \n");
2387 }
2388
2389 /* Emit epilogue in inferior memory. See above comments. */
2390
2391 static void
2392 ppc64_emit_epilogue (void)
2393 {
2394 EMIT_ASM (/* Restore SP. */
2395 "ld 1, 0(1) \n"
2396 /* *result = TOP */
2397 "ld 4, -24(1) \n"
2398 "std 3, 0(4) \n"
2399 /* Restore registers. */
2400 "ld 31, -8(1) \n"
2401 "ld 30, -16(1) \n"
2402 /* Restore LR. */
2403 "ld 0, 16(1) \n"
2404 /* Return 0 for no error. */
2405 "li 3, 0 \n"
2406 "mtlr 0 \n"
2407 "blr \n");
2408 }
2409
2410 /* TOP = stack[--sp] + TOP */
2411
2412 static void
2413 ppc64_emit_add (void)
2414 {
2415 EMIT_ASM ("ldu 4, 8(30) \n"
2416 "add 3, 4, 3 \n");
2417 }
2418
2419 /* TOP = stack[--sp] - TOP */
2420
2421 static void
2422 ppc64_emit_sub (void)
2423 {
2424 EMIT_ASM ("ldu 4, 8(30) \n"
2425 "sub 3, 4, 3 \n");
2426 }
2427
2428 /* TOP = stack[--sp] * TOP */
2429
2430 static void
2431 ppc64_emit_mul (void)
2432 {
2433 EMIT_ASM ("ldu 4, 8(30) \n"
2434 "mulld 3, 4, 3 \n");
2435 }
2436
2437 /* TOP = stack[--sp] << TOP */
2438
2439 static void
2440 ppc64_emit_lsh (void)
2441 {
2442 EMIT_ASM ("ldu 4, 8(30) \n"
2443 "sld 3, 4, 3 \n");
2444 }
2445
2446 /* TOP = stack[--sp] >> TOP
2447 (Arithmetic shift right) */
2448
2449 static void
2450 ppc64_emit_rsh_signed (void)
2451 {
2452 EMIT_ASM ("ldu 4, 8(30) \n"
2453 "srad 3, 4, 3 \n");
2454 }
2455
2456 /* TOP = stack[--sp] >> TOP
2457 (Logical shift right) */
2458
2459 static void
2460 ppc64_emit_rsh_unsigned (void)
2461 {
2462 EMIT_ASM ("ldu 4, 8(30) \n"
2463 "srd 3, 4, 3 \n");
2464 }
2465
2466 /* Emit code for the sign-extension specified by ARG. */
2467
2468 static void
2469 ppc64_emit_ext (int arg)
2470 {
2471 switch (arg)
2472 {
2473 case 8:
2474 EMIT_ASM ("extsb 3, 3");
2475 break;
2476 case 16:
2477 EMIT_ASM ("extsh 3, 3");
2478 break;
2479 case 32:
2480 EMIT_ASM ("extsw 3, 3");
2481 break;
2482 default:
2483 emit_error = 1;
2484 }
2485 }
2486
2487 /* Emit code for zero-extension specified by ARG. */
2488
2489 static void
2490 ppc64_emit_zero_ext (int arg)
2491 {
2492 switch (arg)
2493 {
2494 case 8:
2495 EMIT_ASM ("rldicl 3,3,0,56");
2496 break;
2497 case 16:
2498 EMIT_ASM ("rldicl 3,3,0,48");
2499 break;
2500 case 32:
2501 EMIT_ASM ("rldicl 3,3,0,32");
2502 break;
2503 default:
2504 emit_error = 1;
2505 }
2506 }
2507
2508 /* TOP = !TOP
2509 i.e., TOP = (TOP == 0) ? 1 : 0; */
2510
2511 static void
2512 ppc64_emit_log_not (void)
2513 {
2514 EMIT_ASM ("cntlzd 3, 3 \n"
2515 "srdi 3, 3, 6 \n");
2516 }
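
/* This works because cntlzd yields 64 only when r3 is zero; shifting the
   count right by 6 therefore leaves 1 for TOP == 0 and 0 for any
   non-zero TOP.  */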
2517
2518 /* TOP = stack[--sp] & TOP */
2519
2520 static void
2521 ppc64_emit_bit_and (void)
2522 {
2523 EMIT_ASM ("ldu 4, 8(30) \n"
2524 "and 3, 4, 3 \n");
2525 }
2526
2527 /* TOP = stack[--sp] | TOP */
2528
2529 static void
2530 ppc64_emit_bit_or (void)
2531 {
2532 EMIT_ASM ("ldu 4, 8(30) \n"
2533 "or 3, 4, 3 \n");
2534 }
2535
2536 /* TOP = stack[--sp] ^ TOP */
2537
2538 static void
2539 ppc64_emit_bit_xor (void)
2540 {
2541 EMIT_ASM ("ldu 4, 8(30) \n"
2542 "xor 3, 4, 3 \n");
2543 }
2544
2545 /* TOP = ~TOP
2546 i.e., TOP = ~(TOP | TOP) */
2547
2548 static void
2549 ppc64_emit_bit_not (void)
2550 {
2551 EMIT_ASM ("nor 3, 3, 3 \n");
2552 }
2553
2554 /* TOP = stack[--sp] == TOP */
2555
2556 static void
2557 ppc64_emit_equal (void)
2558 {
2559 EMIT_ASM ("ldu 4, 8(30) \n"
2560 "xor 3, 3, 4 \n"
2561 "cntlzd 3, 3 \n"
2562 "srdi 3, 3, 6 \n");
2563 }
2564
2565 /* TOP = stack[--sp] < TOP
2566 (Signed comparison) */
2567
2568 static void
2569 ppc64_emit_less_signed (void)
2570 {
2571 EMIT_ASM ("ldu 4, 8(30) \n"
2572 "cmpd 7, 4, 3 \n"
2573 "mfcr 3 \n"
2574 "rlwinm 3, 3, 29, 31, 31 \n");
2575 }
2576
2577 /* TOP = stack[--sp] < TOP
2578 (Unsigned comparison) */
2579
2580 static void
2581 ppc64_emit_less_unsigned (void)
2582 {
2583 EMIT_ASM ("ldu 4, 8(30) \n"
2584 "cmpld 7, 4, 3 \n"
2585 "mfcr 3 \n"
2586 "rlwinm 3, 3, 29, 31, 31 \n");
2587 }
2588
2589 /* Access memory at the address held in TOP, with access size SIZE.
2590 Zero-extend the value read. */
2591
2592 static void
2593 ppc64_emit_ref (int size)
2594 {
2595 switch (size)
2596 {
2597 case 1:
2598 EMIT_ASM ("lbz 3, 0(3)");
2599 break;
2600 case 2:
2601 EMIT_ASM ("lhz 3, 0(3)");
2602 break;
2603 case 4:
2604 EMIT_ASM ("lwz 3, 0(3)");
2605 break;
2606 case 8:
2607 EMIT_ASM ("ld 3, 0(3)");
2608 break;
2609 }
2610 }
2611
2612 /* TOP = NUM */
2613
2614 static void
2615 ppc64_emit_const (LONGEST num)
2616 {
2617 uint32_t buf[5];
2618 uint32_t *p = buf;
2619
2620 p += gen_limm (p, 3, num, 1);
2621
2622 emit_insns (buf, p - buf);
2623 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2624 }
2625
2626 /* Set TOP to the value of register REG by calling the get_raw_reg function
2627 with two arguments: the collected register buffer and the register number. */
2628
2629 static void
2630 ppc64v1_emit_reg (int reg)
2631 {
2632 uint32_t buf[15];
2633 uint32_t *p = buf;
2634
2635 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
2636 p += GEN_LD (p, 3, 31, -32);
2637 p += GEN_LI (p, 4, reg);
2638 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2639 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2640 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2641
2642 emit_insns (buf, p - buf);
2643 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2644 }
2645
2646 /* Likewise, for ELFv2. */
2647
2648 static void
2649 ppc64v2_emit_reg (int reg)
2650 {
2651 uint32_t buf[12];
2652 uint32_t *p = buf;
2653
2654 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
2655 p += GEN_LD (p, 3, 31, -32);
2656 p += GEN_LI (p, 4, reg);
2657 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2658 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2659 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2660
2661 emit_insns (buf, p - buf);
2662 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2663 }
2664
2665 /* TOP = stack[--sp] */
2666
2667 static void
2668 ppc64_emit_pop (void)
2669 {
2670 EMIT_ASM ("ldu 3, 8(30)");
2671 }
2672
2673 /* stack[sp++] = TOP
2674
2675 Because we may run out of bytecode stack, expand it by 8 more
2676 doublewords if needed. */
2677
2678 static void
2679 ppc64_emit_stack_flush (void)
2680 {
2681 /* Make sure the bytecode stack is big enough before the push.
2682 Otherwise, expand it by 64 more bytes. */
2683
2684 EMIT_ASM (" std 3, 0(30) \n"
2685 " addi 4, 30, -(112 + 8) \n"
2686 " cmpd 7, 4, 1 \n"
2687 " bgt 7, 1f \n"
2688 " stdu 31, -64(1) \n"
2689 "1:addi 30, 30, -8 \n");
2690 }
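
/* Here the limit is r30 - (112 + 8) > r1, keeping the bytecode stack
   clear of the 112-byte minimum frame area shown in the layout comment
   above; when the test fails, "stdu 31, -64(1)" extends the frame by
   another 64 bytes (8 doublewords).  */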
2691
2692 /* Swap TOP and stack[sp-1] */
2693
2694 static void
2695 ppc64_emit_swap (void)
2696 {
2697 EMIT_ASM ("ld 4, 8(30) \n"
2698 "std 3, 8(30) \n"
2699 "mr 3, 4 \n");
2700 }
2701
2702 /* Call function FN - ELFv1. */
2703
2704 static void
2705 ppc64v1_emit_call (CORE_ADDR fn)
2706 {
2707 uint32_t buf[13];
2708 uint32_t *p = buf;
2709
2710 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2711 p += gen_call (p, fn, 1, 1);
2712 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2713
2714 emit_insns (buf, p - buf);
2715 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2716 }
2717
2718 /* Call function FN - ELFv2. */
2719
2720 static void
2721 ppc64v2_emit_call (CORE_ADDR fn)
2722 {
2723 uint32_t buf[10];
2724 uint32_t *p = buf;
2725
2726 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2727 p += gen_call (p, fn, 1, 0);
2728 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2729
2730 emit_insns (buf, p - buf);
2731 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2732 }
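
/* Note the TOC save slot: the ELFv1 stack frame reserves it at SP + 40,
   while ELFv2 moved it to SP + 24, hence the 40(1) offsets in the ELFv1
   variant above versus 24(1) here.  */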
2733
2734 /* FN's prototype is `LONGEST(*fn)(int)'.
2735 TOP = fn (arg1)
2736 */
2737
2738 static void
2739 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
2740 {
2741 uint32_t buf[13];
2742 uint32_t *p = buf;
2743
2744 /* Set up the argument. arg1 is a 16-bit value. */
2745 p += gen_limm (p, 3, arg1, 1);
2746 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2747 p += gen_call (p, fn, 1, 1);
2748 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2749
2750 emit_insns (buf, p - buf);
2751 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2752 }
2753
2754 /* Likewise for ELFv2. */
2755
2756 static void
2757 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
2758 {
2759 uint32_t buf[10];
2760 uint32_t *p = buf;
2761
2762 /* Set up the argument. arg1 is a 16-bit value. */
2763 p += gen_limm (p, 3, arg1, 1);
2764 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2765 p += gen_call (p, fn, 1, 0);
2766 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2767
2768 emit_insns (buf, p - buf);
2769 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2770 }
2771
2772 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2773 fn (arg1, TOP)
2774
2775 TOP should be preserved/restored before/after the call. */
2776
2777 static void
2778 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
2779 {
2780 uint32_t buf[17];
2781 uint32_t *p = buf;
2782
2783 /* Save TOP. 0(30) is the next empty slot. */
2784 p += GEN_STD (p, 3, 30, 0);
2785
2786 /* Set up the argument. arg1 is a 16-bit value. */
2787 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
2788 p += gen_limm (p, 3, arg1, 1);
2789 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2790 p += gen_call (p, fn, 1, 1);
2791 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2792
2793 /* Restore TOP */
2794 p += GEN_LD (p, 3, 30, 0);
2795
2796 emit_insns (buf, p - buf);
2797 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2798 }
2799
2800 /* Likewise for ELFv2. */
2801
2802 static void
2803 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
2804 {
2805 uint32_t buf[14];
2806 uint32_t *p = buf;
2807
2808 /* Save TOP. 0(30) is the next empty slot. */
2809 p += GEN_STD (p, 3, 30, 0);
2810
2811 /* Set up the argument. arg1 is a 16-bit value. */
2812 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
2813 p += gen_limm (p, 3, arg1, 1);
2814 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2815 p += gen_call (p, fn, 1, 0);
2816 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2817
2818 /* Restore TOP */
2819 p += GEN_LD (p, 3, 30, 0);
2820
2821 emit_insns (buf, p - buf);
2822 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2823 }
2824
2825 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2826
2827 static void
2828 ppc64_emit_if_goto (int *offset_p, int *size_p)
2829 {
2830 EMIT_ASM ("cmpdi 7, 3, 0 \n"
2831 "ldu 3, 8(30) \n"
2832 "1:bne 7, 1b \n");
2833
2834 if (offset_p)
2835 *offset_p = 8;
2836 if (size_p)
2837 *size_p = 14;
2838 }
2839
2840 /* Goto if stack[--sp] == TOP */
2841
2842 static void
2843 ppc64_emit_eq_goto (int *offset_p, int *size_p)
2844 {
2845 EMIT_ASM ("ldu 4, 8(30) \n"
2846 "cmpd 7, 4, 3 \n"
2847 "ldu 3, 8(30) \n"
2848 "1:beq 7, 1b \n");
2849
2850 if (offset_p)
2851 *offset_p = 12;
2852 if (size_p)
2853 *size_p = 14;
2854 }
2855
2856 /* Goto if stack[--sp] != TOP */
2857
2858 static void
2859 ppc64_emit_ne_goto (int *offset_p, int *size_p)
2860 {
2861 EMIT_ASM ("ldu 4, 8(30) \n"
2862 "cmpd 7, 4, 3 \n"
2863 "ldu 3, 8(30) \n"
2864 "1:bne 7, 1b \n");
2865
2866 if (offset_p)
2867 *offset_p = 12;
2868 if (size_p)
2869 *size_p = 14;
2870 }
2871
2872 /* Goto if stack[--sp] < TOP */
2873
2874 static void
2875 ppc64_emit_lt_goto (int *offset_p, int *size_p)
2876 {
2877 EMIT_ASM ("ldu 4, 8(30) \n"
2878 "cmpd 7, 4, 3 \n"
2879 "ldu 3, 8(30) \n"
2880 "1:blt 7, 1b \n");
2881
2882 if (offset_p)
2883 *offset_p = 12;
2884 if (size_p)
2885 *size_p = 14;
2886 }
2887
2888 /* Goto if stack[--sp] <= TOP */
2889
2890 static void
2891 ppc64_emit_le_goto (int *offset_p, int *size_p)
2892 {
2893 EMIT_ASM ("ldu 4, 8(30) \n"
2894 "cmpd 7, 4, 3 \n"
2895 "ldu 3, 8(30) \n"
2896 "1:ble 7, 1b \n");
2897
2898 if (offset_p)
2899 *offset_p = 12;
2900 if (size_p)
2901 *size_p = 14;
2902 }
2903
2904 /* Goto if stack[--sp] > TOP */
2905
2906 static void
2907 ppc64_emit_gt_goto (int *offset_p, int *size_p)
2908 {
2909 EMIT_ASM ("ldu 4, 8(30) \n"
2910 "cmpd 7, 4, 3 \n"
2911 "ldu 3, 8(30) \n"
2912 "1:bgt 7, 1b \n");
2913
2914 if (offset_p)
2915 *offset_p = 12;
2916 if (size_p)
2917 *size_p = 14;
2918 }
2919
2920 /* Goto if stack[--sp] >= TOP */
2921
2922 static void
2923 ppc64_emit_ge_goto (int *offset_p, int *size_p)
2924 {
2925 EMIT_ASM ("ldu 4, 8(30) \n"
2926 "cmpd 7, 4, 3 \n"
2927 "ldu 3, 8(30) \n"
2928 "1:bge 7, 1b \n");
2929
2930 if (offset_p)
2931 *offset_p = 12;
2932 if (size_p)
2933 *size_p = 14;
2934 }
2935
2936 /* Table of emit ops for 64-bit ELFv1. */
2937
2938 static struct emit_ops ppc64v1_emit_ops_impl =
2939 {
2940 ppc64v1_emit_prologue,
2941 ppc64_emit_epilogue,
2942 ppc64_emit_add,
2943 ppc64_emit_sub,
2944 ppc64_emit_mul,
2945 ppc64_emit_lsh,
2946 ppc64_emit_rsh_signed,
2947 ppc64_emit_rsh_unsigned,
2948 ppc64_emit_ext,
2949 ppc64_emit_log_not,
2950 ppc64_emit_bit_and,
2951 ppc64_emit_bit_or,
2952 ppc64_emit_bit_xor,
2953 ppc64_emit_bit_not,
2954 ppc64_emit_equal,
2955 ppc64_emit_less_signed,
2956 ppc64_emit_less_unsigned,
2957 ppc64_emit_ref,
2958 ppc64_emit_if_goto,
2959 ppc_emit_goto,
2960 ppc_write_goto_address,
2961 ppc64_emit_const,
2962 ppc64v1_emit_call,
2963 ppc64v1_emit_reg,
2964 ppc64_emit_pop,
2965 ppc64_emit_stack_flush,
2966 ppc64_emit_zero_ext,
2967 ppc64_emit_swap,
2968 ppc_emit_stack_adjust,
2969 ppc64v1_emit_int_call_1,
2970 ppc64v1_emit_void_call_2,
2971 ppc64_emit_eq_goto,
2972 ppc64_emit_ne_goto,
2973 ppc64_emit_lt_goto,
2974 ppc64_emit_le_goto,
2975 ppc64_emit_gt_goto,
2976 ppc64_emit_ge_goto
2977 };
2978
2979 /* Table of emit ops for 64-bit ELFv2. */
2980
2981 static struct emit_ops ppc64v2_emit_ops_impl =
2982 {
2983 ppc64v2_emit_prologue,
2984 ppc64_emit_epilogue,
2985 ppc64_emit_add,
2986 ppc64_emit_sub,
2987 ppc64_emit_mul,
2988 ppc64_emit_lsh,
2989 ppc64_emit_rsh_signed,
2990 ppc64_emit_rsh_unsigned,
2991 ppc64_emit_ext,
2992 ppc64_emit_log_not,
2993 ppc64_emit_bit_and,
2994 ppc64_emit_bit_or,
2995 ppc64_emit_bit_xor,
2996 ppc64_emit_bit_not,
2997 ppc64_emit_equal,
2998 ppc64_emit_less_signed,
2999 ppc64_emit_less_unsigned,
3000 ppc64_emit_ref,
3001 ppc64_emit_if_goto,
3002 ppc_emit_goto,
3003 ppc_write_goto_address,
3004 ppc64_emit_const,
3005 ppc64v2_emit_call,
3006 ppc64v2_emit_reg,
3007 ppc64_emit_pop,
3008 ppc64_emit_stack_flush,
3009 ppc64_emit_zero_ext,
3010 ppc64_emit_swap,
3011 ppc_emit_stack_adjust,
3012 ppc64v2_emit_int_call_1,
3013 ppc64v2_emit_void_call_2,
3014 ppc64_emit_eq_goto,
3015 ppc64_emit_ne_goto,
3016 ppc64_emit_lt_goto,
3017 ppc64_emit_le_goto,
3018 ppc64_emit_gt_goto,
3019 ppc64_emit_ge_goto
3020 };
3021
3022 #endif
3023
3024 /* Implementation of linux_target_ops method "emit_ops". */
3025
3026 static struct emit_ops *
3027 ppc_emit_ops (void)
3028 {
3029 #ifdef __powerpc64__
3030 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3031
3032 if (register_size (regcache->tdesc, 0) == 8)
3033 {
3034 if (is_elfv2_inferior ())
3035 return &ppc64v2_emit_ops_impl;
3036 else
3037 return &ppc64v1_emit_ops_impl;
3038 }
3039 #endif
3040 return &ppc_emit_ops_impl;
3041 }
3042
3043 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3044
3045 static int
3046 ppc_get_ipa_tdesc_idx (void)
3047 {
3048 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3049 const struct target_desc *tdesc = regcache->tdesc;
3050
3051 #ifdef __powerpc64__
3052 if (tdesc == tdesc_powerpc_64l)
3053 return PPC_TDESC_BASE;
3054 if (tdesc == tdesc_powerpc_altivec64l)
3055 return PPC_TDESC_ALTIVEC;
3056 if (tdesc == tdesc_powerpc_cell64l)
3057 return PPC_TDESC_CELL;
3058 if (tdesc == tdesc_powerpc_vsx64l)
3059 return PPC_TDESC_VSX;
3060 if (tdesc == tdesc_powerpc_isa205_64l)
3061 return PPC_TDESC_ISA205;
3062 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3063 return PPC_TDESC_ISA205_ALTIVEC;
3064 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3065 return PPC_TDESC_ISA205_VSX;
3066 #endif
3067
3068 if (tdesc == tdesc_powerpc_32l)
3069 return PPC_TDESC_BASE;
3070 if (tdesc == tdesc_powerpc_altivec32l)
3071 return PPC_TDESC_ALTIVEC;
3072 if (tdesc == tdesc_powerpc_cell32l)
3073 return PPC_TDESC_CELL;
3074 if (tdesc == tdesc_powerpc_vsx32l)
3075 return PPC_TDESC_VSX;
3076 if (tdesc == tdesc_powerpc_isa205_32l)
3077 return PPC_TDESC_ISA205;
3078 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3079 return PPC_TDESC_ISA205_ALTIVEC;
3080 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3081 return PPC_TDESC_ISA205_VSX;
3082 if (tdesc == tdesc_powerpc_e500l)
3083 return PPC_TDESC_E500;
3084
3085 return 0;
3086 }
3087
3088 struct linux_target_ops the_low_target = {
3089 ppc_arch_setup,
3090 ppc_regs_info,
3091 ppc_cannot_fetch_register,
3092 ppc_cannot_store_register,
3093 NULL, /* fetch_register */
3094 ppc_get_pc,
3095 ppc_set_pc,
3096 NULL, /* breakpoint_kind_from_pc */
3097 ppc_sw_breakpoint_from_kind,
3098 NULL, /* get_next_pcs */
3099 0, /* decr_pc_after_break */
3100 ppc_breakpoint_at,
3101 ppc_supports_z_point_type,
3102 ppc_insert_point,
3103 ppc_remove_point,
3104 NULL, /* stopped_by_watchpoint */
3105 NULL, /* stopped_data_address */
3106 ppc_collect_ptrace_register,
3107 ppc_supply_ptrace_register,
3108 NULL, /* siginfo_fixup */
3109 NULL, /* new_process */
3110 NULL, /* delete_process */
3111 NULL, /* new_thread */
3112 NULL, /* delete_thread */
3113 NULL, /* new_fork */
3114 NULL, /* prepare_to_resume */
3115 NULL, /* process_qsupported */
3116 ppc_supports_tracepoints,
3117 ppc_get_thread_area,
3118 ppc_install_fast_tracepoint_jump_pad,
3119 ppc_emit_ops,
3120 ppc_get_min_fast_tracepoint_insn_len,
3121 NULL, /* supports_range_stepping */
3122 NULL, /* breakpoint_kind_from_current_state */
3123 ppc_supports_hardware_single_step,
3124 NULL, /* get_syscall_trapinfo */
3125 ppc_get_ipa_tdesc_idx,
3126 };
3127
3128 void
3129 initialize_low_arch (void)
3130 {
3131 /* Initialize the Linux target descriptions. */
3132
3133 init_registers_powerpc_32l ();
3134 init_registers_powerpc_altivec32l ();
3135 init_registers_powerpc_cell32l ();
3136 init_registers_powerpc_vsx32l ();
3137 init_registers_powerpc_isa205_32l ();
3138 init_registers_powerpc_isa205_altivec32l ();
3139 init_registers_powerpc_isa205_vsx32l ();
3140 init_registers_powerpc_e500l ();
3141 #ifdef __powerpc64__
3142 init_registers_powerpc_64l ();
3143 init_registers_powerpc_altivec64l ();
3144 init_registers_powerpc_cell64l ();
3145 init_registers_powerpc_vsx64l ();
3146 init_registers_powerpc_isa205_64l ();
3147 init_registers_powerpc_isa205_altivec64l ();
3148 init_registers_powerpc_isa205_vsx64l ();
3149 #endif
3150
3151 initialize_regsets_info (&ppc_regsets_info);
3152 }