[PowerPC] Add support for HTM registers
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-ppc-low.c
1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "linux-ppc-tdesc-init.h"
32 #include "ax.h"
33 #include "tracepoint.h"
34
35 #define PPC_FIELD(value, from, len) \
36 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
37 #define PPC_SEXT(v, bs) \
38 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
39 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
40 - ((CORE_ADDR) 1 << ((bs) - 1)))
41 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
42 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
43 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
44 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
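/* For example, for the branch instruction 0x48000010 ("b .+16"),
   PPC_OP6 extracts the opcode 18 and PPC_LI yields the sign-extended
   byte displacement 16.  */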
45
46 /* Holds the AT_HWCAP auxv entry. */
47
48 static unsigned long ppc_hwcap;
49
50 /* Holds the AT_HWCAP2 auxv entry. */
51
52 static unsigned long ppc_hwcap2;
53
54
55 #define ppc_num_regs 73
56
57 #ifdef __powerpc64__
58 /* We use a constant for FPSCR instead of PT_FPSCR, because
59 many shipped PPC64 kernels had the wrong value in ptrace.h. */
60 static int ppc_regmap[] =
61 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
62 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
63 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
64 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
65 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
66 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
67 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
68 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
69 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
70 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
71 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
72 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
73 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
74 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
75 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
76 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
77 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
78 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
79 PT_ORIG_R3 * 8, PT_TRAP * 8 };
80 #else
81 /* Currently, don't check/send MQ. */
82 static int ppc_regmap[] =
83 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
84 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
85 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
86 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
87 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
88 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
89 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
90 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
91 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
92 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
93 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
94 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
95 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
96 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
97 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
98 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
99 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
100 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
101 PT_ORIG_R3 * 4, PT_TRAP * 4
102 };
103
104 static int ppc_regmap_e500[] =
105 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
106 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
107 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
108 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
109 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
110 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
111 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
112 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
113 -1, -1, -1, -1,
114 -1, -1, -1, -1,
115 -1, -1, -1, -1,
116 -1, -1, -1, -1,
117 -1, -1, -1, -1,
118 -1, -1, -1, -1,
119 -1, -1, -1, -1,
120 -1, -1, -1, -1,
121 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
122 PT_CTR * 4, PT_XER * 4, -1,
123 PT_ORIG_R3 * 4, PT_TRAP * 4
124 };
125 #endif
126
127 /* Check whether the kernel provides a register set with number
128 REGSET_ID of size REGSETSIZE for process/thread TID. */
129
130 static int
131 ppc_check_regset (int tid, int regset_id, int regsetsize)
132 {
133 void *buf = alloca (regsetsize);
134 struct iovec iov;
135
136 iov.iov_base = buf;
137 iov.iov_len = regsetsize;
138
139 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
140 || errno == ENODATA)
141 return 1;
142 return 0;
143 }
144
145 static int
146 ppc_cannot_store_register (int regno)
147 {
148 const struct target_desc *tdesc = current_process ()->tdesc;
149
150 #ifndef __powerpc64__
151 /* Some kernels do not allow us to store fpscr. */
152 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
153 && regno == find_regno (tdesc, "fpscr"))
154 return 2;
155 #endif
156
157 /* Some kernels do not allow us to store orig_r3 or trap. */
158 if (regno == find_regno (tdesc, "orig_r3")
159 || regno == find_regno (tdesc, "trap"))
160 return 2;
161
162 return 0;
163 }
164
165 static int
166 ppc_cannot_fetch_register (int regno)
167 {
168 return 0;
169 }
170
171 static void
172 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
173 {
174 memset (buf, 0, sizeof (long));
175
176 if (__BYTE_ORDER == __LITTLE_ENDIAN)
177 {
178 /* Little-endian values always sit at the left end of the buffer. */
179 collect_register (regcache, regno, buf);
180 }
181 else if (__BYTE_ORDER == __BIG_ENDIAN)
182 {
183 /* Big-endian values sit at the right end of the buffer. For
184 registers whose size is smaller than sizeof (long), we must apply
185 padding to access them correctly. */
186 int size = register_size (regcache->tdesc, regno);
187
188 if (size < sizeof (long))
189 collect_register (regcache, regno, buf + sizeof (long) - size);
190 else
191 collect_register (regcache, regno, buf);
192 }
193 else
194 perror_with_name ("Unexpected byte order");
195 }
196
197 static void
198 ppc_supply_ptrace_register (struct regcache *regcache,
199 int regno, const char *buf)
200 {
201 if (__BYTE_ORDER == __LITTLE_ENDIAN)
202 {
203 /* Little-endian values always sit at the left end of the buffer. */
204 supply_register (regcache, regno, buf);
205 }
206 else if (__BYTE_ORDER == __BIG_ENDIAN)
207 {
208 /* Big-endian values sit at the right end of the buffer. For
209 registers whose size is smaller than sizeof (long), we must apply
210 padding to access them correctly. */
211 int size = register_size (regcache->tdesc, regno);
212
213 if (size < sizeof (long))
214 supply_register (regcache, regno, buf + sizeof (long) - size);
215 else
216 supply_register (regcache, regno, buf);
217 }
218 else
219 perror_with_name ("Unexpected byte order");
220 }
221
222
223 #define INSTR_SC 0x44000002
224 #define NR_spu_run 0x0116
225
226 /* If the PPU thread is currently stopped on a spu_run system call,
227 return to FD and ADDR the file handle and NPC parameter address
228 used with the system call. Return non-zero if successful. */
229 static int
230 parse_spufs_run (struct regcache *regcache, int *fd, CORE_ADDR *addr)
231 {
232 CORE_ADDR curr_pc;
233 int curr_insn;
234 int curr_r0;
235
236 if (register_size (regcache->tdesc, 0) == 4)
237 {
238 unsigned int pc, r0, r3, r4;
239 collect_register_by_name (regcache, "pc", &pc);
240 collect_register_by_name (regcache, "r0", &r0);
241 collect_register_by_name (regcache, "orig_r3", &r3);
242 collect_register_by_name (regcache, "r4", &r4);
243 curr_pc = (CORE_ADDR) pc;
244 curr_r0 = (int) r0;
245 *fd = (int) r3;
246 *addr = (CORE_ADDR) r4;
247 }
248 else
249 {
250 unsigned long pc, r0, r3, r4;
251 collect_register_by_name (regcache, "pc", &pc);
252 collect_register_by_name (regcache, "r0", &r0);
253 collect_register_by_name (regcache, "orig_r3", &r3);
254 collect_register_by_name (regcache, "r4", &r4);
255 curr_pc = (CORE_ADDR) pc;
256 curr_r0 = (int) r0;
257 *fd = (int) r3;
258 *addr = (CORE_ADDR) r4;
259 }
260
261 /* Fetch instruction preceding current NIP. */
262 if ((*the_target->read_memory) (curr_pc - 4,
263 (unsigned char *) &curr_insn, 4) != 0)
264 return 0;
265 /* It should be a "sc" instruction. */
266 if (curr_insn != INSTR_SC)
267 return 0;
268 /* System call number should be NR_spu_run. */
269 if (curr_r0 != NR_spu_run)
270 return 0;
271
272 return 1;
273 }
274
275 static CORE_ADDR
276 ppc_get_pc (struct regcache *regcache)
277 {
278 CORE_ADDR addr;
279 int fd;
280
281 if (parse_spufs_run (regcache, &fd, &addr))
282 {
283 unsigned int pc;
284 (*the_target->read_memory) (addr, (unsigned char *) &pc, 4);
285 return ((CORE_ADDR)1 << 63)
286 | ((CORE_ADDR)fd << 32) | (CORE_ADDR) (pc - 4);
287 }
288 else if (register_size (regcache->tdesc, 0) == 4)
289 {
290 unsigned int pc;
291 collect_register_by_name (regcache, "pc", &pc);
292 return (CORE_ADDR) pc;
293 }
294 else
295 {
296 unsigned long pc;
297 collect_register_by_name (regcache, "pc", &pc);
298 return (CORE_ADDR) pc;
299 }
300 }
301
302 static void
303 ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
304 {
305 CORE_ADDR addr;
306 int fd;
307
308 if (parse_spufs_run (regcache, &fd, &addr))
309 {
310 unsigned int newpc = pc;
311 (*the_target->write_memory) (addr, (unsigned char *) &newpc, 4);
312 }
313 else if (register_size (regcache->tdesc, 0) == 4)
314 {
315 unsigned int newpc = pc;
316 supply_register_by_name (regcache, "pc", &newpc);
317 }
318 else
319 {
320 unsigned long newpc = pc;
321 supply_register_by_name (regcache, "pc", &newpc);
322 }
323 }
324
325
326 static int
327 ppc_get_auxv (unsigned long type, unsigned long *valp)
328 {
329 const struct target_desc *tdesc = current_process ()->tdesc;
330 int wordsize = register_size (tdesc, 0);
331 unsigned char *data = (unsigned char *) alloca (2 * wordsize);
332 int offset = 0;
333
334 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
335 {
336 if (wordsize == 4)
337 {
338 unsigned int *data_p = (unsigned int *)data;
339 if (data_p[0] == type)
340 {
341 *valp = data_p[1];
342 return 1;
343 }
344 }
345 else
346 {
347 unsigned long *data_p = (unsigned long *)data;
348 if (data_p[0] == type)
349 {
350 *valp = data_p[1];
351 return 1;
352 }
353 }
354
355 offset += 2 * wordsize;
356 }
357
358 *valp = 0;
359 return 0;
360 }
361
362 #ifndef __powerpc64__
363 static int ppc_regmap_adjusted;
364 #endif
365
366
367 /* Correct in either endianness.
368 This instruction is "twge r2, r2", which GDB uses as a software
369 breakpoint. */
370 static const unsigned int ppc_breakpoint = 0x7d821008;
371 #define ppc_breakpoint_len 4
372
373 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
374
375 static const gdb_byte *
376 ppc_sw_breakpoint_from_kind (int kind, int *size)
377 {
378 *size = ppc_breakpoint_len;
379 return (const gdb_byte *) &ppc_breakpoint;
380 }
381
382 static int
383 ppc_breakpoint_at (CORE_ADDR where)
384 {
385 unsigned int insn;
386
387 if (where & ((CORE_ADDR)1 << 63))
388 {
389 char mem_annex[32];
390 sprintf (mem_annex, "%d/mem", (int)((where >> 32) & 0x7fffffff));
391 (*the_target->qxfer_spu) (mem_annex, (unsigned char *) &insn,
392 NULL, where & 0xffffffff, 4);
393 if (insn == 0x3fff)
394 return 1;
395 }
396 else
397 {
398 (*the_target->read_memory) (where, (unsigned char *) &insn, 4);
399 if (insn == ppc_breakpoint)
400 return 1;
401 /* If necessary, recognize more trap instructions here. GDB only uses
402 the one. */
403 }
404
405 return 0;
406 }
407
408 /* Implement supports_z_point_type target-ops.
409 Returns true if type Z_TYPE breakpoint is supported.
410
411 Software breakpoints are handled on the server side, so tracepoints
412 and breakpoints can be inserted at the same location. */
413
414 static int
415 ppc_supports_z_point_type (char z_type)
416 {
417 switch (z_type)
418 {
419 case Z_PACKET_SW_BP:
420 return 1;
421 case Z_PACKET_HW_BP:
422 case Z_PACKET_WRITE_WP:
423 case Z_PACKET_ACCESS_WP:
424 default:
425 return 0;
426 }
427 }
428
429 /* Implement insert_point target-ops.
430 Returns 0 on success, -1 on failure and 1 on unsupported. */
431
432 static int
433 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
434 int size, struct raw_breakpoint *bp)
435 {
436 switch (type)
437 {
438 case raw_bkpt_type_sw:
439 return insert_memory_breakpoint (bp);
440
441 case raw_bkpt_type_hw:
442 case raw_bkpt_type_write_wp:
443 case raw_bkpt_type_access_wp:
444 default:
445 /* Unsupported. */
446 return 1;
447 }
448 }
449
450 /* Implement remove_point target-ops.
451 Returns 0 on success, -1 on failure and 1 on unsupported. */
452
453 static int
454 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
455 int size, struct raw_breakpoint *bp)
456 {
457 switch (type)
458 {
459 case raw_bkpt_type_sw:
460 return remove_memory_breakpoint (bp);
461
462 case raw_bkpt_type_hw:
463 case raw_bkpt_type_write_wp:
464 case raw_bkpt_type_access_wp:
465 default:
466 /* Unsupported. */
467 return 1;
468 }
469 }
470
471 /* Provide only a fill function for the general register set. ps_lgetregs
472 will use this for NPTL support. */
473
474 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
475 {
476 int i;
477
478 for (i = 0; i < 32; i++)
479 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
480
481 for (i = 64; i < 70; i++)
482 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
483
484 for (i = 71; i < 73; i++)
485 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
486 }
487
488 /* Program Priority Register regset fill function. */
489
490 static void
491 ppc_fill_pprregset (struct regcache *regcache, void *buf)
492 {
493 char *ppr = (char *) buf;
494
495 collect_register_by_name (regcache, "ppr", ppr);
496 }
497
498 /* Program Priority Register regset store function. */
499
500 static void
501 ppc_store_pprregset (struct regcache *regcache, const void *buf)
502 {
503 const char *ppr = (const char *) buf;
504
505 supply_register_by_name (regcache, "ppr", ppr);
506 }
507
508 /* Data Stream Control Register regset fill function. */
509
510 static void
511 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
512 {
513 char *dscr = (char *) buf;
514
515 collect_register_by_name (regcache, "dscr", dscr);
516 }
517
518 /* Data Stream Control Register regset store function. */
519
520 static void
521 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
522 {
523 const char *dscr = (const char *) buf;
524
525 supply_register_by_name (regcache, "dscr", dscr);
526 }
527
528 /* Target Address Register regset fill function. */
529
530 static void
531 ppc_fill_tarregset (struct regcache *regcache, void *buf)
532 {
533 char *tar = (char *) buf;
534
535 collect_register_by_name (regcache, "tar", tar);
536 }
537
538 /* Target Address Register regset store function. */
539
540 static void
541 ppc_store_tarregset (struct regcache *regcache, const void *buf)
542 {
543 const char *tar = (const char *) buf;
544
545 supply_register_by_name (regcache, "tar", tar);
546 }
547
548 /* Event-Based Branching regset store function. Unless the inferior
549 has a perf event open, reading or writing this regset with ptrace
550 can fail with ENODATA. For reading, the registers
551 will correctly show as unavailable. For writing, gdbserver
552 currently only caches any register writes from P and G packets and
553 the stub always tries to write all the regsets when resuming the
554 inferior, which would result in frequent warnings. For this
555 reason, we don't define a fill function. This also means that the
556 client-side regcache will be dirty if the user tries to write to
557 the EBB registers. G packets that the client sends to write to
558 unrelated registers will also include data for EBB registers, even
559 if they are unavailable. */
560
561 static void
562 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
563 {
564 const char *regset = (const char *) buf;
565
566 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
567 .dat file it is BESCR, EBBHR, EBBRR. */
568 supply_register_by_name (regcache, "ebbrr", &regset[0]);
569 supply_register_by_name (regcache, "ebbhr", &regset[8]);
570 supply_register_by_name (regcache, "bescr", &regset[16]);
571 }
572
573 /* Performance Monitoring Unit regset fill function. */
574
575 static void
576 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
577 {
578 char *regset = (char *) buf;
579
580 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
581 In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER. */
582 collect_register_by_name (regcache, "siar", &regset[0]);
583 collect_register_by_name (regcache, "sdar", &regset[8]);
584 collect_register_by_name (regcache, "sier", &regset[16]);
585 collect_register_by_name (regcache, "mmcr2", &regset[24]);
586 collect_register_by_name (regcache, "mmcr0", &regset[32]);
587 }
588
589 /* Performance Monitoring Unit regset store function. */
590
591 static void
592 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
593 {
594 const char *regset = (const char *) buf;
595
596 supply_register_by_name (regcache, "siar", &regset[0]);
597 supply_register_by_name (regcache, "sdar", &regset[8]);
598 supply_register_by_name (regcache, "sier", &regset[16]);
599 supply_register_by_name (regcache, "mmcr2", &regset[24]);
600 supply_register_by_name (regcache, "mmcr0", &regset[32]);
601 }
602
603 /* Hardware Transactional Memory special-purpose register regset fill
604 function. */
605
606 static void
607 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
608 {
609 int i, base;
610 char *regset = (char *) buf;
611
612 base = find_regno (regcache->tdesc, "tfhar");
613 for (i = 0; i < 3; i++)
614 collect_register (regcache, base + i, &regset[i * 8]);
615 }
616
617 /* Hardware Transactional Memory special-purpose register regset store
618 function. */
619
620 static void
621 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
622 {
623 int i, base;
624 const char *regset = (const char *) buf;
625
626 base = find_regno (regcache->tdesc, "tfhar");
627 for (i = 0; i < 3; i++)
628 supply_register (regcache, base + i, &regset[i * 8]);
629 }
630
631 /* For the same reasons as the EBB regset, none of the HTM
632 checkpointed regsets have a fill function. These registers are
633 only available if the inferior is in a transaction. */
634
635 /* Hardware Transactional Memory checkpointed general-purpose regset
636 store function. */
637
638 static void
639 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
640 {
641 int i, base, size, endian_offset;
642 const char *regset = (const char *) buf;
643
644 base = find_regno (regcache->tdesc, "cr0");
645 size = register_size (regcache->tdesc, base);
646
647 gdb_assert (size == 4 || size == 8);
648
649 for (i = 0; i < 32; i++)
650 supply_register (regcache, base + i, &regset[i * size]);
651
652 endian_offset = 0;
653
654 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
655 endian_offset = 4;
656
657 supply_register_by_name (regcache, "ccr",
658 &regset[PT_CCR * size + endian_offset]);
659
660 supply_register_by_name (regcache, "cxer",
661 &regset[PT_XER * size + endian_offset]);
662
663 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
664 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
665 }
666
667 /* Hardware Transactional Memory checkpointed floating-point regset
668 store function. */
669
670 static void
671 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
672 {
673 int i, base;
674 const char *regset = (const char *) buf;
675
676 base = find_regno (regcache->tdesc, "cf0");
677
678 for (i = 0; i < 32; i++)
679 supply_register (regcache, base + i, &regset[i * 8]);
680
681 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
682 }
683
684 /* Hardware Transactional Memory checkpointed vector regset store
685 function. */
686
687 static void
688 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
689 {
690 int i, base;
691 const char *regset = (const char *) buf;
692 int vscr_offset = 0;
693
694 base = find_regno (regcache->tdesc, "cvr0");
695
696 for (i = 0; i < 32; i++)
697 supply_register (regcache, base + i, &regset[i * 16]);
698
699 if (__BYTE_ORDER == __BIG_ENDIAN)
700 vscr_offset = 12;
701
702 supply_register_by_name (regcache, "cvscr",
703 &regset[32 * 16 + vscr_offset]);
704
705 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
706 }
707
708 /* Hardware Transactional Memory checkpointed vector-scalar regset
709 store function. */
710
711 static void
712 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
713 {
714 int i, base;
715 const char *regset = (const char *) buf;
716
717 base = find_regno (regcache->tdesc, "cvs0h");
718 for (i = 0; i < 32; i++)
719 supply_register (regcache, base + i, &regset[i * 8]);
720 }
721
722 /* Hardware Transactional Memory checkpointed Program Priority
723 Register regset store function. */
724
725 static void
726 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
727 {
728 const char *cppr = (const char *) buf;
729
730 supply_register_by_name (regcache, "cppr", cppr);
731 }
732
733 /* Hardware Transactional Memory checkpointed Data Stream Control
734 Register regset store function. */
735
736 static void
737 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
738 {
739 const char *cdscr = (const char *) buf;
740
741 supply_register_by_name (regcache, "cdscr", cdscr);
742 }
743
744 /* Hardware Transactional Memory checkpointed Target Address Register
745 regset store function. */
746
747 static void
748 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
749 {
750 const char *ctar = (const char *) buf;
751
752 supply_register_by_name (regcache, "ctar", ctar);
753 }
754
755 static void
756 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
757 {
758 int i, base;
759 char *regset = (char *) buf;
760
761 base = find_regno (regcache->tdesc, "vs0h");
762 for (i = 0; i < 32; i++)
763 collect_register (regcache, base + i, &regset[i * 8]);
764 }
765
766 static void
767 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
768 {
769 int i, base;
770 const char *regset = (const char *) buf;
771
772 base = find_regno (regcache->tdesc, "vs0h");
773 for (i = 0; i < 32; i++)
774 supply_register (regcache, base + i, &regset[i * 8]);
775 }
776
777 static void
778 ppc_fill_vrregset (struct regcache *regcache, void *buf)
779 {
780 int i, base;
781 char *regset = (char *) buf;
782 int vscr_offset = 0;
783
784 base = find_regno (regcache->tdesc, "vr0");
785 for (i = 0; i < 32; i++)
786 collect_register (regcache, base + i, &regset[i * 16]);
787
788 if (__BYTE_ORDER == __BIG_ENDIAN)
789 vscr_offset = 12;
790
791 collect_register_by_name (regcache, "vscr",
792 &regset[32 * 16 + vscr_offset]);
793
794 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
795 }
796
797 static void
798 ppc_store_vrregset (struct regcache *regcache, const void *buf)
799 {
800 int i, base;
801 const char *regset = (const char *) buf;
802 int vscr_offset = 0;
803
804 base = find_regno (regcache->tdesc, "vr0");
805 for (i = 0; i < 32; i++)
806 supply_register (regcache, base + i, &regset[i * 16]);
807
808 if (__BYTE_ORDER == __BIG_ENDIAN)
809 vscr_offset = 12;
810
811 supply_register_by_name (regcache, "vscr",
812 &regset[32 * 16 + vscr_offset]);
813 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
814 }
815
816 struct gdb_evrregset_t
817 {
818 unsigned long evr[32];
819 unsigned long long acc;
820 unsigned long spefscr;
821 };
822
823 static void
824 ppc_fill_evrregset (struct regcache *regcache, void *buf)
825 {
826 int i, ev0;
827 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
828
829 ev0 = find_regno (regcache->tdesc, "ev0h");
830 for (i = 0; i < 32; i++)
831 collect_register (regcache, ev0 + i, &regset->evr[i]);
832
833 collect_register_by_name (regcache, "acc", &regset->acc);
834 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
835 }
836
837 static void
838 ppc_store_evrregset (struct regcache *regcache, const void *buf)
839 {
840 int i, ev0;
841 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
842
843 ev0 = find_regno (regcache->tdesc, "ev0h");
844 for (i = 0; i < 32; i++)
845 supply_register (regcache, ev0 + i, &regset->evr[i]);
846
847 supply_register_by_name (regcache, "acc", &regset->acc);
848 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
849 }
850
851 /* Support for hardware single step. */
852
853 static int
854 ppc_supports_hardware_single_step (void)
855 {
856 return 1;
857 }
858
859 static struct regset_info ppc_regsets[] = {
860 /* List the extra register sets before GENERAL_REGS. That way we will
861 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
862 general registers. Some kernels support these, but not the newer
863 PPC_PTRACE_GETREGS. */
864 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
865 NULL, ppc_store_tm_ctarregset },
866 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
867 NULL, ppc_store_tm_cdscrregset },
868 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
869 NULL, ppc_store_tm_cpprregset },
870 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
871 NULL, ppc_store_tm_cvsxregset },
872 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
873 NULL, ppc_store_tm_cvrregset },
874 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
875 NULL, ppc_store_tm_cfprregset },
876 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
877 NULL, ppc_store_tm_cgprregset },
878 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
879 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
880 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
881 NULL, ppc_store_ebbregset },
882 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
883 ppc_fill_pmuregset, ppc_store_pmuregset },
884 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
885 ppc_fill_tarregset, ppc_store_tarregset },
886 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
887 ppc_fill_pprregset, ppc_store_pprregset },
888 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
889 ppc_fill_dscrregset, ppc_store_dscrregset },
890 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
891 ppc_fill_vsxregset, ppc_store_vsxregset },
892 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
893 ppc_fill_vrregset, ppc_store_vrregset },
894 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
895 ppc_fill_evrregset, ppc_store_evrregset },
896 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
897 NULL_REGSET
898 };
899
900 static struct usrregs_info ppc_usrregs_info =
901 {
902 ppc_num_regs,
903 ppc_regmap,
904 };
905
906 static struct regsets_info ppc_regsets_info =
907 {
908 ppc_regsets, /* regsets */
909 0, /* num_regsets */
910 NULL, /* disabled_regsets */
911 };
912
913 static struct regs_info regs_info =
914 {
915 NULL, /* regset_bitmap */
916 &ppc_usrregs_info,
917 &ppc_regsets_info
918 };
919
920 static const struct regs_info *
921 ppc_regs_info (void)
922 {
923 return &regs_info;
924 }
925
926 static void
927 ppc_arch_setup (void)
928 {
929 const struct target_desc *tdesc;
930 struct regset_info *regset;
931 struct ppc_linux_features features = ppc_linux_no_features;
932
933 int tid = lwpid_of (current_thread);
934
935 features.wordsize = ppc_linux_target_wordsize (tid);
936
937 if (features.wordsize == 4)
938 tdesc = tdesc_powerpc_32l;
939 else
940 tdesc = tdesc_powerpc_64l;
941
942 current_process ()->tdesc = tdesc;
943
944 /* The value of current_process ()->tdesc needs to be set for this
945 call. */
946 ppc_get_auxv (AT_HWCAP, &ppc_hwcap);
947 ppc_get_auxv (AT_HWCAP2, &ppc_hwcap2);
948
949 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
950
951 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
952 features.vsx = true;
953
954 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
955 features.altivec = true;
956
957 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
958 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
959 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
960 {
961 features.ppr_dscr = true;
962 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
963 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
964 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
965 && ppc_check_regset (tid, NT_PPC_TAR,
966 PPC_LINUX_SIZEOF_TARREGSET)
967 && ppc_check_regset (tid, NT_PPC_EBB,
968 PPC_LINUX_SIZEOF_EBBREGSET)
969 && ppc_check_regset (tid, NT_PPC_PMU,
970 PPC_LINUX_SIZEOF_PMUREGSET))
971 {
972 features.isa207 = true;
973 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
974 && ppc_check_regset (tid, NT_PPC_TM_SPR,
975 PPC_LINUX_SIZEOF_TM_SPRREGSET))
976 features.htm = true;
977 }
978 }
979
980 if (ppc_hwcap & PPC_FEATURE_CELL)
981 features.cell = true;
982
983 tdesc = ppc_linux_match_description (features);
984
985 /* On 32-bit machines, check for SPE registers.
986 Set the low target's regmap field as appropriate. */
987 #ifndef __powerpc64__
988 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
989 tdesc = tdesc_powerpc_e500l;
990
991 if (!ppc_regmap_adjusted)
992 {
993 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
994 ppc_usrregs_info.regmap = ppc_regmap_e500;
995
996 /* If the FPSCR is 64-bit wide, we need to fetch the whole
997 64-bit slot and not just its second word. The PT_FPSCR
998 supplied in a 32-bit GDB compilation doesn't reflect
999 this. */
1000 if (register_size (tdesc, 70) == 8)
1001 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
1002
1003 ppc_regmap_adjusted = 1;
1004 }
1005 #endif
1006
1007 current_process ()->tdesc = tdesc;
1008
1009 for (regset = ppc_regsets; regset->size >= 0; regset++)
1010 switch (regset->get_request)
1011 {
1012 case PTRACE_GETVRREGS:
1013 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
1014 break;
1015 case PTRACE_GETVSXREGS:
1016 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
1017 break;
1018 case PTRACE_GETEVRREGS:
1019 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
1020 regset->size = 32 * 4 + 8 + 4;
1021 else
1022 regset->size = 0;
1023 break;
1024 case PTRACE_GETREGSET:
1025 switch (regset->nt_type)
1026 {
1027 case NT_PPC_PPR:
1028 regset->size = (features.ppr_dscr ?
1029 PPC_LINUX_SIZEOF_PPRREGSET : 0);
1030 break;
1031 case NT_PPC_DSCR:
1032 regset->size = (features.ppr_dscr ?
1033 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
1034 break;
1035 case NT_PPC_TAR:
1036 regset->size = (features.isa207 ?
1037 PPC_LINUX_SIZEOF_TARREGSET : 0);
1038 break;
1039 case NT_PPC_EBB:
1040 regset->size = (features.isa207 ?
1041 PPC_LINUX_SIZEOF_EBBREGSET : 0);
1042 break;
1043 case NT_PPC_PMU:
1044 regset->size = (features.isa207 ?
1045 PPC_LINUX_SIZEOF_PMUREGSET : 0);
1046 break;
1047 case NT_PPC_TM_SPR:
1048 regset->size = (features.htm ?
1049 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
1050 break;
1051 case NT_PPC_TM_CGPR:
1052 if (features.wordsize == 4)
1053 regset->size = (features.htm ?
1054 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
1055 else
1056 regset->size = (features.htm ?
1057 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
1058 break;
1059 case NT_PPC_TM_CFPR:
1060 regset->size = (features.htm ?
1061 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
1062 break;
1063 case NT_PPC_TM_CVMX:
1064 regset->size = (features.htm ?
1065 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
1066 break;
1067 case NT_PPC_TM_CVSX:
1068 regset->size = (features.htm ?
1069 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
1070 break;
1071 case NT_PPC_TM_CPPR:
1072 regset->size = (features.htm ?
1073 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
1074 break;
1075 case NT_PPC_TM_CDSCR:
1076 regset->size = (features.htm ?
1077 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
1078 break;
1079 case NT_PPC_TM_CTAR:
1080 regset->size = (features.htm ?
1081 PPC_LINUX_SIZEOF_CTARREGSET : 0);
1082 break;
1083 default:
1084 break;
1085 }
1086 break;
1087 default:
1088 break;
1089 }
1090 }
1091
1092 /* Implementation of linux_target_ops method "supports_tracepoints". */
1093
1094 static int
1095 ppc_supports_tracepoints (void)
1096 {
1097 return 1;
1098 }
1099
1100 /* Get the thread area address. This is used to recognize which
1101 thread is which when tracing with the in-process agent library. We
1102 don't read anything from the address, and treat it as opaque; it's
1103 the address itself that we assume is unique per-thread. */
1104
1105 static int
1106 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
1107 {
1108 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1109 struct thread_info *thr = get_lwp_thread (lwp);
1110 struct regcache *regcache = get_thread_regcache (thr, 1);
1111 ULONGEST tp = 0;
1112
1113 #ifdef __powerpc64__
1114 if (register_size (regcache->tdesc, 0) == 8)
1115 collect_register_by_name (regcache, "r13", &tp);
1116 else
1117 #endif
1118 collect_register_by_name (regcache, "r2", &tp);
1119
1120 *addr = tp;
1121
1122 return 0;
1123 }
1124
1125 #ifdef __powerpc64__
1126
1127 /* Older glibc doesn't provide this. */
1128
1129 #ifndef EF_PPC64_ABI
1130 #define EF_PPC64_ABI 3
1131 #endif
1132
1133 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1134 inferiors. */
1135
1136 static int
1137 is_elfv2_inferior (void)
1138 {
1139 /* To be used as fallback if we're unable to determine the right result -
1140 assume inferior uses the same ABI as gdbserver. */
1141 #if _CALL_ELF == 2
1142 const int def_res = 1;
1143 #else
1144 const int def_res = 0;
1145 #endif
1146 unsigned long phdr;
1147 Elf64_Ehdr ehdr;
1148
1149 if (!ppc_get_auxv (AT_PHDR, &phdr))
1150 return def_res;
1151
1152 /* Assume ELF header is at the beginning of the page where program headers
1153 are located. If it doesn't look like one, bail. */
1154
1155 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1156 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1157 return def_res;
1158
1159 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1160 }
1161
1162 #endif
1163
1164 /* Generate a ds-form instruction in BUF and return the number of instructions written.
1165
1166 0 6 11 16 30 32
1167 | OPCD | RST | RA | DS |XO| */
1168
1169 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1170 static int
1171 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1172 {
1173 uint32_t insn;
1174
1175 gdb_assert ((opcd & ~0x3f) == 0);
1176 gdb_assert ((rst & ~0x1f) == 0);
1177 gdb_assert ((ra & ~0x1f) == 0);
1178 gdb_assert ((xo & ~0x3) == 0);
1179
1180 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1181 *buf = (opcd << 26) | insn;
1182 return 1;
1183 }
1184
1185 /* The following are frequently used ds-form instructions. */
1186
1187 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1188 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1189 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1190 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
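/* For example, GEN_STD (buf, 31, 1, 16) encodes "std r31, 16(r1)"
   (0xfbe10010); the DS offset must be a multiple of 4, since the low
   two bits hold XO.  */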
1191
1192 /* Generate a d-form instruction in BUF.
1193
1194 0 6 11 16 32
1195 | OPCD | RST | RA | D | */
1196
1197 static int
1198 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1199 {
1200 uint32_t insn;
1201
1202 gdb_assert ((opcd & ~0x3f) == 0);
1203 gdb_assert ((rst & ~0x1f) == 0);
1204 gdb_assert ((ra & ~0x1f) == 0);
1205
1206 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1207 *buf = (opcd << 26) | insn;
1208 return 1;
1209 }
1210
1211 /* The following are frequently used d-form instructions. */
1212
1213 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1214 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1215 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1216 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1217 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1218 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1219 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1220 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1221 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
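/* For example, GEN_STWU (buf, 1, 1, -96) encodes "stwu r1, -96(r1)"
   (0x9421ffa0), the form used below to allocate a stack frame.  */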
1222
1223 /* Generate an xfx-form instruction in BUF and return the number of
1224 instructions written.
1225
1226 0 6 11 21 31 32
1227 | OPCD | RST | RI | XO |/| */
1228
1229 static int
1230 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1231 {
1232 uint32_t insn;
1233 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1234
1235 gdb_assert ((opcd & ~0x3f) == 0);
1236 gdb_assert ((rst & ~0x1f) == 0);
1237 gdb_assert ((xo & ~0x3ff) == 0);
1238
1239 insn = (rst << 21) | (n << 11) | (xo << 1);
1240 *buf = (opcd << 26) | insn;
1241 return 1;
1242 }
1243
1244 /* The following are frequently used xfx-form instructions. */
1245
1246 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1247 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1248 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1249 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1250 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1251 E & 0xf, 598)
1252 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
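/* For example, GEN_MFSPR (buf, 5, 8) encodes "mflr r5" (0x7ca802a6):
   SPR 8 is the link register, and the two 5-bit halves of the SPR
   number are swapped in the encoding, as computed in N above.  */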
1253
1254
1255 /* Generate an x-form instruction in BUF and return the number of instructions written.
1256
1257 0 6 11 16 21 31 32
1258 | OPCD | RST | RA | RB | XO |RC| */
1259
1260 static int
1261 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1262 {
1263 uint32_t insn;
1264
1265 gdb_assert ((opcd & ~0x3f) == 0);
1266 gdb_assert ((rst & ~0x1f) == 0);
1267 gdb_assert ((ra & ~0x1f) == 0);
1268 gdb_assert ((rb & ~0x1f) == 0);
1269 gdb_assert ((xo & ~0x3ff) == 0);
1270 gdb_assert ((rc & ~1) == 0);
1271
1272 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1273 *buf = (opcd << 26) | insn;
1274 return 1;
1275 }
1276
1277 /* The following are frequently used x-form instructions. */
1278
1279 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1280 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1281 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1282 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1283 /* Assume bf = cr7. */
1284 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
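/* For example, GEN_MR (buf, 30, 1) expands to GEN_OR (buf, 30, 1, 1),
   i.e. "mr r30, r1" (0x7c3e0b78).  */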
1285
1286
1287 /* Generate an md-form instruction in BUF and return the number of instructions written.
1288
1289 0 6 11 16 21 27 30 31 32
1290 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1291
1292 static int
1293 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1294 int xo, int rc)
1295 {
1296 uint32_t insn;
1297 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1298 unsigned int sh0_4 = sh & 0x1f;
1299 unsigned int sh5 = (sh >> 5) & 1;
1300
1301 gdb_assert ((opcd & ~0x3f) == 0);
1302 gdb_assert ((rs & ~0x1f) == 0);
1303 gdb_assert ((ra & ~0x1f) == 0);
1304 gdb_assert ((sh & ~0x3f) == 0);
1305 gdb_assert ((mb & ~0x3f) == 0);
1306 gdb_assert ((xo & ~0x7) == 0);
1307 gdb_assert ((rc & ~0x1) == 0);
1308
1309 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1310 | (sh5 << 1) | (xo << 2) | (rc & 1);
1311 *buf = (opcd << 26) | insn;
1312 return 1;
1313 }
1314
1315 /* The following are frequently used md-form instructions. */
1316
1317 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1318 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1319 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1320 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
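/* For example, GEN_RLDICL (buf, 3, 3, 0, 32) encodes
   "rldicl r3, r3, 0, 32" (0x78630020), i.e. "clrldi r3, 32", which
   gen_limm below uses to clear the upper 32 bits of a register.  */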
1321
1322 /* Generate an i-form instruction in BUF and return the number of instructions written.
1323
1324 0 6 30 31 32
1325 | OPCD | LI |AA|LK| */
1326
1327 static int
1328 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1329 {
1330 uint32_t insn;
1331
1332 gdb_assert ((opcd & ~0x3f) == 0);
1333
1334 insn = (li & 0x3fffffc) | ((aa & 1) << 1) | (lk & 1);
1335 *buf = (opcd << 26) | insn;
1336 return 1;
1337 }
1338
1339 /* The following are frequently used i-form instructions. */
1340
1341 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1342 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
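/* For example, GEN_B (buf, -32) encodes "b .-32" (0x4bffffe0); the
   displacement is in bytes and must be a multiple of 4.  */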
1343
1344 /* Generate a b-form instruction in BUF and return the number of instructions written.
1345
1346 0 6 11 16 30 31 32
1347 | OPCD | BO | BI | BD |AA|LK| */
1348
1349 static int
1350 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1351 int aa, int lk)
1352 {
1353 uint32_t insn;
1354
1355 gdb_assert ((opcd & ~0x3f) == 0);
1356 gdb_assert ((bo & ~0x1f) == 0);
1357 gdb_assert ((bi & ~0x1f) == 0);
1358
1359 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | ((aa & 1) << 1) | (lk & 1);
1360 *buf = (opcd << 26) | insn;
1361 return 1;
1362 }
1363
1364 /* The following are frequently used b-form instructions. */
1365 /* Assume bi = cr7. */
1366 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
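/* For example, GEN_BNE (buf, -8) encodes "bne cr7, .-8" (0x409efff8),
   the loop-back branch used by gen_atomic_xchg below.  */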
1367
1368 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1369 respectively. They are primarily used to save/restore GPRs in the
1370 jump pad, not for bytecode compilation. */
1371
1372 #ifdef __powerpc64__
1373 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1374 GEN_LD (buf, rt, ra, si) : \
1375 GEN_LWZ (buf, rt, ra, si))
1376 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1377 GEN_STD (buf, rt, ra, si) : \
1378 GEN_STW (buf, rt, ra, si))
1379 #else
1380 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1381 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1382 #endif
1383
1384 /* Generate a sequence of instructions to load IMM into the register REG.
1385 Write the instructions in BUF and return the number of instructions written. */
1386
1387 static int
1388 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1389 {
1390 uint32_t *p = buf;
1391
1392 if ((imm + 32768) < 65536)
1393 {
1394 /* li reg, imm[15:0] */
1395 p += GEN_LI (p, reg, imm);
1396 }
1397 else if ((imm >> 32) == 0)
1398 {
1399 /* lis reg, imm[31:16]
1400 ori reg, reg, imm[15:0]
1401 rldicl reg, reg, 0, 32 */
1402 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1403 if ((imm & 0xffff) != 0)
1404 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1405 /* Clear upper 32-bit if sign-bit is set. */
1406 if (imm & (1u << 31) && is_64)
1407 p += GEN_RLDICL (p, reg, reg, 0, 32);
1408 }
1409 else
1410 {
1411 gdb_assert (is_64);
1412 /* lis reg, <imm[63:48]>
1413 ori reg, reg, <imm[47:32]>
1414 rldicr reg, reg, 32, 31
1415 oris reg, reg, <imm[31:16]>
1416 ori reg, reg, <imm[15:0]> */
1417 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1418 if (((imm >> 32) & 0xffff) != 0)
1419 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1420 p += GEN_RLDICR (p, reg, reg, 32, 31);
1421 if (((imm >> 16) & 0xffff) != 0)
1422 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1423 if ((imm & 0xffff) != 0)
1424 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1425 }
1426
1427 return p - buf;
1428 }
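/* For example, gen_limm (buf, 3, 0x12345678, 0) emits two instructions,
   "lis r3, 0x1234" followed by "ori r3, r3, 0x5678", and returns 2.  */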
1429
1430 /* Generate a sequence for an atomic exchange at location LOCK.
1431 This code sequence clobbers r6, r7, r8. LOCK is the location for
1432 the atomic-xchg, OLD_VALUE is the expected old value stored in the
1433 location, and R_NEW is a register for the new value.
1434
1435 static int
1436 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1437 int is_64)
1438 {
1439 const int r_lock = 6;
1440 const int r_old = 7;
1441 const int r_tmp = 8;
1442 uint32_t *p = buf;
1443
1444 /*
1445 1: lwarx TMP, 0, LOCK
1446 cmpw TMP, OLD
1447 bne 1b
1448 stwcx. NEW, 0, LOCK
1449 bne 1b */
1450
1451 p += gen_limm (p, r_lock, lock, is_64);
1452 p += gen_limm (p, r_old, old_value, is_64);
1453
1454 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1455 p += GEN_CMPW (p, r_tmp, r_old);
1456 p += GEN_BNE (p, -8);
1457 p += GEN_STWCX (p, r_new, 0, r_lock);
1458 p += GEN_BNE (p, -16);
1459
1460 return p - buf;
1461 }
1462
1463 /* Generate a sequence of instructions for calling a function
1464 at address FN. Return the number of instructions written to BUF. */
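/* With IS_OPD set (64-bit ELFv1), FN is the address of a function
   descriptor rather than of code: the entry point is at offset 0, the
   TOC pointer at offset 8 and the environment pointer at offset 16,
   which is why the three loads below are needed.  */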
1465
1466 static int
1467 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1468 {
1469 uint32_t *p = buf;
1470
1471 /* Must be called via r12 so the callee can calculate the TOC address. */
1472 p += gen_limm (p, 12, fn, is_64);
1473 if (is_opd)
1474 {
1475 p += GEN_LOAD (p, 11, 12, 16, is_64);
1476 p += GEN_LOAD (p, 2, 12, 8, is_64);
1477 p += GEN_LOAD (p, 12, 12, 0, is_64);
1478 }
1479 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1480 *p++ = 0x4e800421; /* bctrl */
1481
1482 return p - buf;
1483 }
1484
1485 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1486 of instruction. This function is used to adjust pc-relative instructions
1487 when copying. */
1488
1489 static void
1490 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1491 {
1492 uint32_t insn, op6;
1493 long rel, newrel;
1494
1495 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1496 op6 = PPC_OP6 (insn);
1497
1498 if (op6 == 18 && (insn & 2) == 0)
1499 {
1500 /* branch && AA = 0 */
1501 rel = PPC_LI (insn);
1502 newrel = (oldloc - *to) + rel;
1503
1504 /* Out of range. Cannot relocate instruction. */
1505 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1506 return;
1507
1508 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1509 }
1510 else if (op6 == 16 && (insn & 2) == 0)
1511 {
1512 /* conditional branch && AA = 0 */
1513
1514 /* If the new relocation is too big for even a 26-bit unconditional
1515 branch, there is nothing we can do. Just abort.
1516
1517 Otherwise, if it fits in a 16-bit conditional branch, just
1518 copy the instruction and relocate the address.
1519
1520 If it's too big for a conditional branch (16-bit), try to invert the
1521 condition and jump with 26-bit branch. For example,
1522
1523 beq .Lgoto
1524 INSN1
1525
1526 =>
1527
1528 bne 1f (+8)
1529 b .Lgoto
1530 1:INSN1
1531
1532 After this transform, we actually jump from *TO+4 instead of *TO,
1533 so check the relocation again because it will be one insn farther than
1534 before if *TO is after OLDLOC.
1535
1536
1537 BDNZT (and the like) is transformed from
1538
1539 bdnzt eq, .Lgoto
1540 INSN1
1541
1542 =>
1543
1544 bdz 1f (+12)
1545 bf eq, 1f (+8)
1546 b .Lgoto
1547 1:INSN1
1548
1549 See also "BO field encodings". */
1550
1551 rel = PPC_BD (insn);
1552 newrel = (oldloc - *to) + rel;
1553
1554 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1555 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1556 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1557 {
1558 newrel -= 4;
1559
1560 /* Out of range. Cannot relocate instruction. */
1561 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1562 return;
1563
1564 if ((PPC_BO (insn) & 0x14) == 0x4)
1565 insn ^= (1 << 24);
1566 else if ((PPC_BO (insn) & 0x14) == 0x10)
1567 insn ^= (1 << 22);
1568
1569 /* Jump over the unconditional branch. */
1570 insn = (insn & ~0xfffc) | 0x8;
1571 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1572 *to += 4;
1573
1574 /* Build an unconditional branch and copy LK bit. */
1575 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1576 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1577 *to += 4;
1578
1579 return;
1580 }
1581 else if ((PPC_BO (insn) & 0x14) == 0)
1582 {
1583 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1584 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1585
1586 newrel -= 8;
1587
1588 /* Out of range. Cannot relocate instruction. */
1589 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1590 return;
1591
1592 /* Copy BI field. */
1593 bf_insn |= (insn & 0x1f0000);
1594
1595 /* Invert condition. */
1596 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1597 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1598
1599 write_inferior_memory (*to, (unsigned char *) &bdnz_insn, 4);
1600 *to += 4;
1601 write_inferior_memory (*to, (unsigned char *) &bf_insn, 4);
1602 *to += 4;
1603
1604 /* Build an unconditional branch and copy LK bit. */
1605 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1606 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1607 *to += 4;
1608
1609 return;
1610 }
1611 else /* (BO & 0x14) == 0x14, branch always. */
1612 {
1613 /* Out of range. Cannot relocate instruction. */
1614 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1615 return;
1616
1617 /* Build an unconditional branch and copy LK bit. */
1618 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1619 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1620 *to += 4;
1621
1622 return;
1623 }
1624 }
1625
1626 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1627 *to += 4;
1628 }
1629
1630 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1631 See target.h for details. */
1632
1633 static int
1634 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1635 CORE_ADDR collector,
1636 CORE_ADDR lockaddr,
1637 ULONGEST orig_size,
1638 CORE_ADDR *jump_entry,
1639 CORE_ADDR *trampoline,
1640 ULONGEST *trampoline_size,
1641 unsigned char *jjump_pad_insn,
1642 ULONGEST *jjump_pad_insn_size,
1643 CORE_ADDR *adjusted_insn_addr,
1644 CORE_ADDR *adjusted_insn_addr_end,
1645 char *err)
1646 {
1647 uint32_t buf[256];
1648 uint32_t *p = buf;
1649 int j, offset;
1650 CORE_ADDR buildaddr = *jump_entry;
1651 const CORE_ADDR entryaddr = *jump_entry;
1652 int rsz, min_frame, frame_size, tp_reg;
1653 #ifdef __powerpc64__
1654 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1655 int is_64 = register_size (regcache->tdesc, 0) == 8;
1656 int is_opd = is_64 && !is_elfv2_inferior ();
1657 #else
1658 int is_64 = 0, is_opd = 0;
1659 #endif
1660
1661 #ifdef __powerpc64__
1662 if (is_64)
1663 {
1664 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1665 rsz = 8;
1666 min_frame = 112;
1667 frame_size = (40 * rsz) + min_frame;
1668 tp_reg = 13;
1669 }
1670 else
1671 {
1672 #endif
1673 rsz = 4;
1674 min_frame = 16;
1675 frame_size = (40 * rsz) + min_frame;
1676 tp_reg = 2;
1677 #ifdef __powerpc64__
1678 }
1679 #endif
1680
1681 /* Stack frame layout for this jump pad,
1682
1683 High thread_area (r13/r2) |
1684 tpoint - collecting_t obj
1685 PC/<tpaddr> | +36
1686 CTR | +35
1687 LR | +34
1688 XER | +33
1689 CR | +32
1690 R31 |
1691 R29 |
1692 ... |
1693 R1 | +1
1694 R0 - collected registers
1695 ... |
1696 ... |
1697 Low Back-chain -
1698
1699
1700 The code flow of this jump pad,
1701
1702 1. Adjust SP
1703 2. Save GPR and SPR
1704 3. Prepare argument
1705 4. Call gdb_collector
1706 5. Restore GPR and SPR
1707 6. Restore SP
1708 7. Build a jump for back to the program
1709 8. Copy/relocate original instruction
1710 9. Build a jump to replace the original instruction. */
1711
1712 /* Adjust stack pointer. */
1713 if (is_64)
1714 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1715 else
1716 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1717
1718 /* Store GPRs. Save R1 later, because it had just been modified, but
1719 we want the original value. */
1720 for (j = 2; j < 32; j++)
1721 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1722 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1723 /* Set r0 to the original value of r1 before adjusting stack frame,
1724 and then save it. */
1725 p += GEN_ADDI (p, 0, 1, frame_size);
1726 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1727
1728 /* Save CR, XER, LR, and CTR. */
1729 p += GEN_MFCR (p, 3); /* mfcr r3 */
1730 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1731 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1732 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1733 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1734 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1735 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1736 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1737
1738 /* Save PC<tpaddr> */
1739 p += gen_limm (p, 3, tpaddr, is_64);
1740 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1741
1742
1743 /* Set up arguments to the collector. */
1744 /* Set r4 to collected registers. */
1745 p += GEN_ADDI (p, 4, 1, min_frame);
1746 /* Set r3 to TPOINT. */
1747 p += gen_limm (p, 3, tpoint, is_64);
1748
1749 /* Prepare collecting_t object for lock. */
1750 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1751 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1752 /* Set R5 to collecting object. */
1753 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1754
1755 p += GEN_LWSYNC (p);
1756 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1757 p += GEN_LWSYNC (p);
1758
1759 /* Call to collector. */
1760 p += gen_call (p, collector, is_64, is_opd);
1761
1762 /* Simply write 0 to release the lock. */
1763 p += gen_limm (p, 3, lockaddr, is_64);
1764 p += gen_limm (p, 4, 0, is_64);
1765 p += GEN_LWSYNC (p);
1766 p += GEN_STORE (p, 4, 3, 0, is_64);
1767
1768 /* Restore stack and registers. */
1769 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1770 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1771 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1772 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1773 p += GEN_MTCR (p, 3); /* mtcr r3 */
1774 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1775 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1776 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1777
1778 /* Restore GPRs. */
1779 for (j = 2; j < 32; j++)
1780 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1781 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1782 /* Restore SP. */
1783 p += GEN_ADDI (p, 1, 1, frame_size);
1784
1785 /* Flush instructions to inferior memory. */
1786 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1787
1788 /* Now, insert the original instruction to execute in the jump pad. */
1789 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1790 *adjusted_insn_addr_end = *adjusted_insn_addr;
1791 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1792
1793 /* Verify the relocation size. It should be 4 for a normal copy,
1794 or 8 or 12 for some conditional branches. */
1795 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1796 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1797 {
1798 sprintf (err, "E.Unexpected instruction length = %d "
1799 "when relocating instruction.",
1800 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1801 return 1;
1802 }
1803
1804 buildaddr = *adjusted_insn_addr_end;
1805 p = buf;
1806 /* Finally, write a jump back to the program. */
1807 offset = (tpaddr + 4) - buildaddr;
1808 if (offset >= (1 << 25) || offset < -(1 << 25))
1809 {
1810 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1811 "(offset 0x%x > 26-bit).", offset);
1812 return 1;
1813 }
1814 /* b <tpaddr+4> */
1815 p += GEN_B (p, offset);
1816 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1817 *jump_entry = buildaddr + (p - buf) * 4;
1818
1819 /* The jump pad is now built. Wire in a jump to our jump pad. This
1820 is always done last (by our caller actually), so that we can
1821 install fast tracepoints with threads running. This relies on
1822 the agent's atomic write support. */
1823 offset = entryaddr - tpaddr;
1824 if (offset >= (1 << 25) || offset < -(1 << 25))
1825 {
1826 sprintf (err, "E.Jump into jump pad too far from tracepoint "
1827 "(offset 0x%x > 26-bit).", offset);
1828 return 1;
1829 }
1830 /* b <jentry> */
1831 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1832 *jjump_pad_insn_size = 4;
1833
1834 return 0;
1835 }
1836
1837 /* Returns the minimum instruction length for installing a tracepoint. */
1838
1839 static int
1840 ppc_get_min_fast_tracepoint_insn_len (void)
1841 {
1842 return 4;
1843 }
1844
1845 /* Emits a given buffer into the target at current_insn_ptr. Length
1846 is in units of 32-bit words. */
1847
1848 static void
1849 emit_insns (uint32_t *buf, int n)
1850 {
1851 n = n * sizeof (uint32_t);
1852 write_inferior_memory (current_insn_ptr, (unsigned char *) buf, n);
1853 current_insn_ptr += n;
1854 }
1855
1856 #define __EMIT_ASM(NAME, INSNS) \
1857 do \
1858 { \
1859 extern uint32_t start_bcax_ ## NAME []; \
1860 extern uint32_t end_bcax_ ## NAME []; \
1861 emit_insns (start_bcax_ ## NAME, \
1862 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1863 __asm__ (".section .text.__ppcbcax\n\t" \
1864 "start_bcax_" #NAME ":\n\t" \
1865 INSNS "\n\t" \
1866 "end_bcax_" #NAME ":\n\t" \
1867 ".previous\n\t"); \
1868 } while (0)
1869
1870 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1871 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
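/* Note that EMIT_ASM assembles INSNS into this program itself, in the
   .text.__ppcbcax section bracketed by a start_bcax_<line>/end_bcax_<line>
   label pair; the expansion then copies the words between the two labels
   into the inferior at current_insn_ptr via emit_insns.  For example,
   EMIT_ASM ("li 3, 0") copies a single 4-byte instruction and advances
   current_insn_ptr by 4.  */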
1872
1873 /*
1874
1875 Bytecode execution stack frame - 32-bit
1876
1877 | LR save area (SP + 4)
1878 SP' -> +- Back chain (SP + 0)
1879 | Save r31 for accessing saved arguments
1880 | Save r30 for bytecode stack pointer
1881 | Save r4 for incoming argument *value
1882 | Save r3 for incoming argument regs
1883 r30 -> +- Bytecode execution stack
1884 |
1885 | 64 bytes (8 doublewords) initially.
1886 | Expand stack as needed.
1887 |
1888 +-
1889 | Some padding for minimum stack frame and 16-byte alignment.
1890 | 16 bytes.
1891 SP +- Back-chain (SP')
1892
1893 initial frame size
1894 = 16 + (4 * 4) + 64
1895 = 96
1896
1897 r30 is the stack-pointer for the bytecode machine.
1898 It should point to the next empty slot, so we can use LWZU for pop.
1899 r3 is used to cache the high part of the TOP value.
1900 On entry it held the first argument, the pointer to regs.
1901 r4 is used to cache the low part of the TOP value.
1902 On entry it held the second argument, the pointer to the result.
1903 *result must hold TOP when this function returns.
1904
1905 Note:
1906 * To restore stack at epilogue
1907 => sp = r31
1908 * To check stack is big enough for bytecode execution.
1909 => r30 - 8 > SP + 8
1910 * To return execution result.
1911 => 0(r4) = TOP
1912
1913 */
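/* Concretely, the prologue below establishes (SP = the adjusted r1):

      92(SP)  saved r31            r31 = SP + 96 (the SP on entry)
      88(SP)  saved r30            r30 = SP + 64 (bytecode stack top,
      84(SP)  saved *value                        grows downward)
      80(SP)  saved regs
   16..79(SP) bytecode execution stack (initially 64 bytes)
    0..15(SP) back chain and minimum-frame padding

   and the return address is kept at 4(r31), in the frame above.  */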
1914
1915 /* Regardless of endianness, register 3 is always the high part, 4 the low part.
1916 These defines are used when the register pair is stored/loaded.
1917 Likewise, to simplify the code, there is a similar define pair for 5:6. */
1918
1919 #if __BYTE_ORDER == __LITTLE_ENDIAN
1920 #define TOP_FIRST "4"
1921 #define TOP_SECOND "3"
1922 #define TMP_FIRST "6"
1923 #define TMP_SECOND "5"
1924 #else
1925 #define TOP_FIRST "3"
1926 #define TOP_SECOND "4"
1927 #define TMP_FIRST "5"
1928 #define TMP_SECOND "6"
1929 #endif
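/* For example, the epilogue below writes TOP back with
       stw TOP_FIRST,  0(5)
       stw TOP_SECOND, 4(5)
   so the low word (r4) lands at the lower address on little-endian and
   the high word (r3) does on big-endian -- either way the 8 bytes at
   *result hold the 64-bit value in the inferior's natural byte order,
   which is what the C code reading the LONGEST result expects.  */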
1930
1931 /* Emit prologue in inferior memory. See above comments. */
1932
1933 static void
1934 ppc_emit_prologue (void)
1935 {
1936 EMIT_ASM (/* Save return address. */
1937 "mflr 0 \n"
1938 "stw 0, 4(1) \n"
1939 /* Adjust SP. 96 is the initial frame size. */
1940 "stwu 1, -96(1) \n"
1941 /* Save r31, r30 and the incoming arguments. */
1942 "stw 31, 96-4(1) \n"
1943 "stw 30, 96-8(1) \n"
1944 "stw 4, 96-12(1) \n"
1945 "stw 3, 96-16(1) \n"
1946 /* Point r31 to the original r1 for accessing arguments. */
1947 "addi 31, 1, 96 \n"
1948 /* Set r30 to point to the stack top. */
1949 "addi 30, 1, 64 \n"
1950 /* Initialize r3/TOP to 0. */
1951 "li 3, 0 \n"
1952 "li 4, 0 \n");
1953 }
1954
1955 /* Emit epilogue in inferior memory. See above comments. */
1956
1957 static void
1958 ppc_emit_epilogue (void)
1959 {
1960 EMIT_ASM (/* *result = TOP */
1961 "lwz 5, -12(31) \n"
1962 "stw " TOP_FIRST ", 0(5) \n"
1963 "stw " TOP_SECOND ", 4(5) \n"
1964 /* Restore registers. */
1965 "lwz 31, -4(31) \n"
1966 "lwz 30, -8(31) \n"
1967 /* Restore SP. */
1968 "lwz 1, 0(1) \n"
1969 /* Restore LR. */
1970 "lwz 0, 4(1) \n"
1971 /* Return 0 for no-error. */
1972 "li 3, 0 \n"
1973 "mtlr 0 \n"
1974 "blr \n");
1975 }
1976
1977 /* TOP = stack[--sp] + TOP */
1978
1979 static void
1980 ppc_emit_add (void)
1981 {
1982 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1983 "lwz " TMP_SECOND ", 4(30)\n"
1984 "addc 4, 6, 4 \n"
1985 "adde 3, 5, 3 \n");
1986 }
1987
1988 /* TOP = stack[--sp] - TOP */
1989
1990 static void
1991 ppc_emit_sub (void)
1992 {
1993 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1994 "lwz " TMP_SECOND ", 4(30) \n"
1995 "subfc 4, 4, 6 \n"
1996 "subfe 3, 3, 5 \n");
1997 }
1998
1999 /* TOP = stack[--sp] * TOP */
2000
2001 static void
2002 ppc_emit_mul (void)
2003 {
2004 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2005 "lwz " TMP_SECOND ", 4(30) \n"
2006 "mulhwu 7, 6, 4 \n"
2007 "mullw 3, 6, 3 \n"
2008 "mullw 5, 4, 5 \n"
2009 "mullw 4, 6, 4 \n"
2010 "add 3, 5, 3 \n"
2011 "add 3, 7, 3 \n");
2012 }
2013
2014 /* TOP = stack[--sp] << TOP */
2015
2016 static void
2017 ppc_emit_lsh (void)
2018 {
2019 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2020 "lwz " TMP_SECOND ", 4(30) \n"
2021 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2022 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2023 "slw 5, 5, 4\n" /* Shift high part left */
2024 "slw 4, 6, 4\n" /* Shift low part left */
2025 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
2026 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
2027 "or 3, 5, 3\n"
2028 "or 3, 7, 3\n"); /* Assemble high part */
2029 }
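/* A worked example of the branch-free 64-bit shift above, for
   TMP = 0x00000000000000ff (r5 = 0, r6 = 0xff) and TOP = 40 (in r4):
     slw 5, 5, 4   ->  0       shift amounts of 32..63 yield 0
     slw 4, 6, 4   ->  0       new low word
     srw 3, 6, 3   ->  0       r3 = 32 - 40 wraps to a shift >= 32, so 0
     slw 7, 6, 7   ->  0xff00  low word shifted by 40 - 32 = 8
   giving high = 0xff00, low = 0, i.e. 0xff << 40.  */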
2030
2031 /* Top = stack[--sp] >> TOP
2032 (Arithmetic shift right) */
2033
2034 static void
2035 ppc_emit_rsh_signed (void)
2036 {
2037 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2038 "lwz " TMP_SECOND ", 4(30) \n"
2039 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2040 "sraw 3, 5, 4\n" /* Shift high part right */
2041 "cmpwi 7, 1\n"
2042 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
2043 "sraw 4, 5, 7\n" /* Shift high to low */
2044 "b 2f\n"
2045 "1:\n"
2046 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
2047 "srw 4, 6, 4\n" /* Shift low part right */
2048 "slw 5, 5, 7\n" /* Shift high to low */
2049 "or 4, 4, 5\n" /* Assemble low part */
2050 "2:\n");
2051 }
2052
2053 /* Top = stack[--sp] >> TOP
2054 (Logical shift right) */
2055
2056 static void
2057 ppc_emit_rsh_unsigned (void)
2058 {
2059 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2060 "lwz " TMP_SECOND ", 4(30) \n"
2061 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2062 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2063 "srw 6, 6, 4\n" /* Shift low part right */
2064 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
2065 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
2066 "or 6, 6, 3\n"
2067 "srw 3, 5, 4\n" /* Shift high part right */
2068 "or 4, 6, 7\n"); /* Assemble low part */
2069 }
2070
2071 /* Emit code for signed-extension specified by ARG. */
2072
2073 static void
2074 ppc_emit_ext (int arg)
2075 {
2076 switch (arg)
2077 {
2078 case 8:
2079 EMIT_ASM ("extsb 4, 4\n"
2080 "srawi 3, 4, 31");
2081 break;
2082 case 16:
2083 EMIT_ASM ("extsh 4, 4\n"
2084 "srawi 3, 4, 31");
2085 break;
2086 case 32:
2087 EMIT_ASM ("srawi 3, 4, 31");
2088 break;
2089 default:
2090 emit_error = 1;
2091 }
2092 }
2093
2094 /* Emit code for zero-extension specified by ARG. */
2095
2096 static void
2097 ppc_emit_zero_ext (int arg)
2098 {
2099 switch (arg)
2100 {
2101 case 8:
2102 EMIT_ASM ("clrlwi 4,4,24\n"
2103 "li 3, 0\n");
2104 break;
2105 case 16:
2106 EMIT_ASM ("clrlwi 4,4,16\n"
2107 "li 3, 0\n");
2108 break;
2109 case 32:
2110 EMIT_ASM ("li 3, 0");
2111 break;
2112 default:
2113 emit_error = 1;
2114 }
2115 }
2116
2117 /* TOP = !TOP
2118 i.e., TOP = (TOP == 0) ? 1 : 0; */
2119
2120 static void
2121 ppc_emit_log_not (void)
2122 {
2123 EMIT_ASM ("or 4, 3, 4 \n"
2124 "cntlzw 4, 4 \n"
2125 "srwi 4, 4, 5 \n"
2126 "li 3, 0 \n");
2127 }
2128
2129 /* TOP = stack[--sp] & TOP */
2130
2131 static void
2132 ppc_emit_bit_and (void)
2133 {
2134 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2135 "lwz " TMP_SECOND ", 4(30) \n"
2136 "and 4, 6, 4 \n"
2137 "and 3, 5, 3 \n");
2138 }
2139
2140 /* TOP = stack[--sp] | TOP */
2141
2142 static void
2143 ppc_emit_bit_or (void)
2144 {
2145 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2146 "lwz " TMP_SECOND ", 4(30) \n"
2147 "or 4, 6, 4 \n"
2148 "or 3, 5, 3 \n");
2149 }
2150
2151 /* TOP = stack[--sp] ^ TOP */
2152
2153 static void
2154 ppc_emit_bit_xor (void)
2155 {
2156 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2157 "lwz " TMP_SECOND ", 4(30) \n"
2158 "xor 4, 6, 4 \n"
2159 "xor 3, 5, 3 \n");
2160 }
2161
2162 /* TOP = ~TOP
2163 i.e., TOP = ~(TOP | TOP) */
2164
2165 static void
2166 ppc_emit_bit_not (void)
2167 {
2168 EMIT_ASM ("nor 3, 3, 3 \n"
2169 "nor 4, 4, 4 \n");
2170 }
2171
2172 /* TOP = stack[--sp] == TOP */
2173
2174 static void
2175 ppc_emit_equal (void)
2176 {
2177 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2178 "lwz " TMP_SECOND ", 4(30) \n"
2179 "xor 4, 6, 4 \n"
2180 "xor 3, 5, 3 \n"
2181 "or 4, 3, 4 \n"
2182 "cntlzw 4, 4 \n"
2183 "srwi 4, 4, 5 \n"
2184 "li 3, 0 \n");
2185 }
2186
2187 /* TOP = stack[--sp] < TOP
2188 (Signed comparison) */
2189
2190 static void
2191 ppc_emit_less_signed (void)
2192 {
2193 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2194 "lwz " TMP_SECOND ", 4(30) \n"
2195 "cmplw 6, 6, 4 \n"
2196 "cmpw 7, 5, 3 \n"
2197 /* CR6 bit 0 = low less and high equal */
2198 "crand 6*4+0, 6*4+0, 7*4+2\n"
2199 /* CR7 bit 0 = (low less and high equal) or high less */
2200 "cror 7*4+0, 7*4+0, 6*4+0\n"
2201 "mfcr 4 \n"
2202 "rlwinm 4, 4, 29, 31, 31 \n"
2203 "li 3, 0 \n");
2204 }
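/* The sequence above assembles the 64-bit signed comparison from two
   32-bit compares: the high words are compared signed (cmpw, CR7) and
   the low words unsigned (cmplw, CR6), and the result is
   "high(stack) < high(TOP), or high words equal and low(stack) <
   low(TOP) unsigned".  The mfcr/rlwinm pair then extracts CR7[LT]
   (CR bit 28) into bit 0 of r4.  */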
2205
2206 /* TOP = stack[--sp] < TOP
2207 (Unsigned comparison) */
2208
2209 static void
2210 ppc_emit_less_unsigned (void)
2211 {
2212 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2213 "lwz " TMP_SECOND ", 4(30) \n"
2214 "cmplw 6, 6, 4 \n"
2215 "cmplw 7, 5, 3 \n"
2216 /* CR6 bit 0 = low less and high equal */
2217 "crand 6*4+0, 6*4+0, 7*4+2\n"
2218 /* CR7 bit 0 = (low less and high equal) or high less */
2219 "cror 7*4+0, 7*4+0, 6*4+0\n"
2220 "mfcr 4 \n"
2221 "rlwinm 4, 4, 29, 31, 31 \n"
2222 "li 3, 0 \n");
2223 }
2224
2225 /* Access the memory at the address in TOP, with access size SIZE.
2226 Zero-extend the value read. */
2227
2228 static void
2229 ppc_emit_ref (int size)
2230 {
2231 switch (size)
2232 {
2233 case 1:
2234 EMIT_ASM ("lbz 4, 0(4)\n"
2235 "li 3, 0");
2236 break;
2237 case 2:
2238 EMIT_ASM ("lhz 4, 0(4)\n"
2239 "li 3, 0");
2240 break;
2241 case 4:
2242 EMIT_ASM ("lwz 4, 0(4)\n"
2243 "li 3, 0");
2244 break;
2245 case 8:
2246 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2247 EMIT_ASM ("lwz 3, 4(4)\n"
2248 "lwz 4, 0(4)");
2249 else
2250 EMIT_ASM ("lwz 3, 0(4)\n"
2251 "lwz 4, 4(4)");
2252 break;
2253 }
2254 }
2255
2256 /* TOP = NUM */
2257
2258 static void
2259 ppc_emit_const (LONGEST num)
2260 {
2261 uint32_t buf[10];
2262 uint32_t *p = buf;
2263
2264 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2265 p += gen_limm (p, 4, num & 0xffffffff, 0);
2266
2267 emit_insns (buf, p - buf);
2268 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2269 }
2270
2271 /* Set TOP to the value of register REG by calling the get_raw_reg function
2272 with two arguments, the collected buffer and the register number. */
2273
2274 static void
2275 ppc_emit_reg (int reg)
2276 {
2277 uint32_t buf[13];
2278 uint32_t *p = buf;
2279
2280 /* fctx->regs is passed in r3 and then saved in -16(31). */
2281 p += GEN_LWZ (p, 3, 31, -16);
2282 p += GEN_LI (p, 4, reg); /* li r4, reg */
2283 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2284
2285 emit_insns (buf, p - buf);
2286 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2287
2288 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2289 {
2290 EMIT_ASM ("mr 5, 4\n"
2291 "mr 4, 3\n"
2292 "mr 3, 5\n");
2293 }
2294 }
2295
2296 /* TOP = stack[--sp] */
2297
2298 static void
2299 ppc_emit_pop (void)
2300 {
2301 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2302 "lwz " TOP_SECOND ", 4(30) \n");
2303 }
2304
2305 /* stack[sp++] = TOP
2306
2307 Because we may use up the bytecode stack, expand it by 8 more
2308 doublewords if needed. */
2309
2310 static void
2311 ppc_emit_stack_flush (void)
2312 {
2313 /* Make sure the bytecode stack is big enough before the push.
2314 Otherwise, expand it by 64 more bytes. */
2315
2316 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2317 " stw " TOP_SECOND ", 4(30)\n"
2318 " addi 5, 30, -(8 + 8) \n"
2319 " cmpw 7, 5, 1 \n"
2320 " bgt 7, 1f \n"
2321 " stwu 31, -64(1) \n"
2322 "1:addi 30, 30, -8 \n");
2323 }
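/* In the check above, r30 - 16 <= r1 means the next push would run into
   the 16-byte minimum frame at the bottom, so the frame is grown by
   another 64 bytes.  The stwu stores r31 (the SP saved by the prologue)
   as the new back chain, so the epilogue's single "lwz 1, 0(1)" still
   restores the original stack pointer.  */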
2324
2325 /* Swap TOP and stack[sp-1] */
2326
2327 static void
2328 ppc_emit_swap (void)
2329 {
2330 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2331 "lwz " TMP_SECOND ", 12(30) \n"
2332 "stw " TOP_FIRST ", 8(30) \n"
2333 "stw " TOP_SECOND ", 12(30) \n"
2334 "mr 3, 5 \n"
2335 "mr 4, 6 \n");
2336 }
2337
2338 /* Discard N elements in the stack. Also used for ppc64. */
2339
2340 static void
2341 ppc_emit_stack_adjust (int n)
2342 {
2343 uint32_t buf[6];
2344 uint32_t *p = buf;
2345
2346 n = n << 3;
2347 if ((n >> 15) != 0)
2348 {
2349 emit_error = 1;
2350 return;
2351 }
2352
2353 p += GEN_ADDI (p, 30, 30, n);
2354
2355 emit_insns (buf, p - buf);
2356 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2357 }
2358
2359 /* Call function FN. */
2360
2361 static void
2362 ppc_emit_call (CORE_ADDR fn)
2363 {
2364 uint32_t buf[11];
2365 uint32_t *p = buf;
2366
2367 p += gen_call (p, fn, 0, 0);
2368
2369 emit_insns (buf, p - buf);
2370 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2371 }
2372
2373 /* FN's prototype is `LONGEST(*fn)(int)'.
2374 TOP = fn (arg1)
2375 */
2376
2377 static void
2378 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2379 {
2380 uint32_t buf[15];
2381 uint32_t *p = buf;
2382
2383 /* Setup argument. arg1 is a 16-bit value. */
2384 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2385 p += gen_call (p, fn, 0, 0);
2386
2387 emit_insns (buf, p - buf);
2388 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2389
2390 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2391 {
2392 EMIT_ASM ("mr 5, 4\n"
2393 "mr 4, 3\n"
2394 "mr 3, 5\n");
2395 }
2396 }
2397
2398 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2399 fn (arg1, TOP)
2400
2401 TOP should be preserved/restored before/after the call. */
2402
2403 static void
2404 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2405 {
2406 uint32_t buf[21];
2407 uint32_t *p = buf;
2408
2409 /* Save TOP. 0(30) is next-empty. */
2410 p += GEN_STW (p, 3, 30, 0);
2411 p += GEN_STW (p, 4, 30, 4);
2412
2413 /* Setup argument. arg1 is a 16-bit value. */
2414 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2415 {
2416 p += GEN_MR (p, 5, 4);
2417 p += GEN_MR (p, 6, 3);
2418 }
2419 else
2420 {
2421 p += GEN_MR (p, 5, 3);
2422 p += GEN_MR (p, 6, 4);
2423 }
2424 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2425 p += gen_call (p, fn, 0, 0);
2426
2427 /* Restore TOP */
2428 p += GEN_LWZ (p, 3, 30, 0);
2429 p += GEN_LWZ (p, 4, 30, 4);
2430
2431 emit_insns (buf, p - buf);
2432 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2433 }
2434
2435 /* Note in the following goto ops:
2436
2437 When emitting goto, the target address is later relocated by
2438 write_goto_address. OFFSET_P is the offset of the branch instruction
2439 in the code sequence, and *SIZE_P records how to relocate the instruction,
2440 as recognized by ppc_write_goto_address. In the current implementation,
2441 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2442 */
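/* For instance, the "1:bne 0, 1b" emitted by ppc_emit_if_goto is a
   conditional branch, so SIZE is 14: once the real target is known,
   ppc_write_goto_address keeps the opcode and condition fields and
   rewrites only the 14-bit displacement field,
       insn = (insn & ~0xfffc) | (rel & 0xfffc);
   while an unconditional "b" (SIZE 24) has its 24-bit displacement
   patched with mask 0x3fffffc.  */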
2443
2444 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2445
2446 static void
2447 ppc_emit_if_goto (int *offset_p, int *size_p)
2448 {
2449 EMIT_ASM ("or. 3, 3, 4 \n"
2450 "lwzu " TOP_FIRST ", 8(30) \n"
2451 "lwz " TOP_SECOND ", 4(30) \n"
2452 "1:bne 0, 1b \n");
2453
2454 if (offset_p)
2455 *offset_p = 12;
2456 if (size_p)
2457 *size_p = 14;
2458 }
2459
2460 /* Unconditional goto. Also used for ppc64. */
2461
2462 static void
2463 ppc_emit_goto (int *offset_p, int *size_p)
2464 {
2465 EMIT_ASM ("1:b 1b");
2466
2467 if (offset_p)
2468 *offset_p = 0;
2469 if (size_p)
2470 *size_p = 24;
2471 }
2472
2473 /* Goto if stack[--sp] == TOP */
2474
2475 static void
2476 ppc_emit_eq_goto (int *offset_p, int *size_p)
2477 {
2478 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2479 "lwz " TMP_SECOND ", 4(30) \n"
2480 "xor 4, 6, 4 \n"
2481 "xor 3, 5, 3 \n"
2482 "or. 3, 3, 4 \n"
2483 "lwzu " TOP_FIRST ", 8(30) \n"
2484 "lwz " TOP_SECOND ", 4(30) \n"
2485 "1:beq 0, 1b \n");
2486
2487 if (offset_p)
2488 *offset_p = 28;
2489 if (size_p)
2490 *size_p = 14;
2491 }
2492
2493 /* Goto if stack[--sp] != TOP */
2494
2495 static void
2496 ppc_emit_ne_goto (int *offset_p, int *size_p)
2497 {
2498 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2499 "lwz " TMP_SECOND ", 4(30) \n"
2500 "xor 4, 6, 4 \n"
2501 "xor 3, 5, 3 \n"
2502 "or. 3, 3, 4 \n"
2503 "lwzu " TOP_FIRST ", 8(30) \n"
2504 "lwz " TOP_SECOND ", 4(30) \n"
2505 "1:bne 0, 1b \n");
2506
2507 if (offset_p)
2508 *offset_p = 28;
2509 if (size_p)
2510 *size_p = 14;
2511 }
2512
2513 /* Goto if stack[--sp] < TOP */
2514
2515 static void
2516 ppc_emit_lt_goto (int *offset_p, int *size_p)
2517 {
2518 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2519 "lwz " TMP_SECOND ", 4(30) \n"
2520 "cmplw 6, 6, 4 \n"
2521 "cmpw 7, 5, 3 \n"
2522 /* CR6 bit 0 = low less and high equal */
2523 "crand 6*4+0, 6*4+0, 7*4+2\n"
2524 /* CR7 bit 0 = (low less and high equal) or high less */
2525 "cror 7*4+0, 7*4+0, 6*4+0\n"
2526 "lwzu " TOP_FIRST ", 8(30) \n"
2527 "lwz " TOP_SECOND ", 4(30)\n"
2528 "1:blt 7, 1b \n");
2529
2530 if (offset_p)
2531 *offset_p = 32;
2532 if (size_p)
2533 *size_p = 14;
2534 }
2535
2536 /* Goto if stack[--sp] <= TOP */
2537
2538 static void
2539 ppc_emit_le_goto (int *offset_p, int *size_p)
2540 {
2541 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2542 "lwz " TMP_SECOND ", 4(30) \n"
2543 "cmplw 6, 6, 4 \n"
2544 "cmpw 7, 5, 3 \n"
2545 /* CR6 bit 0 = low less/equal and high equal */
2546 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2547 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2548 "cror 7*4+0, 7*4+0, 6*4+0\n"
2549 "lwzu " TOP_FIRST ", 8(30) \n"
2550 "lwz " TOP_SECOND ", 4(30)\n"
2551 "1:blt 7, 1b \n");
2552
2553 if (offset_p)
2554 *offset_p = 32;
2555 if (size_p)
2556 *size_p = 14;
2557 }
2558
2559 /* Goto if stack[--sp] > TOP */
2560
2561 static void
2562 ppc_emit_gt_goto (int *offset_p, int *size_p)
2563 {
2564 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2565 "lwz " TMP_SECOND ", 4(30) \n"
2566 "cmplw 6, 6, 4 \n"
2567 "cmpw 7, 5, 3 \n"
2568 /* CR6 bit 0 = low greater and high equal */
2569 "crand 6*4+0, 6*4+1, 7*4+2\n"
2570 /* CR7 bit 0 = (low greater and high equal) or high greater */
2571 "cror 7*4+0, 7*4+1, 6*4+0\n"
2572 "lwzu " TOP_FIRST ", 8(30) \n"
2573 "lwz " TOP_SECOND ", 4(30)\n"
2574 "1:blt 7, 1b \n");
2575
2576 if (offset_p)
2577 *offset_p = 32;
2578 if (size_p)
2579 *size_p = 14;
2580 }
2581
2582 /* Goto if stack[--sp] >= TOP */
2583
2584 static void
2585 ppc_emit_ge_goto (int *offset_p, int *size_p)
2586 {
2587 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2588 "lwz " TMP_SECOND ", 4(30) \n"
2589 "cmplw 6, 6, 4 \n"
2590 "cmpw 7, 5, 3 \n"
2591 /* CR6 bit 0 = low ge and high equal */
2592 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2593 /* CR7 bit 0 = (low ge and high equal) or high greater */
2594 "cror 7*4+0, 7*4+1, 6*4+0\n"
2595 "lwzu " TOP_FIRST ", 8(30)\n"
2596 "lwz " TOP_SECOND ", 4(30)\n"
2597 "1:blt 7, 1b \n");
2598
2599 if (offset_p)
2600 *offset_p = 32;
2601 if (size_p)
2602 *size_p = 14;
2603 }
2604
2605 /* Relocate a previously emitted branch instruction. FROM is the address
2606 of the branch instruction, TO is the goto target address, and SIZE
2607 is the value we set via *SIZE_P before. Currently, it is either
2608 24 or 14, for branch and conditional-branch instructions.
2609 Also used for ppc64. */
2610
2611 static void
2612 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2613 {
2614 long rel = to - from;
2615 uint32_t insn;
2616 int opcd;
2617
2618 read_inferior_memory (from, (unsigned char *) &insn, 4);
2619 opcd = (insn >> 26) & 0x3f;
2620
2621 switch (size)
2622 {
2623 case 14:
2624 if (opcd != 16
2625 || (rel >= (1 << 15) || rel < -(1 << 15)))
2626 emit_error = 1;
2627 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2628 break;
2629 case 24:
2630 if (opcd != 18
2631 || (rel >= (1 << 25) || rel < -(1 << 25)))
2632 emit_error = 1;
2633 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2634 break;
2635 default:
2636 emit_error = 1;
2637 }
2638
2639 if (!emit_error)
2640 write_inferior_memory (from, (unsigned char *) &insn, 4);
2641 }
2642
2643 /* Table of emit ops for 32-bit. */
2644
2645 static struct emit_ops ppc_emit_ops_impl =
2646 {
2647 ppc_emit_prologue,
2648 ppc_emit_epilogue,
2649 ppc_emit_add,
2650 ppc_emit_sub,
2651 ppc_emit_mul,
2652 ppc_emit_lsh,
2653 ppc_emit_rsh_signed,
2654 ppc_emit_rsh_unsigned,
2655 ppc_emit_ext,
2656 ppc_emit_log_not,
2657 ppc_emit_bit_and,
2658 ppc_emit_bit_or,
2659 ppc_emit_bit_xor,
2660 ppc_emit_bit_not,
2661 ppc_emit_equal,
2662 ppc_emit_less_signed,
2663 ppc_emit_less_unsigned,
2664 ppc_emit_ref,
2665 ppc_emit_if_goto,
2666 ppc_emit_goto,
2667 ppc_write_goto_address,
2668 ppc_emit_const,
2669 ppc_emit_call,
2670 ppc_emit_reg,
2671 ppc_emit_pop,
2672 ppc_emit_stack_flush,
2673 ppc_emit_zero_ext,
2674 ppc_emit_swap,
2675 ppc_emit_stack_adjust,
2676 ppc_emit_int_call_1,
2677 ppc_emit_void_call_2,
2678 ppc_emit_eq_goto,
2679 ppc_emit_ne_goto,
2680 ppc_emit_lt_goto,
2681 ppc_emit_le_goto,
2682 ppc_emit_gt_goto,
2683 ppc_emit_ge_goto
2684 };
2685
2686 #ifdef __powerpc64__
2687
2688 /*
2689
2690 Bytecode execution stack frame - 64-bit
2691
2692 | LR save area (SP + 16)
2693 | CR save area (SP + 8)
2694 SP' -> +- Back chain (SP + 0)
2695 | Save r31 for accessing saved arguments
2696 | Save r30 for bytecode stack pointer
2697 | Save r4 for incoming argument *value
2698 | Save r3 for incoming argument regs
2699 r30 -> +- Bytecode execution stack
2700 |
2701 | 64 bytes (8 doublewords) initially.
2702 | Expand stack as needed.
2703 |
2704 +-
2705 | Some padding for minimum stack frame.
2706 | 112 for ELFv1.
2707 SP +- Back-chain (SP')
2708
2709 initial frame size
2710 = 112 + (4 * 8) + 64
2711 = 208
2712
2713 r30 is the stack-pointer for the bytecode machine.
2714 It should point to the next empty slot, so we can use LDU for pop.
2715 r3 is used to cache the TOP value.
2716 On entry it held the first argument, the pointer to regs.
2717 r4 held the second argument, the pointer to the result.
2718 *result must hold TOP when this function returns.
2719
2720 Note:
2721 * To restore stack at epilogue
2722 => sp = r31
2723 * To check stack is big enough for bytecode execution.
2724 => r30 - 8 > SP + 112
2725 * To return execution result.
2726 => 0(r4) = TOP
2727
2728 */
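/* Concretely, the prologues below establish (SP = the adjusted r1, the
   same 208-byte layout being used for both ELFv1 and ELFv2):

      200(SP)  saved r31            r31 = SP + 208 (the SP on entry)
      192(SP)  saved r30            r30 = SP + 168 (bytecode stack top,
      184(SP)  saved *value                         grows downward)
      176(SP)  saved regs
   112..175(SP) bytecode execution stack (initially 64 bytes)
     0..111(SP) minimum stack frame (back chain, LR/CR save doublewords,
                etc.) for the calls made from the generated code  */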
2729
2730 /* Emit prologue in inferior memory. See above comments. */
2731
2732 static void
2733 ppc64v1_emit_prologue (void)
2734 {
2735 /* On ELFv1, function pointers really point to a function descriptor,
2736 so emit one here. We don't care about the contents of words 1 and 2,
2737 so let them just overlap our code. */
2738 uint64_t opd = current_insn_ptr + 8;
2739 uint32_t buf[2];
2740
2741 /* Mind the strict aliasing rules. */
2742 memcpy (buf, &opd, sizeof buf);
2743 emit_insns (buf, 2);
2744 EMIT_ASM (/* Save return address. */
2745 "mflr 0 \n"
2746 "std 0, 16(1) \n"
2747 /* Save r31, r30 and the incoming arguments. */
2748 "std 31, -8(1) \n"
2749 "std 30, -16(1) \n"
2750 "std 4, -24(1) \n"
2751 "std 3, -32(1) \n"
2752 /* Point r31 to the current r1 for accessing arguments. */
2753 "mr 31, 1 \n"
2754 /* Adjust SP. 208 is the initial frame size. */
2755 "stdu 1, -208(1) \n"
2756 /* Set r30 to point to the stack top. */
2757 "addi 30, 1, 168 \n"
2758 /* Initialize r3/TOP to 0. */
2759 "li 3, 0 \n");
2760 }
2761
2762 /* Emit prologue in inferior memory. See above comments. */
2763
2764 static void
2765 ppc64v2_emit_prologue (void)
2766 {
2767 EMIT_ASM (/* Save return address. */
2768 "mflr 0 \n"
2769 "std 0, 16(1) \n"
2770 /* Save r31, r30 and the incoming arguments. */
2771 "std 31, -8(1) \n"
2772 "std 30, -16(1) \n"
2773 "std 4, -24(1) \n"
2774 "std 3, -32(1) \n"
2775 /* Point r31 to the current r1 for accessing arguments. */
2776 "mr 31, 1 \n"
2777 /* Adjust SP. 208 is the initial frame size. */
2778 "stdu 1, -208(1) \n"
2779 /* Set r30 to point to the stack top. */
2780 "addi 30, 1, 168 \n"
2781 /* Initialize r3/TOP to 0. */
2782 "li 3, 0 \n");
2783 }
2784
2785 /* Emit epilogue in inferior memory. See above comments. */
2786
2787 static void
2788 ppc64_emit_epilogue (void)
2789 {
2790 EMIT_ASM (/* Restore SP. */
2791 "ld 1, 0(1) \n"
2792 /* *result = TOP */
2793 "ld 4, -24(1) \n"
2794 "std 3, 0(4) \n"
2795 /* Restore registers. */
2796 "ld 31, -8(1) \n"
2797 "ld 30, -16(1) \n"
2798 /* Restore LR. */
2799 "ld 0, 16(1) \n"
2800 /* Return 0 for no-error. */
2801 "li 3, 0 \n"
2802 "mtlr 0 \n"
2803 "blr \n");
2804 }
2805
2806 /* TOP = stack[--sp] + TOP */
2807
2808 static void
2809 ppc64_emit_add (void)
2810 {
2811 EMIT_ASM ("ldu 4, 8(30) \n"
2812 "add 3, 4, 3 \n");
2813 }
2814
2815 /* TOP = stack[--sp] - TOP */
2816
2817 static void
2818 ppc64_emit_sub (void)
2819 {
2820 EMIT_ASM ("ldu 4, 8(30) \n"
2821 "sub 3, 4, 3 \n");
2822 }
2823
2824 /* TOP = stack[--sp] * TOP */
2825
2826 static void
2827 ppc64_emit_mul (void)
2828 {
2829 EMIT_ASM ("ldu 4, 8(30) \n"
2830 "mulld 3, 4, 3 \n");
2831 }
2832
2833 /* TOP = stack[--sp] << TOP */
2834
2835 static void
2836 ppc64_emit_lsh (void)
2837 {
2838 EMIT_ASM ("ldu 4, 8(30) \n"
2839 "sld 3, 4, 3 \n");
2840 }
2841
2842 /* Top = stack[--sp] >> TOP
2843 (Arithmetic shift right) */
2844
2845 static void
2846 ppc64_emit_rsh_signed (void)
2847 {
2848 EMIT_ASM ("ldu 4, 8(30) \n"
2849 "srad 3, 4, 3 \n");
2850 }
2851
2852 /* Top = stack[--sp] >> TOP
2853 (Logical shift right) */
2854
2855 static void
2856 ppc64_emit_rsh_unsigned (void)
2857 {
2858 EMIT_ASM ("ldu 4, 8(30) \n"
2859 "srd 3, 4, 3 \n");
2860 }
2861
2862 /* Emit code for signed-extension specified by ARG. */
2863
2864 static void
2865 ppc64_emit_ext (int arg)
2866 {
2867 switch (arg)
2868 {
2869 case 8:
2870 EMIT_ASM ("extsb 3, 3");
2871 break;
2872 case 16:
2873 EMIT_ASM ("extsh 3, 3");
2874 break;
2875 case 32:
2876 EMIT_ASM ("extsw 3, 3");
2877 break;
2878 default:
2879 emit_error = 1;
2880 }
2881 }
2882
2883 /* Emit code for zero-extension specified by ARG. */
2884
2885 static void
2886 ppc64_emit_zero_ext (int arg)
2887 {
2888 switch (arg)
2889 {
2890 case 8:
2891 EMIT_ASM ("rldicl 3,3,0,56");
2892 break;
2893 case 16:
2894 EMIT_ASM ("rldicl 3,3,0,48");
2895 break;
2896 case 32:
2897 EMIT_ASM ("rldicl 3,3,0,32");
2898 break;
2899 default:
2900 emit_error = 1;
2901 }
2902 }
2903
2904 /* TOP = !TOP
2905 i.e., TOP = (TOP == 0) ? 1 : 0; */
2906
2907 static void
2908 ppc64_emit_log_not (void)
2909 {
2910 EMIT_ASM ("cntlzd 3, 3 \n"
2911 "srdi 3, 3, 6 \n");
2912 }
2913
2914 /* TOP = stack[--sp] & TOP */
2915
2916 static void
2917 ppc64_emit_bit_and (void)
2918 {
2919 EMIT_ASM ("ldu 4, 8(30) \n"
2920 "and 3, 4, 3 \n");
2921 }
2922
2923 /* TOP = stack[--sp] | TOP */
2924
2925 static void
2926 ppc64_emit_bit_or (void)
2927 {
2928 EMIT_ASM ("ldu 4, 8(30) \n"
2929 "or 3, 4, 3 \n");
2930 }
2931
2932 /* TOP = stack[--sp] ^ TOP */
2933
2934 static void
2935 ppc64_emit_bit_xor (void)
2936 {
2937 EMIT_ASM ("ldu 4, 8(30) \n"
2938 "xor 3, 4, 3 \n");
2939 }
2940
2941 /* TOP = ~TOP
2942 i.e., TOP = ~(TOP | TOP) */
2943
2944 static void
2945 ppc64_emit_bit_not (void)
2946 {
2947 EMIT_ASM ("nor 3, 3, 3 \n");
2948 }
2949
2950 /* TOP = stack[--sp] == TOP */
2951
2952 static void
2953 ppc64_emit_equal (void)
2954 {
2955 EMIT_ASM ("ldu 4, 8(30) \n"
2956 "xor 3, 3, 4 \n"
2957 "cntlzd 3, 3 \n"
2958 "srdi 3, 3, 6 \n");
2959 }
2960
2961 /* TOP = stack[--sp] < TOP
2962 (Signed comparison) */
2963
2964 static void
2965 ppc64_emit_less_signed (void)
2966 {
2967 EMIT_ASM ("ldu 4, 8(30) \n"
2968 "cmpd 7, 4, 3 \n"
2969 "mfcr 3 \n"
2970 "rlwinm 3, 3, 29, 31, 31 \n");
2971 }
2972
2973 /* TOP = stack[--sp] < TOP
2974 (Unsigned comparison) */
2975
2976 static void
2977 ppc64_emit_less_unsigned (void)
2978 {
2979 EMIT_ASM ("ldu 4, 8(30) \n"
2980 "cmpld 7, 4, 3 \n"
2981 "mfcr 3 \n"
2982 "rlwinm 3, 3, 29, 31, 31 \n");
2983 }
2984
2985 /* Access the memory at the address in TOP, with access size SIZE.
2986 Zero-extend the value read. */
2987
2988 static void
2989 ppc64_emit_ref (int size)
2990 {
2991 switch (size)
2992 {
2993 case 1:
2994 EMIT_ASM ("lbz 3, 0(3)");
2995 break;
2996 case 2:
2997 EMIT_ASM ("lhz 3, 0(3)");
2998 break;
2999 case 4:
3000 EMIT_ASM ("lwz 3, 0(3)");
3001 break;
3002 case 8:
3003 EMIT_ASM ("ld 3, 0(3)");
3004 break;
3005 }
3006 }
3007
3008 /* TOP = NUM */
3009
3010 static void
3011 ppc64_emit_const (LONGEST num)
3012 {
3013 uint32_t buf[5];
3014 uint32_t *p = buf;
3015
3016 p += gen_limm (p, 3, num, 1);
3017
3018 emit_insns (buf, p - buf);
3019 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3020 }
3021
3022 /* Set TOP to the value of register REG by calling the get_raw_reg function
3023 with two arguments, the collected buffer and the register number. */
3024
3025 static void
3026 ppc64v1_emit_reg (int reg)
3027 {
3028 uint32_t buf[15];
3029 uint32_t *p = buf;
3030
3031 /* fctx->regs is passed in r3 and then saved in 176(1). */
3032 p += GEN_LD (p, 3, 31, -32);
3033 p += GEN_LI (p, 4, reg);
3034 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3035 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
3036 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3037
3038 emit_insns (buf, p - buf);
3039 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3040 }
3041
3042 /* Likewise, for ELFv2. */
3043
3044 static void
3045 ppc64v2_emit_reg (int reg)
3046 {
3047 uint32_t buf[12];
3048 uint32_t *p = buf;
3049
3050 /* fctx->regs is passed in r3 and then saved in 176(1). */
3051 p += GEN_LD (p, 3, 31, -32);
3052 p += GEN_LI (p, 4, reg);
3053 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3054 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
3055 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3056
3057 emit_insns (buf, p - buf);
3058 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3059 }
3060
3061 /* TOP = stack[--sp] */
3062
3063 static void
3064 ppc64_emit_pop (void)
3065 {
3066 EMIT_ASM ("ldu 3, 8(30)");
3067 }
3068
3069 /* stack[sp++] = TOP
3070
3071 Because we may use up the bytecode stack, expand it by 8 more
3072 doublewords if needed. */
3073
3074 static void
3075 ppc64_emit_stack_flush (void)
3076 {
3077 /* Make sure the bytecode stack is big enough before the push.
3078 Otherwise, expand it by 64 more bytes. */
3079
3080 EMIT_ASM (" std 3, 0(30) \n"
3081 " addi 4, 30, -(112 + 8) \n"
3082 " cmpd 7, 4, 1 \n"
3083 " bgt 7, 1f \n"
3084 " stdu 31, -64(1) \n"
3085 "1:addi 30, 30, -8 \n");
3086 }
3087
3088 /* Swap TOP and stack[sp-1] */
3089
3090 static void
3091 ppc64_emit_swap (void)
3092 {
3093 EMIT_ASM ("ld 4, 8(30) \n"
3094 "std 3, 8(30) \n"
3095 "mr 3, 4 \n");
3096 }
3097
3098 /* Call function FN - ELFv1. */
3099
3100 static void
3101 ppc64v1_emit_call (CORE_ADDR fn)
3102 {
3103 uint32_t buf[13];
3104 uint32_t *p = buf;
3105
3106 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3107 p += gen_call (p, fn, 1, 1);
3108 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3109
3110 emit_insns (buf, p - buf);
3111 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3112 }
3113
3114 /* Call function FN - ELFv2. */
3115
3116 static void
3117 ppc64v2_emit_call (CORE_ADDR fn)
3118 {
3119 uint32_t buf[10];
3120 uint32_t *p = buf;
3121
3122 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3123 p += gen_call (p, fn, 1, 0);
3124 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3125
3126 emit_insns (buf, p - buf);
3127 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3128 }
3129
3130 /* FN's prototype is `LONGEST(*fn)(int)'.
3131 TOP = fn (arg1)
3132 */
3133
3134 static void
3135 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3136 {
3137 uint32_t buf[13];
3138 uint32_t *p = buf;
3139
3140 /* Setup argument. arg1 is a 16-bit value. */
3141 p += gen_limm (p, 3, arg1, 1);
3142 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3143 p += gen_call (p, fn, 1, 1);
3144 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3145
3146 emit_insns (buf, p - buf);
3147 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3148 }
3149
3150 /* Likewise for ELFv2. */
3151
3152 static void
3153 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3154 {
3155 uint32_t buf[10];
3156 uint32_t *p = buf;
3157
3158 /* Setup argument. arg1 is a 16-bit value. */
3159 p += gen_limm (p, 3, arg1, 1);
3160 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3161 p += gen_call (p, fn, 1, 0);
3162 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3163
3164 emit_insns (buf, p - buf);
3165 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3166 }
3167
3168 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3169 fn (arg1, TOP)
3170
3171 TOP should be preserved/restored before/after the call. */
3172
3173 static void
3174 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3175 {
3176 uint32_t buf[17];
3177 uint32_t *p = buf;
3178
3179 /* Save TOP. 0(30) is next-empty. */
3180 p += GEN_STD (p, 3, 30, 0);
3181
3182 /* Setup argument. arg1 is a 16-bit value. */
3183 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3184 p += gen_limm (p, 3, arg1, 1);
3185 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3186 p += gen_call (p, fn, 1, 1);
3187 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3188
3189 /* Restore TOP */
3190 p += GEN_LD (p, 3, 30, 0);
3191
3192 emit_insns (buf, p - buf);
3193 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3194 }
3195
3196 /* Likewise for ELFv2. */
3197
3198 static void
3199 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3200 {
3201 uint32_t buf[14];
3202 uint32_t *p = buf;
3203
3204 /* Save TOP. 0(30) is next-empty. */
3205 p += GEN_STD (p, 3, 30, 0);
3206
3207 /* Setup argument. arg1 is a 16-bit value. */
3208 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3209 p += gen_limm (p, 3, arg1, 1);
3210 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3211 p += gen_call (p, fn, 1, 0);
3212 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3213
3214 /* Restore TOP */
3215 p += GEN_LD (p, 3, 30, 0);
3216
3217 emit_insns (buf, p - buf);
3218 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3219 }
3220
3221 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3222
3223 static void
3224 ppc64_emit_if_goto (int *offset_p, int *size_p)
3225 {
3226 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3227 "ldu 3, 8(30) \n"
3228 "1:bne 7, 1b \n");
3229
3230 if (offset_p)
3231 *offset_p = 8;
3232 if (size_p)
3233 *size_p = 14;
3234 }
3235
3236 /* Goto if stack[--sp] == TOP */
3237
3238 static void
3239 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3240 {
3241 EMIT_ASM ("ldu 4, 8(30) \n"
3242 "cmpd 7, 4, 3 \n"
3243 "ldu 3, 8(30) \n"
3244 "1:beq 7, 1b \n");
3245
3246 if (offset_p)
3247 *offset_p = 12;
3248 if (size_p)
3249 *size_p = 14;
3250 }
3251
3252 /* Goto if stack[--sp] != TOP */
3253
3254 static void
3255 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3256 {
3257 EMIT_ASM ("ldu 4, 8(30) \n"
3258 "cmpd 7, 4, 3 \n"
3259 "ldu 3, 8(30) \n"
3260 "1:bne 7, 1b \n");
3261
3262 if (offset_p)
3263 *offset_p = 12;
3264 if (size_p)
3265 *size_p = 14;
3266 }
3267
3268 /* Goto if stack[--sp] < TOP */
3269
3270 static void
3271 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3272 {
3273 EMIT_ASM ("ldu 4, 8(30) \n"
3274 "cmpd 7, 4, 3 \n"
3275 "ldu 3, 8(30) \n"
3276 "1:blt 7, 1b \n");
3277
3278 if (offset_p)
3279 *offset_p = 12;
3280 if (size_p)
3281 *size_p = 14;
3282 }
3283
3284 /* Goto if stack[--sp] <= TOP */
3285
3286 static void
3287 ppc64_emit_le_goto (int *offset_p, int *size_p)
3288 {
3289 EMIT_ASM ("ldu 4, 8(30) \n"
3290 "cmpd 7, 4, 3 \n"
3291 "ldu 3, 8(30) \n"
3292 "1:ble 7, 1b \n");
3293
3294 if (offset_p)
3295 *offset_p = 12;
3296 if (size_p)
3297 *size_p = 14;
3298 }
3299
3300 /* Goto if stack[--sp] > TOP */
3301
3302 static void
3303 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3304 {
3305 EMIT_ASM ("ldu 4, 8(30) \n"
3306 "cmpd 7, 4, 3 \n"
3307 "ldu 3, 8(30) \n"
3308 "1:bgt 7, 1b \n");
3309
3310 if (offset_p)
3311 *offset_p = 12;
3312 if (size_p)
3313 *size_p = 14;
3314 }
3315
3316 /* Goto if stack[--sp] >= TOP */
3317
3318 static void
3319 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3320 {
3321 EMIT_ASM ("ldu 4, 8(30) \n"
3322 "cmpd 7, 4, 3 \n"
3323 "ldu 3, 8(30) \n"
3324 "1:bge 7, 1b \n");
3325
3326 if (offset_p)
3327 *offset_p = 12;
3328 if (size_p)
3329 *size_p = 14;
3330 }
3331
3332 /* Table of emit ops for 64-bit ELFv1. */
3333
3334 static struct emit_ops ppc64v1_emit_ops_impl =
3335 {
3336 ppc64v1_emit_prologue,
3337 ppc64_emit_epilogue,
3338 ppc64_emit_add,
3339 ppc64_emit_sub,
3340 ppc64_emit_mul,
3341 ppc64_emit_lsh,
3342 ppc64_emit_rsh_signed,
3343 ppc64_emit_rsh_unsigned,
3344 ppc64_emit_ext,
3345 ppc64_emit_log_not,
3346 ppc64_emit_bit_and,
3347 ppc64_emit_bit_or,
3348 ppc64_emit_bit_xor,
3349 ppc64_emit_bit_not,
3350 ppc64_emit_equal,
3351 ppc64_emit_less_signed,
3352 ppc64_emit_less_unsigned,
3353 ppc64_emit_ref,
3354 ppc64_emit_if_goto,
3355 ppc_emit_goto,
3356 ppc_write_goto_address,
3357 ppc64_emit_const,
3358 ppc64v1_emit_call,
3359 ppc64v1_emit_reg,
3360 ppc64_emit_pop,
3361 ppc64_emit_stack_flush,
3362 ppc64_emit_zero_ext,
3363 ppc64_emit_swap,
3364 ppc_emit_stack_adjust,
3365 ppc64v1_emit_int_call_1,
3366 ppc64v1_emit_void_call_2,
3367 ppc64_emit_eq_goto,
3368 ppc64_emit_ne_goto,
3369 ppc64_emit_lt_goto,
3370 ppc64_emit_le_goto,
3371 ppc64_emit_gt_goto,
3372 ppc64_emit_ge_goto
3373 };
3374
3375 /* Table of emit ops for 64-bit ELFv2. */
3376
3377 static struct emit_ops ppc64v2_emit_ops_impl =
3378 {
3379 ppc64v2_emit_prologue,
3380 ppc64_emit_epilogue,
3381 ppc64_emit_add,
3382 ppc64_emit_sub,
3383 ppc64_emit_mul,
3384 ppc64_emit_lsh,
3385 ppc64_emit_rsh_signed,
3386 ppc64_emit_rsh_unsigned,
3387 ppc64_emit_ext,
3388 ppc64_emit_log_not,
3389 ppc64_emit_bit_and,
3390 ppc64_emit_bit_or,
3391 ppc64_emit_bit_xor,
3392 ppc64_emit_bit_not,
3393 ppc64_emit_equal,
3394 ppc64_emit_less_signed,
3395 ppc64_emit_less_unsigned,
3396 ppc64_emit_ref,
3397 ppc64_emit_if_goto,
3398 ppc_emit_goto,
3399 ppc_write_goto_address,
3400 ppc64_emit_const,
3401 ppc64v2_emit_call,
3402 ppc64v2_emit_reg,
3403 ppc64_emit_pop,
3404 ppc64_emit_stack_flush,
3405 ppc64_emit_zero_ext,
3406 ppc64_emit_swap,
3407 ppc_emit_stack_adjust,
3408 ppc64v2_emit_int_call_1,
3409 ppc64v2_emit_void_call_2,
3410 ppc64_emit_eq_goto,
3411 ppc64_emit_ne_goto,
3412 ppc64_emit_lt_goto,
3413 ppc64_emit_le_goto,
3414 ppc64_emit_gt_goto,
3415 ppc64_emit_ge_goto
3416 };
3417
3418 #endif
3419
3420 /* Implementation of linux_target_ops method "emit_ops". */
3421
3422 static struct emit_ops *
3423 ppc_emit_ops (void)
3424 {
3425 #ifdef __powerpc64__
3426 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3427
3428 if (register_size (regcache->tdesc, 0) == 8)
3429 {
3430 if (is_elfv2_inferior ())
3431 return &ppc64v2_emit_ops_impl;
3432 else
3433 return &ppc64v1_emit_ops_impl;
3434 }
3435 #endif
3436 return &ppc_emit_ops_impl;
3437 }
3438
3439 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3440
3441 static int
3442 ppc_get_ipa_tdesc_idx (void)
3443 {
3444 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3445 const struct target_desc *tdesc = regcache->tdesc;
3446
3447 #ifdef __powerpc64__
3448 if (tdesc == tdesc_powerpc_64l)
3449 return PPC_TDESC_BASE;
3450 if (tdesc == tdesc_powerpc_altivec64l)
3451 return PPC_TDESC_ALTIVEC;
3452 if (tdesc == tdesc_powerpc_cell64l)
3453 return PPC_TDESC_CELL;
3454 if (tdesc == tdesc_powerpc_vsx64l)
3455 return PPC_TDESC_VSX;
3456 if (tdesc == tdesc_powerpc_isa205_64l)
3457 return PPC_TDESC_ISA205;
3458 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3459 return PPC_TDESC_ISA205_ALTIVEC;
3460 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3461 return PPC_TDESC_ISA205_VSX;
3462 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3463 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3464 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3465 return PPC_TDESC_ISA207_VSX;
3466 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3467 return PPC_TDESC_ISA207_HTM_VSX;
3468 #endif
3469
3470 if (tdesc == tdesc_powerpc_32l)
3471 return PPC_TDESC_BASE;
3472 if (tdesc == tdesc_powerpc_altivec32l)
3473 return PPC_TDESC_ALTIVEC;
3474 if (tdesc == tdesc_powerpc_cell32l)
3475 return PPC_TDESC_CELL;
3476 if (tdesc == tdesc_powerpc_vsx32l)
3477 return PPC_TDESC_VSX;
3478 if (tdesc == tdesc_powerpc_isa205_32l)
3479 return PPC_TDESC_ISA205;
3480 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3481 return PPC_TDESC_ISA205_ALTIVEC;
3482 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3483 return PPC_TDESC_ISA205_VSX;
3484 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3485 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3486 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3487 return PPC_TDESC_ISA207_VSX;
3488 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3489 return PPC_TDESC_ISA207_HTM_VSX;
3490 if (tdesc == tdesc_powerpc_e500l)
3491 return PPC_TDESC_E500;
3492
3493 return 0;
3494 }
3495
3496 struct linux_target_ops the_low_target = {
3497 ppc_arch_setup,
3498 ppc_regs_info,
3499 ppc_cannot_fetch_register,
3500 ppc_cannot_store_register,
3501 NULL, /* fetch_register */
3502 ppc_get_pc,
3503 ppc_set_pc,
3504 NULL, /* breakpoint_kind_from_pc */
3505 ppc_sw_breakpoint_from_kind,
3506 NULL,
3507 0,
3508 ppc_breakpoint_at,
3509 ppc_supports_z_point_type,
3510 ppc_insert_point,
3511 ppc_remove_point,
3512 NULL,
3513 NULL,
3514 ppc_collect_ptrace_register,
3515 ppc_supply_ptrace_register,
3516 NULL, /* siginfo_fixup */
3517 NULL, /* new_process */
3518 NULL, /* delete_process */
3519 NULL, /* new_thread */
3520 NULL, /* delete_thread */
3521 NULL, /* new_fork */
3522 NULL, /* prepare_to_resume */
3523 NULL, /* process_qsupported */
3524 ppc_supports_tracepoints,
3525 ppc_get_thread_area,
3526 ppc_install_fast_tracepoint_jump_pad,
3527 ppc_emit_ops,
3528 ppc_get_min_fast_tracepoint_insn_len,
3529 NULL, /* supports_range_stepping */
3530 NULL, /* breakpoint_kind_from_current_state */
3531 ppc_supports_hardware_single_step,
3532 NULL, /* get_syscall_trapinfo */
3533 ppc_get_ipa_tdesc_idx,
3534 };
3535
3536 void
3537 initialize_low_arch (void)
3538 {
3539 /* Initialize the Linux target descriptions. */
3540
3541 init_registers_powerpc_32l ();
3542 init_registers_powerpc_altivec32l ();
3543 init_registers_powerpc_cell32l ();
3544 init_registers_powerpc_vsx32l ();
3545 init_registers_powerpc_isa205_32l ();
3546 init_registers_powerpc_isa205_altivec32l ();
3547 init_registers_powerpc_isa205_vsx32l ();
3548 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3549 init_registers_powerpc_isa207_vsx32l ();
3550 init_registers_powerpc_isa207_htm_vsx32l ();
3551 init_registers_powerpc_e500l ();
3552 #if __powerpc64__
3553 init_registers_powerpc_64l ();
3554 init_registers_powerpc_altivec64l ();
3555 init_registers_powerpc_cell64l ();
3556 init_registers_powerpc_vsx64l ();
3557 init_registers_powerpc_isa205_64l ();
3558 init_registers_powerpc_isa205_altivec64l ();
3559 init_registers_powerpc_isa205_vsx64l ();
3560 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3561 init_registers_powerpc_isa207_vsx64l ();
3562 init_registers_powerpc_isa207_htm_vsx64l ();
3563 #endif
3564
3565 initialize_regsets_info (&ppc_regsets_info);
3566 }