1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
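/* Worked example of the field macros above: for the unconditional
   branch word 0x48000008 ("b .+8"), PPC_OP6 yields 18 (the branch
   major opcode) and PPC_LI yields the byte displacement 8.  */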
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
56
57 protected:
58
59 void low_arch_setup () override;
60
61 bool low_cannot_fetch_register (int regno) override;
62
63 bool low_cannot_store_register (int regno) override;
64
65 bool low_supports_breakpoints () override;
66
67 CORE_ADDR low_get_pc (regcache *regcache) override;
68
69 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
70
71 bool low_breakpoint_at (CORE_ADDR pc) override;
72 };
73
74 /* The singleton target ops object. */
75
76 static ppc_target the_ppc_target;
77
78 /* Holds the AT_HWCAP auxv entry. */
79
80 static unsigned long ppc_hwcap;
81
82 /* Holds the AT_HWCAP2 auxv entry. */
83
84 static unsigned long ppc_hwcap2;
85
86
87 #define ppc_num_regs 73
88
89 #ifdef __powerpc64__
90 /* We use a constant for FPSCR instead of PT_FPSCR, because
91 many shipped PPC64 kernels had the wrong value in ptrace.h. */
92 static int ppc_regmap[] =
93 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
94 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
95 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
96 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
97 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
98 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
99 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
100 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
101 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
102 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
103 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
104 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
105 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
106 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
107 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
108 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
109 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
110 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
111 PT_ORIG_R3 * 8, PT_TRAP * 8 };
112 #else
113 /* Currently, don't check/send MQ. */
114 static int ppc_regmap[] =
115 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
116 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
117 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
118 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
119 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
120 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
121 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
122 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
123 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
124 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
125 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
126 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
127 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
128 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
129 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
130 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
131 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
132 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
133 PT_ORIG_R3 * 4, PT_TRAP * 4
134 };
135
136 static int ppc_regmap_e500[] =
137 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
138 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
139 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
140 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
141 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
142 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
143 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
144 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
145 -1, -1, -1, -1,
146 -1, -1, -1, -1,
147 -1, -1, -1, -1,
148 -1, -1, -1, -1,
149 -1, -1, -1, -1,
150 -1, -1, -1, -1,
151 -1, -1, -1, -1,
152 -1, -1, -1, -1,
153 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
154 PT_CTR * 4, PT_XER * 4, -1,
155 PT_ORIG_R3 * 4, PT_TRAP * 4
156 };
157 #endif
158
159 /* Check whether the kernel provides a register set with number
160 REGSET_ID of size REGSETSIZE for process/thread TID. */
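/* Note that a PTRACE_GETREGSET failure with ENODATA still counts as
   "available" here: the kernel recognizes the regset but it currently
   holds no data (as with the EBB and HTM regsets while inactive).  */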
161
162 static int
163 ppc_check_regset (int tid, int regset_id, int regsetsize)
164 {
165 void *buf = alloca (regsetsize);
166 struct iovec iov;
167
168 iov.iov_base = buf;
169 iov.iov_len = regsetsize;
170
171 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
172 || errno == ENODATA)
173 return 1;
174 return 0;
175 }
176
177 bool
178 ppc_target::low_cannot_store_register (int regno)
179 {
180 const struct target_desc *tdesc = current_process ()->tdesc;
181
182 #ifndef __powerpc64__
183 /* Some kernels do not allow us to store fpscr. */
184 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
185 && regno == find_regno (tdesc, "fpscr"))
186 return true;
187 #endif
188
189 /* Some kernels do not allow us to store orig_r3 or trap. */
190 if (regno == find_regno (tdesc, "orig_r3")
191 || regno == find_regno (tdesc, "trap"))
192 return true;
193
194 return false;
195 }
196
197 bool
198 ppc_target::low_cannot_fetch_register (int regno)
199 {
200 return false;
201 }
202
203 static void
204 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
205 {
206 memset (buf, 0, sizeof (long));
207
208 if (__BYTE_ORDER == __LITTLE_ENDIAN)
209 {
210 /* Little-endian values always sit at the left end of the buffer. */
211 collect_register (regcache, regno, buf);
212 }
213 else if (__BYTE_ORDER == __BIG_ENDIAN)
214 {
 215 	  /* Big-endian values sit at the right end of the buffer.  For
 216 	     registers whose sizes are smaller than sizeof (long), we must
 217 	     apply padding to access them correctly. */
218 int size = register_size (regcache->tdesc, regno);
219
220 if (size < sizeof (long))
221 collect_register (regcache, regno, buf + sizeof (long) - size);
222 else
223 collect_register (regcache, regno, buf);
224 }
225 else
226 perror_with_name ("Unexpected byte order");
227 }
228
229 static void
230 ppc_supply_ptrace_register (struct regcache *regcache,
231 int regno, const char *buf)
232 {
233 if (__BYTE_ORDER == __LITTLE_ENDIAN)
234 {
235 /* Little-endian values always sit at the left end of the buffer. */
236 supply_register (regcache, regno, buf);
237 }
238 else if (__BYTE_ORDER == __BIG_ENDIAN)
239 {
 240 	  /* Big-endian values sit at the right end of the buffer.  For
 241 	     registers whose sizes are smaller than sizeof (long), we must
 242 	     apply padding to access them correctly. */
243 int size = register_size (regcache->tdesc, regno);
244
245 if (size < sizeof (long))
246 supply_register (regcache, regno, buf + sizeof (long) - size);
247 else
248 supply_register (regcache, regno, buf);
249 }
250 else
251 perror_with_name ("Unexpected byte order");
252 }
253
254 bool
255 ppc_target::low_supports_breakpoints ()
256 {
257 return true;
258 }
259
260 CORE_ADDR
261 ppc_target::low_get_pc (regcache *regcache)
262 {
263 if (register_size (regcache->tdesc, 0) == 4)
264 {
265 unsigned int pc;
266 collect_register_by_name (regcache, "pc", &pc);
267 return (CORE_ADDR) pc;
268 }
269 else
270 {
271 unsigned long pc;
272 collect_register_by_name (regcache, "pc", &pc);
273 return (CORE_ADDR) pc;
274 }
275 }
276
277 void
278 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
279 {
280 if (register_size (regcache->tdesc, 0) == 4)
281 {
282 unsigned int newpc = pc;
283 supply_register_by_name (regcache, "pc", &newpc);
284 }
285 else
286 {
287 unsigned long newpc = pc;
288 supply_register_by_name (regcache, "pc", &newpc);
289 }
290 }
291
292 #ifndef __powerpc64__
293 static int ppc_regmap_adjusted;
294 #endif
295
296
297 /* Correct in either endianness.
298 This instruction is "twge r2, r2", which GDB uses as a software
299 breakpoint. */
300 static const unsigned int ppc_breakpoint = 0x7d821008;
301 #define ppc_breakpoint_len 4
302
303 /* Implementation of target ops method "sw_breakpoint_from_kind". */
304
305 const gdb_byte *
306 ppc_target::sw_breakpoint_from_kind (int kind, int *size)
307 {
308 *size = ppc_breakpoint_len;
309 return (const gdb_byte *) &ppc_breakpoint;
310 }
311
312 bool
313 ppc_target::low_breakpoint_at (CORE_ADDR where)
314 {
315 unsigned int insn;
316
317 read_memory (where, (unsigned char *) &insn, 4);
318 if (insn == ppc_breakpoint)
319 return true;
320 /* If necessary, recognize more trap instructions here. GDB only uses
321 the one. */
322
323 return false;
324 }
325
326 /* Implement supports_z_point_type target-ops.
327 Returns true if type Z_TYPE breakpoint is supported.
328
 329    Software breakpoints are handled on the server side, so tracepoints
 330    and breakpoints can be inserted at the same location. */
331
332 static int
333 ppc_supports_z_point_type (char z_type)
334 {
335 switch (z_type)
336 {
337 case Z_PACKET_SW_BP:
338 return 1;
339 case Z_PACKET_HW_BP:
340 case Z_PACKET_WRITE_WP:
341 case Z_PACKET_ACCESS_WP:
342 default:
343 return 0;
344 }
345 }
346
347 /* Implement insert_point target-ops.
348 Returns 0 on success, -1 on failure and 1 on unsupported. */
349
350 static int
351 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
352 int size, struct raw_breakpoint *bp)
353 {
354 switch (type)
355 {
356 case raw_bkpt_type_sw:
357 return insert_memory_breakpoint (bp);
358
359 case raw_bkpt_type_hw:
360 case raw_bkpt_type_write_wp:
361 case raw_bkpt_type_access_wp:
362 default:
363 /* Unsupported. */
364 return 1;
365 }
366 }
367
368 /* Implement remove_point target-ops.
369 Returns 0 on success, -1 on failure and 1 on unsupported. */
370
371 static int
372 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
373 int size, struct raw_breakpoint *bp)
374 {
375 switch (type)
376 {
377 case raw_bkpt_type_sw:
378 return remove_memory_breakpoint (bp);
379
380 case raw_bkpt_type_hw:
381 case raw_bkpt_type_write_wp:
382 case raw_bkpt_type_access_wp:
383 default:
384 /* Unsupported. */
385 return 1;
386 }
387 }
388
389 /* Provide only a fill function for the general register set. ps_lgetregs
390 will use this for NPTL support. */
391
392 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
393 {
394 int i;
395
396 for (i = 0; i < 32; i++)
397 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
398
399 for (i = 64; i < 70; i++)
400 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
401
402 for (i = 71; i < 73; i++)
403 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
404 }
405
406 /* Program Priority Register regset fill function. */
407
408 static void
409 ppc_fill_pprregset (struct regcache *regcache, void *buf)
410 {
411 char *ppr = (char *) buf;
412
413 collect_register_by_name (regcache, "ppr", ppr);
414 }
415
416 /* Program Priority Register regset store function. */
417
418 static void
419 ppc_store_pprregset (struct regcache *regcache, const void *buf)
420 {
421 const char *ppr = (const char *) buf;
422
423 supply_register_by_name (regcache, "ppr", ppr);
424 }
425
426 /* Data Stream Control Register regset fill function. */
427
428 static void
429 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
430 {
431 char *dscr = (char *) buf;
432
433 collect_register_by_name (regcache, "dscr", dscr);
434 }
435
436 /* Data Stream Control Register regset store function. */
437
438 static void
439 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
440 {
441 const char *dscr = (const char *) buf;
442
443 supply_register_by_name (regcache, "dscr", dscr);
444 }
445
446 /* Target Address Register regset fill function. */
447
448 static void
449 ppc_fill_tarregset (struct regcache *regcache, void *buf)
450 {
451 char *tar = (char *) buf;
452
453 collect_register_by_name (regcache, "tar", tar);
454 }
455
456 /* Target Address Register regset store function. */
457
458 static void
459 ppc_store_tarregset (struct regcache *regcache, const void *buf)
460 {
461 const char *tar = (const char *) buf;
462
463 supply_register_by_name (regcache, "tar", tar);
464 }
465
466 /* Event-Based Branching regset store function. Unless the inferior
 467    has a perf event open, ptrace can return an error when reading or
 468    writing the regset, with ENODATA.  For reading, the registers
469 will correctly show as unavailable. For writing, gdbserver
470 currently only caches any register writes from P and G packets and
471 the stub always tries to write all the regsets when resuming the
472 inferior, which would result in frequent warnings. For this
473 reason, we don't define a fill function. This also means that the
474 client-side regcache will be dirty if the user tries to write to
475 the EBB registers. G packets that the client sends to write to
476 unrelated registers will also include data for EBB registers, even
477 if they are unavailable. */
478
479 static void
480 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
481 {
482 const char *regset = (const char *) buf;
483
484 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
 485      .dat file it is BESCR, EBBHR, EBBRR. */
486 supply_register_by_name (regcache, "ebbrr", &regset[0]);
487 supply_register_by_name (regcache, "ebbhr", &regset[8]);
488 supply_register_by_name (regcache, "bescr", &regset[16]);
489 }
490
491 /* Performance Monitoring Unit regset fill function. */
492
493 static void
494 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
495 {
496 char *regset = (char *) buf;
497
498 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
 499      In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER. */
500 collect_register_by_name (regcache, "siar", &regset[0]);
501 collect_register_by_name (regcache, "sdar", &regset[8]);
502 collect_register_by_name (regcache, "sier", &regset[16]);
503 collect_register_by_name (regcache, "mmcr2", &regset[24]);
504 collect_register_by_name (regcache, "mmcr0", &regset[32]);
505 }
506
507 /* Performance Monitoring Unit regset store function. */
508
509 static void
510 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
511 {
512 const char *regset = (const char *) buf;
513
514 supply_register_by_name (regcache, "siar", &regset[0]);
515 supply_register_by_name (regcache, "sdar", &regset[8]);
516 supply_register_by_name (regcache, "sier", &regset[16]);
517 supply_register_by_name (regcache, "mmcr2", &regset[24]);
518 supply_register_by_name (regcache, "mmcr0", &regset[32]);
519 }
520
521 /* Hardware Transactional Memory special-purpose register regset fill
522 function. */
523
524 static void
525 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
526 {
527 int i, base;
528 char *regset = (char *) buf;
529
530 base = find_regno (regcache->tdesc, "tfhar");
531 for (i = 0; i < 3; i++)
532 collect_register (regcache, base + i, &regset[i * 8]);
533 }
534
535 /* Hardware Transactional Memory special-purpose register regset store
536 function. */
537
538 static void
539 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
540 {
541 int i, base;
542 const char *regset = (const char *) buf;
543
544 base = find_regno (regcache->tdesc, "tfhar");
545 for (i = 0; i < 3; i++)
546 supply_register (regcache, base + i, &regset[i * 8]);
547 }
548
549 /* For the same reasons as the EBB regset, none of the HTM
550 checkpointed regsets have a fill function. These registers are
551 only available if the inferior is in a transaction. */
552
553 /* Hardware Transactional Memory checkpointed general-purpose regset
554 store function. */
555
556 static void
557 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
558 {
559 int i, base, size, endian_offset;
560 const char *regset = (const char *) buf;
561
562 base = find_regno (regcache->tdesc, "cr0");
563 size = register_size (regcache->tdesc, base);
564
565 gdb_assert (size == 4 || size == 8);
566
567 for (i = 0; i < 32; i++)
568 supply_register (regcache, base + i, &regset[i * size]);
569
570 endian_offset = 0;
571
572 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
573 endian_offset = 4;
574
575 supply_register_by_name (regcache, "ccr",
576 &regset[PT_CCR * size + endian_offset]);
577
578 supply_register_by_name (regcache, "cxer",
579 &regset[PT_XER * size + endian_offset]);
580
581 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
582 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
583 }
584
585 /* Hardware Transactional Memory checkpointed floating-point regset
586 store function. */
587
588 static void
589 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
590 {
591 int i, base;
592 const char *regset = (const char *) buf;
593
594 base = find_regno (regcache->tdesc, "cf0");
595
596 for (i = 0; i < 32; i++)
597 supply_register (regcache, base + i, &regset[i * 8]);
598
599 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
600 }
601
602 /* Hardware Transactional Memory checkpointed vector regset store
603 function. */
604
605 static void
606 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
607 {
608 int i, base;
609 const char *regset = (const char *) buf;
610 int vscr_offset = 0;
611
612 base = find_regno (regcache->tdesc, "cvr0");
613
614 for (i = 0; i < 32; i++)
615 supply_register (regcache, base + i, &regset[i * 16]);
616
617 if (__BYTE_ORDER == __BIG_ENDIAN)
618 vscr_offset = 12;
619
620 supply_register_by_name (regcache, "cvscr",
621 &regset[32 * 16 + vscr_offset]);
622
623 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
624 }
625
626 /* Hardware Transactional Memory checkpointed vector-scalar regset
627 store function. */
628
629 static void
630 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
631 {
632 int i, base;
633 const char *regset = (const char *) buf;
634
635 base = find_regno (regcache->tdesc, "cvs0h");
636 for (i = 0; i < 32; i++)
637 supply_register (regcache, base + i, &regset[i * 8]);
638 }
639
640 /* Hardware Transactional Memory checkpointed Program Priority
641 Register regset store function. */
642
643 static void
644 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
645 {
646 const char *cppr = (const char *) buf;
647
648 supply_register_by_name (regcache, "cppr", cppr);
649 }
650
651 /* Hardware Transactional Memory checkpointed Data Stream Control
652 Register regset store function. */
653
654 static void
655 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
656 {
657 const char *cdscr = (const char *) buf;
658
659 supply_register_by_name (regcache, "cdscr", cdscr);
660 }
661
662 /* Hardware Transactional Memory checkpointed Target Address Register
663 regset store function. */
664
665 static void
666 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
667 {
668 const char *ctar = (const char *) buf;
669
670 supply_register_by_name (regcache, "ctar", ctar);
671 }
672
673 static void
674 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
675 {
676 int i, base;
677 char *regset = (char *) buf;
678
679 base = find_regno (regcache->tdesc, "vs0h");
680 for (i = 0; i < 32; i++)
681 collect_register (regcache, base + i, &regset[i * 8]);
682 }
683
684 static void
685 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
686 {
687 int i, base;
688 const char *regset = (const char *) buf;
689
690 base = find_regno (regcache->tdesc, "vs0h");
691 for (i = 0; i < 32; i++)
692 supply_register (regcache, base + i, &regset[i * 8]);
693 }
694
695 static void
696 ppc_fill_vrregset (struct regcache *regcache, void *buf)
697 {
698 int i, base;
699 char *regset = (char *) buf;
700 int vscr_offset = 0;
701
702 base = find_regno (regcache->tdesc, "vr0");
703 for (i = 0; i < 32; i++)
704 collect_register (regcache, base + i, &regset[i * 16]);
705
706 if (__BYTE_ORDER == __BIG_ENDIAN)
707 vscr_offset = 12;
708
709 collect_register_by_name (regcache, "vscr",
710 &regset[32 * 16 + vscr_offset]);
711
712 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
713 }
714
715 static void
716 ppc_store_vrregset (struct regcache *regcache, const void *buf)
717 {
718 int i, base;
719 const char *regset = (const char *) buf;
720 int vscr_offset = 0;
721
722 base = find_regno (regcache->tdesc, "vr0");
723 for (i = 0; i < 32; i++)
724 supply_register (regcache, base + i, &regset[i * 16]);
725
726 if (__BYTE_ORDER == __BIG_ENDIAN)
727 vscr_offset = 12;
728
729 supply_register_by_name (regcache, "vscr",
730 &regset[32 * 16 + vscr_offset]);
731 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
732 }
733
734 struct gdb_evrregset_t
735 {
736 unsigned long evr[32];
737 unsigned long long acc;
738 unsigned long spefscr;
739 };
740
741 static void
742 ppc_fill_evrregset (struct regcache *regcache, void *buf)
743 {
744 int i, ev0;
745 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
746
747 ev0 = find_regno (regcache->tdesc, "ev0h");
748 for (i = 0; i < 32; i++)
749 collect_register (regcache, ev0 + i, &regset->evr[i]);
750
751 collect_register_by_name (regcache, "acc", &regset->acc);
752 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
753 }
754
755 static void
756 ppc_store_evrregset (struct regcache *regcache, const void *buf)
757 {
758 int i, ev0;
759 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
760
761 ev0 = find_regno (regcache->tdesc, "ev0h");
762 for (i = 0; i < 32; i++)
763 supply_register (regcache, ev0 + i, &regset->evr[i]);
764
765 supply_register_by_name (regcache, "acc", &regset->acc);
766 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
767 }
768
769 /* Support for hardware single step. */
770
771 static int
772 ppc_supports_hardware_single_step (void)
773 {
774 return 1;
775 }
776
777 static struct regset_info ppc_regsets[] = {
778 /* List the extra register sets before GENERAL_REGS. That way we will
779 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
780 general registers. Some kernels support these, but not the newer
781 PPC_PTRACE_GETREGS. */
782 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
783 NULL, ppc_store_tm_ctarregset },
784 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
785 NULL, ppc_store_tm_cdscrregset },
786 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
787 NULL, ppc_store_tm_cpprregset },
788 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
789 NULL, ppc_store_tm_cvsxregset },
790 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
791 NULL, ppc_store_tm_cvrregset },
792 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
793 NULL, ppc_store_tm_cfprregset },
794 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
795 NULL, ppc_store_tm_cgprregset },
796 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
797 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
798 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
799 NULL, ppc_store_ebbregset },
800 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
801 ppc_fill_pmuregset, ppc_store_pmuregset },
802 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
803 ppc_fill_tarregset, ppc_store_tarregset },
804 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
805 ppc_fill_pprregset, ppc_store_pprregset },
806 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
807 ppc_fill_dscrregset, ppc_store_dscrregset },
808 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
809 ppc_fill_vsxregset, ppc_store_vsxregset },
810 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
811 ppc_fill_vrregset, ppc_store_vrregset },
812 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
813 ppc_fill_evrregset, ppc_store_evrregset },
814 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
815 NULL_REGSET
816 };
817
818 static struct usrregs_info ppc_usrregs_info =
819 {
820 ppc_num_regs,
821 ppc_regmap,
822 };
823
824 static struct regsets_info ppc_regsets_info =
825 {
826 ppc_regsets, /* regsets */
827 0, /* num_regsets */
828 NULL, /* disabled_regsets */
829 };
830
831 static struct regs_info myregs_info =
832 {
833 NULL, /* regset_bitmap */
834 &ppc_usrregs_info,
835 &ppc_regsets_info
836 };
837
838 const regs_info *
839 ppc_target::get_regs_info ()
840 {
841 return &myregs_info;
842 }
843
844 void
845 ppc_target::low_arch_setup ()
846 {
847 const struct target_desc *tdesc;
848 struct regset_info *regset;
849 struct ppc_linux_features features = ppc_linux_no_features;
850
851 int tid = lwpid_of (current_thread);
852
853 features.wordsize = ppc_linux_target_wordsize (tid);
854
855 if (features.wordsize == 4)
856 tdesc = tdesc_powerpc_32l;
857 else
858 tdesc = tdesc_powerpc_64l;
859
860 current_process ()->tdesc = tdesc;
861
862 /* The value of current_process ()->tdesc needs to be set for this
863 call. */
864 ppc_hwcap = linux_get_hwcap (features.wordsize);
865 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
866
867 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
868
869 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
870 features.vsx = true;
871
872 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
873 features.altivec = true;
874
875 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
876 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
877 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
878 {
879 features.ppr_dscr = true;
880 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
881 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
882 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
883 && ppc_check_regset (tid, NT_PPC_TAR,
884 PPC_LINUX_SIZEOF_TARREGSET)
885 && ppc_check_regset (tid, NT_PPC_EBB,
886 PPC_LINUX_SIZEOF_EBBREGSET)
887 && ppc_check_regset (tid, NT_PPC_PMU,
888 PPC_LINUX_SIZEOF_PMUREGSET))
889 {
890 features.isa207 = true;
891 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
892 && ppc_check_regset (tid, NT_PPC_TM_SPR,
893 PPC_LINUX_SIZEOF_TM_SPRREGSET))
894 features.htm = true;
895 }
896 }
897
898 tdesc = ppc_linux_match_description (features);
899
900 /* On 32-bit machines, check for SPE registers.
 901      Set the low target's regmap field as appropriate. */
902 #ifndef __powerpc64__
903 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
904 tdesc = tdesc_powerpc_e500l;
905
906 if (!ppc_regmap_adjusted)
907 {
908 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
909 ppc_usrregs_info.regmap = ppc_regmap_e500;
910
911 /* If the FPSCR is 64-bit wide, we need to fetch the whole
912 64-bit slot and not just its second word. The PT_FPSCR
913 supplied in a 32-bit GDB compilation doesn't reflect
914 this. */
915 if (register_size (tdesc, 70) == 8)
916 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
917
918 ppc_regmap_adjusted = 1;
919 }
920 #endif
921
922 current_process ()->tdesc = tdesc;
923
924 for (regset = ppc_regsets; regset->size >= 0; regset++)
925 switch (regset->get_request)
926 {
927 case PTRACE_GETVRREGS:
928 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
929 break;
930 case PTRACE_GETVSXREGS:
931 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
932 break;
933 case PTRACE_GETEVRREGS:
934 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
935 regset->size = 32 * 4 + 8 + 4;
936 else
937 regset->size = 0;
938 break;
939 case PTRACE_GETREGSET:
940 switch (regset->nt_type)
941 {
942 case NT_PPC_PPR:
943 regset->size = (features.ppr_dscr ?
944 PPC_LINUX_SIZEOF_PPRREGSET : 0);
945 break;
946 case NT_PPC_DSCR:
947 regset->size = (features.ppr_dscr ?
948 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
949 break;
950 case NT_PPC_TAR:
951 regset->size = (features.isa207 ?
952 PPC_LINUX_SIZEOF_TARREGSET : 0);
953 break;
954 case NT_PPC_EBB:
955 regset->size = (features.isa207 ?
956 PPC_LINUX_SIZEOF_EBBREGSET : 0);
957 break;
958 case NT_PPC_PMU:
959 regset->size = (features.isa207 ?
960 PPC_LINUX_SIZEOF_PMUREGSET : 0);
961 break;
962 case NT_PPC_TM_SPR:
963 regset->size = (features.htm ?
964 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
965 break;
966 case NT_PPC_TM_CGPR:
967 if (features.wordsize == 4)
968 regset->size = (features.htm ?
969 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
970 else
971 regset->size = (features.htm ?
972 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
973 break;
974 case NT_PPC_TM_CFPR:
975 regset->size = (features.htm ?
976 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
977 break;
978 case NT_PPC_TM_CVMX:
979 regset->size = (features.htm ?
980 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
981 break;
982 case NT_PPC_TM_CVSX:
983 regset->size = (features.htm ?
984 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
985 break;
986 case NT_PPC_TM_CPPR:
987 regset->size = (features.htm ?
988 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
989 break;
990 case NT_PPC_TM_CDSCR:
991 regset->size = (features.htm ?
992 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
993 break;
994 case NT_PPC_TM_CTAR:
995 regset->size = (features.htm ?
996 PPC_LINUX_SIZEOF_CTARREGSET : 0);
997 break;
998 default:
999 break;
1000 }
1001 break;
1002 default:
1003 break;
1004 }
1005 }
1006
1007 /* Implementation of linux_target_ops method "supports_tracepoints". */
1008
1009 static int
1010 ppc_supports_tracepoints (void)
1011 {
1012 return 1;
1013 }
1014
1015 /* Get the thread area address. This is used to recognize which
1016 thread is which when tracing with the in-process agent library. We
1017 don't read anything from the address, and treat it as opaque; it's
1018 the address itself that we assume is unique per-thread. */
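/* The thread pointer lives in r13 on 64-bit and in r2 on 32-bit
   PowerPC Linux, hence the register chosen below.  */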
1019
1020 static int
1021 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
1022 {
1023 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1024 struct thread_info *thr = get_lwp_thread (lwp);
1025 struct regcache *regcache = get_thread_regcache (thr, 1);
1026 ULONGEST tp = 0;
1027
1028 #ifdef __powerpc64__
1029 if (register_size (regcache->tdesc, 0) == 8)
1030 collect_register_by_name (regcache, "r13", &tp);
1031 else
1032 #endif
1033 collect_register_by_name (regcache, "r2", &tp);
1034
1035 *addr = tp;
1036
1037 return 0;
1038 }
1039
1040 #ifdef __powerpc64__
1041
1042 /* Older glibc doesn't provide this. */
1043
1044 #ifndef EF_PPC64_ABI
1045 #define EF_PPC64_ABI 3
1046 #endif
1047
1048 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1049 inferiors. */
1050
1051 static int
1052 is_elfv2_inferior (void)
1053 {
1054 /* To be used as fallback if we're unable to determine the right result -
1055 assume inferior uses the same ABI as gdbserver. */
1056 #if _CALL_ELF == 2
1057 const int def_res = 1;
1058 #else
1059 const int def_res = 0;
1060 #endif
1061 CORE_ADDR phdr;
1062 Elf64_Ehdr ehdr;
1063
1064 const struct target_desc *tdesc = current_process ()->tdesc;
1065 int wordsize = register_size (tdesc, 0);
1066
1067 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1068 return def_res;
1069
1070 /* Assume ELF header is at the beginning of the page where program headers
1071 are located. If it doesn't look like one, bail. */
1072
1073 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1074 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1075 return def_res;
1076
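  /* The low two bits of e_flags hold the PPC64 ABI version; a value
     of 2 means the ELFv2 ABI.  */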
1077 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1078 }
1079
1080 #endif
1081
 1082 /* Generate a ds-form instruction in BUF and return the number of instructions written.
1083
1084 0 6 11 16 30 32
1085 | OPCD | RST | RA | DS |XO| */
1086
1087 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1088 static int
1089 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1090 {
1091 uint32_t insn;
1092
1093 gdb_assert ((opcd & ~0x3f) == 0);
1094 gdb_assert ((rst & ~0x1f) == 0);
1095 gdb_assert ((ra & ~0x1f) == 0);
1096 gdb_assert ((xo & ~0x3) == 0);
1097
1098 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1099 *buf = (opcd << 26) | insn;
1100 return 1;
1101 }
1102
 1103 /* The following are frequently used ds-form instructions. */
1104
1105 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1106 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1107 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1108 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
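/* For example, GEN_STD (buf, 31, 1, 16) emits the single word
   0xfbe10010, i.e. "std r31,16(r1)".  */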
1109
1110 /* Generate a d-form instruction in BUF.
1111
1112 0 6 11 16 32
1113 | OPCD | RST | RA | D | */
1114
1115 static int
1116 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1117 {
1118 uint32_t insn;
1119
1120 gdb_assert ((opcd & ~0x3f) == 0);
1121 gdb_assert ((rst & ~0x1f) == 0);
1122 gdb_assert ((ra & ~0x1f) == 0);
1123
1124 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1125 *buf = (opcd << 26) | insn;
1126 return 1;
1127 }
1128
 1129 /* The following are frequently used d-form instructions. */
1130
1131 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1132 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1133 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1134 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1135 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1136 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1137 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1138 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1139 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1140
 1141 /* Generate an xfx-form instruction in BUF and return the number of
 1142    instructions written.
1143
1144 0 6 11 21 31 32
1145 | OPCD | RST | RI | XO |/| */
1146
1147 static int
1148 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1149 {
1150 uint32_t insn;
1151 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1152
1153 gdb_assert ((opcd & ~0x3f) == 0);
1154 gdb_assert ((rst & ~0x1f) == 0);
1155 gdb_assert ((xo & ~0x3ff) == 0);
1156
1157 insn = (rst << 21) | (n << 11) | (xo << 1);
1158 *buf = (opcd << 26) | insn;
1159 return 1;
1160 }
1161
 1162 /* The following are frequently used xfx-form instructions. */
1163
1164 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1165 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1166 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1167 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1168 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1169 E & 0xf, 598)
1170 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
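/* GEN_LWSYNC emits "lwsync" (sync with L=1, word 0x7c2004ac), the
   lightweight memory barrier used around the jump pad's lock below.  */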
1171
1172
 1173 /* Generate an x-form instruction in BUF and return the number of instructions written.
1174
1175 0 6 11 16 21 31 32
1176 | OPCD | RST | RA | RB | XO |RC| */
1177
1178 static int
1179 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1180 {
1181 uint32_t insn;
1182
1183 gdb_assert ((opcd & ~0x3f) == 0);
1184 gdb_assert ((rst & ~0x1f) == 0);
1185 gdb_assert ((ra & ~0x1f) == 0);
1186 gdb_assert ((rb & ~0x1f) == 0);
1187 gdb_assert ((xo & ~0x3ff) == 0);
1188 gdb_assert ((rc & ~1) == 0);
1189
1190 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1191 *buf = (opcd << 26) | insn;
1192 return 1;
1193 }
1194
 1195 /* The following are frequently used x-form instructions. */
1196
1197 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1198 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1199 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1200 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1201 /* Assume bf = cr7. */
1202 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
1203
1204
 1205 /* Generate an md-form instruction in BUF and return the number of instructions written.
1206
1207 0 6 11 16 21 27 30 31 32
1208 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1209
1210 static int
1211 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1212 int xo, int rc)
1213 {
1214 uint32_t insn;
1215 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1216 unsigned int sh0_4 = sh & 0x1f;
1217 unsigned int sh5 = (sh >> 5) & 1;
1218
1219 gdb_assert ((opcd & ~0x3f) == 0);
1220 gdb_assert ((rs & ~0x1f) == 0);
1221 gdb_assert ((ra & ~0x1f) == 0);
1222 gdb_assert ((sh & ~0x3f) == 0);
1223 gdb_assert ((mb & ~0x3f) == 0);
1224 gdb_assert ((xo & ~0x7) == 0);
1225 gdb_assert ((rc & ~0x1) == 0);
1226
1227 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1228 | (sh5 << 1) | (xo << 2) | (rc & 1);
1229 *buf = (opcd << 26) | insn;
1230 return 1;
1231 }
1232
1233 /* The following are frequently used md-form instructions. */
1234
1235 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1236 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1237 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1238 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1239
 1240 /* Generate an i-form instruction in BUF and return the number of instructions written.
1241
1242 0 6 30 31 32
1243 | OPCD | LI |AA|LK| */
1244
1245 static int
1246 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1247 {
1248 uint32_t insn;
1249
1250 gdb_assert ((opcd & ~0x3f) == 0);
1251
1252 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1253 *buf = (opcd << 26) | insn;
1254 return 1;
1255 }
1256
1257 /* The following are frequently used i-form instructions. */
1258
1259 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1260 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1261
 1262 /* Generate a b-form instruction in BUF and return the number of instructions written.
1263
1264 0 6 11 16 30 31 32
1265 | OPCD | BO | BI | BD |AA|LK| */
1266
1267 static int
1268 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1269 int aa, int lk)
1270 {
1271 uint32_t insn;
1272
1273 gdb_assert ((opcd & ~0x3f) == 0);
1274 gdb_assert ((bo & ~0x1f) == 0);
1275 gdb_assert ((bi & ~0x1f) == 0);
1276
1277 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1278 *buf = (opcd << 26) | insn;
1279 return 1;
1280 }
1281
1282 /* The following are frequently used b-form instructions. */
1283 /* Assume bi = cr7. */
1284 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
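/* In GEN_BNE, BO = 0x4 means "branch if the CR bit is false" and
   BI = (7 << 2) | 2 selects the EQ bit of cr7, so the branch is taken
   when a preceding GEN_CMPW (which writes cr7) found the operands
   unequal.  */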
1285
1286 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
 1287    respectively.  They are primarily used to save/restore GPRs in the
 1288    jump pad, not for bytecode compilation. */
1289
1290 #ifdef __powerpc64__
1291 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1292 GEN_LD (buf, rt, ra, si) : \
1293 GEN_LWZ (buf, rt, ra, si))
1294 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1295 GEN_STD (buf, rt, ra, si) : \
1296 GEN_STW (buf, rt, ra, si))
1297 #else
1298 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1299 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1300 #endif
1301
 1302 /* Generate a sequence of instructions to load IMM into register REG.
 1303    Write the instructions to BUF and return the number of instructions written. */
1304
1305 static int
1306 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1307 {
1308 uint32_t *p = buf;
1309
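  /* "(imm + 32768) < 65536" is an unsigned-wraparound test for "IMM
     fits in a signed 16-bit immediate", i.e. -32768 <= imm <= 32767
     when IMM is viewed as a signed 64-bit value.  */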
1310 if ((imm + 32768) < 65536)
1311 {
1312 /* li reg, imm[15:0] */
1313 p += GEN_LI (p, reg, imm);
1314 }
1315 else if ((imm >> 32) == 0)
1316 {
1317 /* lis reg, imm[31:16]
1318 ori reg, reg, imm[15:0]
1319 rldicl reg, reg, 0, 32 */
1320 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1321 if ((imm & 0xffff) != 0)
1322 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1323 /* Clear upper 32-bit if sign-bit is set. */
1324 if (imm & (1u << 31) && is_64)
1325 p += GEN_RLDICL (p, reg, reg, 0, 32);
1326 }
1327 else
1328 {
1329 gdb_assert (is_64);
1330 /* lis reg, <imm[63:48]>
1331 ori reg, reg, <imm[48:32]>
1332 rldicr reg, reg, 32, 31
1333 oris reg, reg, <imm[31:16]>
1334 ori reg, reg, <imm[15:0]> */
1335 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1336 if (((imm >> 32) & 0xffff) != 0)
1337 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1338 p += GEN_RLDICR (p, reg, reg, 32, 31);
1339 if (((imm >> 16) & 0xffff) != 0)
1340 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1341 if ((imm & 0xffff) != 0)
1342 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1343 }
1344
1345 return p - buf;
1346 }
1347
 1348 /* Generate a sequence to atomically exchange the value at location LOCK.
 1349    This code sequence clobbers r6, r7, and r8.  LOCK is the location for
 1350    the atomic exchange, OLD_VALUE is the expected old value stored in the
 1351    location, and R_NEW is the register holding the new value. */
1352
1353 static int
1354 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1355 int is_64)
1356 {
1357 const int r_lock = 6;
1358 const int r_old = 7;
1359 const int r_tmp = 8;
1360 uint32_t *p = buf;
1361
1362 /*
1363 1: lwarx TMP, 0, LOCK
1364 cmpwi TMP, OLD
1365 bne 1b
1366 stwcx. NEW, 0, LOCK
1367 bne 1b */
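  /* lwarx/stwcx. form a load-reserved / store-conditional pair: the
     store only succeeds if nothing else wrote the reservation granule
     in between, so the loop retries until OLD_VALUE is atomically
     replaced by the contents of R_NEW.  */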
1368
1369 p += gen_limm (p, r_lock, lock, is_64);
1370 p += gen_limm (p, r_old, old_value, is_64);
1371
1372 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1373 p += GEN_CMPW (p, r_tmp, r_old);
1374 p += GEN_BNE (p, -8);
1375 p += GEN_STWCX (p, r_new, 0, r_lock);
1376 p += GEN_BNE (p, -16);
1377
1378 return p - buf;
1379 }
1380
1381 /* Generate a sequence of instructions for calling a function
 1382    at address FN.  Return the number of instructions written to BUF. */
1383
1384 static int
1385 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1386 {
1387 uint32_t *p = buf;
1388
 1389   /* The callee must be entered via r12 so it can compute its TOC address. */
1390 p += gen_limm (p, 12, fn, is_64);
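  /* For the ELFv1 ABI (is_opd), FN is the address of a function
     descriptor rather than of code: load the static chain into r11,
     the callee's TOC pointer into r2, and the real entry point into
     r12.  */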
1391 if (is_opd)
1392 {
1393 p += GEN_LOAD (p, 11, 12, 16, is_64);
1394 p += GEN_LOAD (p, 2, 12, 8, is_64);
1395 p += GEN_LOAD (p, 12, 12, 0, is_64);
1396 }
1397 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1398 *p++ = 0x4e800421; /* bctrl */
1399
1400 return p - buf;
1401 }
1402
1403 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1404 of instruction. This function is used to adjust pc-relative instructions
1405 when copying. */
1406
1407 static void
1408 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1409 {
1410 uint32_t insn, op6;
1411 long rel, newrel;
1412
1413 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1414 op6 = PPC_OP6 (insn);
1415
1416 if (op6 == 18 && (insn & 2) == 0)
1417 {
1418 /* branch && AA = 0 */
1419 rel = PPC_LI (insn);
1420 newrel = (oldloc - *to) + rel;
1421
1422 /* Out of range. Cannot relocate instruction. */
1423 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1424 return;
1425
1426 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1427 }
1428 else if (op6 == 16 && (insn & 2) == 0)
1429 {
1430 /* conditional branch && AA = 0 */
1431
1432 /* If the new relocation is too big for even a 26-bit unconditional
1433 branch, there is nothing we can do. Just abort.
1434
1435 Otherwise, if it can be fit in 16-bit conditional branch, just
1436 copy the instruction and relocate the address.
1437
 1438      If it's too big for a conditional branch (16-bit), try to invert the
1439 condition and jump with 26-bit branch. For example,
1440
1441 beq .Lgoto
1442 INSN1
1443
1444 =>
1445
1446 bne 1f (+8)
1447 b .Lgoto
1448 1:INSN1
1449
 1450      After this transform, we actually jump from *TO+4 instead of *TO,
 1451      so check the relocation again, because it will be one instruction
 1452      farther than before if *TO is after OLDLOC.
1453
1454
 1455      A BDNZT (or similar) is transformed from
1456
1457 bdnzt eq, .Lgoto
1458 INSN1
1459
1460 =>
1461
1462 bdz 1f (+12)
1463 bf eq, 1f (+8)
1464 b .Lgoto
1465 1:INSN1
1466
1467 See also "BO field encodings". */
1468
1469 rel = PPC_BD (insn);
1470 newrel = (oldloc - *to) + rel;
1471
1472 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1473 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1474 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1475 {
1476 newrel -= 4;
1477
1478 /* Out of range. Cannot relocate instruction. */
1479 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1480 return;
1481
1482 if ((PPC_BO (insn) & 0x14) == 0x4)
1483 insn ^= (1 << 24);
1484 else if ((PPC_BO (insn) & 0x14) == 0x10)
1485 insn ^= (1 << 22);
1486
1487 /* Jump over the unconditional branch. */
1488 insn = (insn & ~0xfffc) | 0x8;
1489 target_write_memory (*to, (unsigned char *) &insn, 4);
1490 *to += 4;
1491
 1492 	  /* Build an unconditional branch and copy the LK bit. */
1493 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1494 target_write_memory (*to, (unsigned char *) &insn, 4);
1495 *to += 4;
1496
1497 return;
1498 }
1499 else if ((PPC_BO (insn) & 0x14) == 0)
1500 {
1501 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1502 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1503
1504 newrel -= 8;
1505
1506 /* Out of range. Cannot relocate instruction. */
1507 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1508 return;
1509
1510 /* Copy BI field. */
1511 bf_insn |= (insn & 0x1f0000);
1512
1513 /* Invert condition. */
1514 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1515 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1516
1517 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1518 *to += 4;
1519 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1520 *to += 4;
1521
 1522 	  /* Build an unconditional branch and copy the LK bit. */
1523 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1524 target_write_memory (*to, (unsigned char *) &insn, 4);
1525 *to += 4;
1526
1527 return;
1528 }
1529 else /* (BO & 0x14) == 0x14, branch always. */
1530 {
1531 /* Out of range. Cannot relocate instruction. */
1532 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1533 return;
1534
 1535 	  /* Build an unconditional branch and copy the LK bit. */
1536 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1537 target_write_memory (*to, (unsigned char *) &insn, 4);
1538 *to += 4;
1539
1540 return;
1541 }
1542 }
1543
1544 target_write_memory (*to, (unsigned char *) &insn, 4);
1545 *to += 4;
1546 }
1547
1548 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1549 See target.h for details. */
1550
1551 static int
1552 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1553 CORE_ADDR collector,
1554 CORE_ADDR lockaddr,
1555 ULONGEST orig_size,
1556 CORE_ADDR *jump_entry,
1557 CORE_ADDR *trampoline,
1558 ULONGEST *trampoline_size,
1559 unsigned char *jjump_pad_insn,
1560 ULONGEST *jjump_pad_insn_size,
1561 CORE_ADDR *adjusted_insn_addr,
1562 CORE_ADDR *adjusted_insn_addr_end,
1563 char *err)
1564 {
1565 uint32_t buf[256];
1566 uint32_t *p = buf;
1567 int j, offset;
1568 CORE_ADDR buildaddr = *jump_entry;
1569 const CORE_ADDR entryaddr = *jump_entry;
1570 int rsz, min_frame, frame_size, tp_reg;
1571 #ifdef __powerpc64__
1572 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1573 int is_64 = register_size (regcache->tdesc, 0) == 8;
1574 int is_opd = is_64 && !is_elfv2_inferior ();
1575 #else
1576 int is_64 = 0, is_opd = 0;
1577 #endif
1578
1579 #ifdef __powerpc64__
1580 if (is_64)
1581 {
1582 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1583 rsz = 8;
1584 min_frame = 112;
1585 frame_size = (40 * rsz) + min_frame;
1586 tp_reg = 13;
1587 }
1588 else
1589 {
1590 #endif
1591 rsz = 4;
1592 min_frame = 16;
1593 frame_size = (40 * rsz) + min_frame;
1594 tp_reg = 2;
1595 #ifdef __powerpc64__
1596 }
1597 #endif
1598
1599 /* Stack frame layout for this jump pad,
1600
1601 High thread_area (r13/r2) |
1602 tpoint - collecting_t obj
1603 PC/<tpaddr> | +36
1604 CTR | +35
1605 LR | +34
1606 XER | +33
1607 CR | +32
1608 R31 |
1609 R29 |
1610 ... |
1611 R1 | +1
1612 R0 - collected registers
1613 ... |
1614 ... |
1615 Low Back-chain -
1616
1617
1618 The code flow of this jump pad,
1619
1620 1. Adjust SP
1621 2. Save GPR and SPR
1622 3. Prepare argument
1623 4. Call gdb_collector
1624 5. Restore GPR and SPR
1625 6. Restore SP
 1626      7. Build a jump back to the program
 1627      8. Copy/relocate the original instruction
 1628      9. Build a jump to replace the original instruction. */
1629
1630 /* Adjust stack pointer. */
1631 if (is_64)
1632 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1633 else
1634 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1635
1636 /* Store GPRs. Save R1 later, because it had just been modified, but
1637 we want the original value. */
1638 for (j = 2; j < 32; j++)
1639 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1640 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1641 /* Set r0 to the original value of r1 before adjusting stack frame,
1642 and then save it. */
1643 p += GEN_ADDI (p, 0, 1, frame_size);
1644 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1645
1646 /* Save CR, XER, LR, and CTR. */
1647 p += GEN_MFCR (p, 3); /* mfcr r3 */
1648 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1649 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1650 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1651 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1652 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1653 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1654 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1655
1656 /* Save PC<tpaddr> */
1657 p += gen_limm (p, 3, tpaddr, is_64);
1658 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1659
1660
1661 /* Setup arguments to collector. */
1662 /* Set r4 to collected registers. */
1663 p += GEN_ADDI (p, 4, 1, min_frame);
1664 /* Set r3 to TPOINT. */
1665 p += gen_limm (p, 3, tpoint, is_64);
1666
1667 /* Prepare collecting_t object for lock. */
1668 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1669 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1670 /* Set R5 to collecting object. */
1671 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1672
1673 p += GEN_LWSYNC (p);
1674 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1675 p += GEN_LWSYNC (p);
1676
1677 /* Call to collector. */
1678 p += gen_call (p, collector, is_64, is_opd);
1679
1680 /* Simply write 0 to release the lock. */
1681 p += gen_limm (p, 3, lockaddr, is_64);
1682 p += gen_limm (p, 4, 0, is_64);
1683 p += GEN_LWSYNC (p);
1684 p += GEN_STORE (p, 4, 3, 0, is_64);
1685
1686 /* Restore stack and registers. */
1687 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1688 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1689 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1690 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1691 p += GEN_MTCR (p, 3); /* mtcr r3 */
1692 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1693 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1694 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1695
1696 /* Restore GPRs. */
1697 for (j = 2; j < 32; j++)
1698 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1699 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1700 /* Restore SP. */
1701 p += GEN_ADDI (p, 1, 1, frame_size);
1702
1703 /* Flush instructions to inferior memory. */
1704 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1705
1706 /* Now, insert the original instruction to execute in the jump pad. */
1707 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1708 *adjusted_insn_addr_end = *adjusted_insn_addr;
1709 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1710
 1711   /* Verify the relocation size.  It should be 4 for a normal copy,
 1712      or 8 or 12 for some conditional branches. */
1713 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1714 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1715 {
 1716       sprintf (err, "E.Unexpected instruction length = %d "
 1717 	       "when relocating instruction.",
1718 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1719 return 1;
1720 }
1721
1722 buildaddr = *adjusted_insn_addr_end;
1723 p = buf;
1724 /* Finally, write a jump back to the program. */
1725 offset = (tpaddr + 4) - buildaddr;
1726 if (offset >= (1 << 25) || offset < -(1 << 25))
1727 {
1728 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1729 "(offset 0x%x > 26-bit).", offset);
1730 return 1;
1731 }
1732 /* b <tpaddr+4> */
1733 p += GEN_B (p, offset);
1734 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1735 *jump_entry = buildaddr + (p - buf) * 4;
1736
1737 /* The jump pad is now built. Wire in a jump to our jump pad. This
1738 is always done last (by our caller actually), so that we can
1739 install fast tracepoints with threads running. This relies on
1740 the agent's atomic write support. */
1741 offset = entryaddr - tpaddr;
1742 if (offset >= (1 << 25) || offset < -(1 << 25))
1743 {
1744 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1745 "(offset 0x%x > 26-bit).", offset);
1746 return 1;
1747 }
1748 /* b <jentry> */
1749 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1750 *jjump_pad_insn_size = 4;
1751
1752 return 0;
1753 }
1754
1755 /* Returns the minimum instruction length for installing a tracepoint. */
1756
1757 static int
1758 ppc_get_min_fast_tracepoint_insn_len (void)
1759 {
1760 return 4;
1761 }
1762
1763 /* Emits a given buffer into the target at current_insn_ptr. Length
1764 is in units of 32-bit words. */
1765
1766 static void
1767 emit_insns (uint32_t *buf, int n)
1768 {
1769 n = n * sizeof (uint32_t);
1770 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1771 current_insn_ptr += n;
1772 }
1773
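/* EMIT_ASM assembles INSNS into gdbserver's own .text.__ppcbcax
   section, bracketed by the start_bcax_/end_bcax_ labels, and
   emit_insns then copies those words into the inferior at
   current_insn_ptr.  The NAME argument (expanded from __LINE__) keeps
   the labels unique for each use.  */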
1774 #define __EMIT_ASM(NAME, INSNS) \
1775 do \
1776 { \
1777 extern uint32_t start_bcax_ ## NAME []; \
1778 extern uint32_t end_bcax_ ## NAME []; \
1779 emit_insns (start_bcax_ ## NAME, \
1780 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1781 __asm__ (".section .text.__ppcbcax\n\t" \
1782 "start_bcax_" #NAME ":\n\t" \
1783 INSNS "\n\t" \
1784 "end_bcax_" #NAME ":\n\t" \
1785 ".previous\n\t"); \
1786 } while (0)
1787
1788 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1789 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1790
1791 /*
1792
1793 Bytecode execution stack frame - 32-bit
1794
1795 | LR save area (SP + 4)
1796 SP' -> +- Back chain (SP + 0)
 1797    | Save r31   for accessing saved arguments
1798 | Save r30 for bytecode stack pointer
1799 | Save r4 for incoming argument *value
1800 | Save r3 for incoming argument regs
1801 r30 -> +- Bytecode execution stack
1802 |
 1803    | 64 bytes (8 doublewords) initially.
1804 | Expand stack as needed.
1805 |
1806 +-
1807 | Some padding for minimum stack frame and 16-byte alignment.
1808 | 16 bytes.
1809 SP +- Back-chain (SP')
1810
1811 initial frame size
1812 = 16 + (4 * 4) + 64
1813 = 96
1814
 1815    r30 is the stack pointer for the bytecode machine.
 1816    It should point to the next empty slot, so we can use LDU for pop.
 1817    r3 is used to cache the high part of the TOP value.
 1818    It was the first argument, the pointer to regs.
 1819    r4 is used to cache the low part of the TOP value.
 1820    It was the second argument, the pointer to the result.
 1821    We should set *result = TOP before leaving this function.
1822
1823 Note:
1824 * To restore stack at epilogue
1825 => sp = r31
1826 * To check stack is big enough for bytecode execution.
1827 => r30 - 8 > SP + 8
1828 * To return execution result.
1829 => 0(r4) = TOP
1830
1831 */
1832
1833 /* Regardless of endian, register 3 is always high part, 4 is low part.
1834 These defines are used when the register pair is stored/loaded.
 1835    Likewise, to simplify the code, there is a similar define for 5:6. */
1836
1837 #if __BYTE_ORDER == __LITTLE_ENDIAN
1838 #define TOP_FIRST "4"
1839 #define TOP_SECOND "3"
1840 #define TMP_FIRST "6"
1841 #define TMP_SECOND "5"
1842 #else
1843 #define TOP_FIRST "3"
1844 #define TOP_SECOND "4"
1845 #define TMP_FIRST "5"
1846 #define TMP_SECOND "6"
1847 #endif
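
/* Example (illustrative): with TOP = 0x1122334455667788, r3 holds
   0x11223344 and r4 holds 0x55667788 regardless of endianness.  A store
   pair such as

     "stw " TOP_FIRST ", 0(30)"
     "stw " TOP_SECOND ", 4(30)"

   then writes that doubleword at 0(r30) in the inferior's native byte
   order: the high word lands at the lower address on big-endian targets
   and at the higher address on little-endian ones.  */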
1848
1849 /* Emit prologue in inferior memory. See above comments. */
1850
1851 static void
1852 ppc_emit_prologue (void)
1853 {
1854 EMIT_ASM (/* Save return address. */
1855 "mflr 0 \n"
1856 "stw 0, 4(1) \n"
1857 /* Adjust SP. 96 is the initial frame size. */
1858 "stwu 1, -96(1) \n"
1859 /* Save r31, r30 and the incoming arguments. */
1860 "stw 31, 96-4(1) \n"
1861 "stw 30, 96-8(1) \n"
1862 "stw 4, 96-12(1) \n"
1863 "stw 3, 96-16(1) \n"
1864 /* Point r31 at the original r1 for accessing the arguments. */
1865 "addi 31, 1, 96 \n"
1866 /* Set r30 to point to the stack top. */
1867 "addi 30, 1, 64 \n"
1868 /* Initialize r3/TOP to 0. */
1869 "li 3, 0 \n"
1870 "li 4, 0 \n");
1871 }
1872
1873 /* Emit epilogue in inferior memory. See above comments. */
1874
1875 static void
1876 ppc_emit_epilogue (void)
1877 {
1878 EMIT_ASM (/* *result = TOP */
1879 "lwz 5, -12(31) \n"
1880 "stw " TOP_FIRST ", 0(5) \n"
1881 "stw " TOP_SECOND ", 4(5) \n"
1882 /* Restore registers. */
1883 "lwz 31, -4(31) \n"
1884 "lwz 30, -8(31) \n"
1885 /* Restore SP. */
1886 "lwz 1, 0(1) \n"
1887 /* Restore LR. */
1888 "lwz 0, 4(1) \n"
1889 /* Return 0 for no-error. */
1890 "li 3, 0 \n"
1891 "mtlr 0 \n"
1892 "blr \n");
1893 }
1894
1895 /* TOP = stack[--sp] + TOP */
1896
1897 static void
1898 ppc_emit_add (void)
1899 {
1900 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1901 "lwz " TMP_SECOND ", 4(30)\n"
1902 "addc 4, 6, 4 \n"
1903 "adde 3, 5, 3 \n");
1904 }
1905
1906 /* TOP = stack[--sp] - TOP */
1907
1908 static void
1909 ppc_emit_sub (void)
1910 {
1911 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1912 "lwz " TMP_SECOND ", 4(30) \n"
1913 "subfc 4, 4, 6 \n"
1914 "subfe 3, 3, 5 \n");
1915 }
1916
1917 /* TOP = stack[--sp] * TOP */
1918
1919 static void
1920 ppc_emit_mul (void)
1921 {
1922 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1923 "lwz " TMP_SECOND ", 4(30) \n"
1924 "mulhwu 7, 6, 4 \n"
1925 "mullw 3, 6, 3 \n"
1926 "mullw 5, 4, 5 \n"
1927 "mullw 4, 6, 4 \n"
1928 "add 3, 5, 3 \n"
1929 "add 3, 7, 3 \n");
1930 }
1931
1932 /* TOP = stack[--sp] << TOP */
1933
1934 static void
1935 ppc_emit_lsh (void)
1936 {
1937 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1938 "lwz " TMP_SECOND ", 4(30) \n"
1939 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1940 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1941 "slw 5, 5, 4\n" /* Shift high part left */
1942 "slw 4, 6, 4\n" /* Shift low part left */
1943 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1944 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1945 "or 3, 5, 3\n"
1946 "or 3, 7, 3\n"); /* Assemble high part */
1947 }
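
/* Roughly, the sequence above builds the 64-bit shift from 32-bit ops
   (a sketch; shift is the value in TOP, 0..63):

     new_low  = low << shift
     new_high = (high << shift) | (low >> (32 - shift)) | (low << (shift - 32))

   relying on slw/srw yielding zero for (6-bit) shift amounts of 32..63,
   which makes the terms that do not apply vanish.  Literal C would need
   extra guards here, since shifting by 32 or more is undefined in C.  */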
1948
1949 /* Top = stack[--sp] >> TOP
1950 (Arithmetic shift right) */
1951
1952 static void
1953 ppc_emit_rsh_signed (void)
1954 {
1955 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1956 "lwz " TMP_SECOND ", 4(30) \n"
1957 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1958 "sraw 3, 5, 4\n" /* Shift high part right */
1959 "cmpwi 7, 1\n"
1960 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1961 "sraw 4, 5, 7\n" /* Shift high to low */
1962 "b 2f\n"
1963 "1:\n"
1964 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1965 "srw 4, 6, 4\n" /* Shift low part right */
1966 "slw 5, 5, 7\n" /* Shift high to low */
1967 "or 4, 4, 5\n" /* Assemble low part */
1968 "2:\n");
1969 }
1970
1971 /* Top = stack[--sp] >> TOP
1972 (Logical shift right) */
1973
1974 static void
1975 ppc_emit_rsh_unsigned (void)
1976 {
1977 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1978 "lwz " TMP_SECOND ", 4(30) \n"
1979 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1980 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1981 "srw 6, 6, 4\n" /* Shift low part right */
1982 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1983 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1984 "or 6, 6, 3\n"
1985 "srw 3, 5, 4\n" /* Shift high part right */
1986 "or 4, 6, 7\n"); /* Assemble low part */
1987 }
1988
1989 /* Emit code for the sign extension specified by ARG. */
1990
1991 static void
1992 ppc_emit_ext (int arg)
1993 {
1994 switch (arg)
1995 {
1996 case 8:
1997 EMIT_ASM ("extsb 4, 4\n"
1998 "srawi 3, 4, 31");
1999 break;
2000 case 16:
2001 EMIT_ASM ("extsh 4, 4\n"
2002 "srawi 3, 4, 31");
2003 break;
2004 case 32:
2005 EMIT_ASM ("srawi 3, 4, 31");
2006 break;
2007 default:
2008 emit_error = 1;
2009 }
2010 }
2011
2012 /* Emit code for zero-extension specified by ARG. */
2013
2014 static void
2015 ppc_emit_zero_ext (int arg)
2016 {
2017 switch (arg)
2018 {
2019 case 8:
2020 EMIT_ASM ("clrlwi 4,4,24\n"
2021 "li 3, 0\n");
2022 break;
2023 case 16:
2024 EMIT_ASM ("clrlwi 4,4,16\n"
2025 "li 3, 0\n");
2026 break;
2027 case 32:
2028 EMIT_ASM ("li 3, 0");
2029 break;
2030 default:
2031 emit_error = 1;
2032 }
2033 }
2034
2035 /* TOP = !TOP
2036 i.e., TOP = (TOP == 0) ? 1 : 0; */
2037
2038 static void
2039 ppc_emit_log_not (void)
2040 {
2041 EMIT_ASM ("or 4, 3, 4 \n"
2042 "cntlzw 4, 4 \n"
2043 "srwi 4, 4, 5 \n"
2044 "li 3, 0 \n");
2045 }
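
/* The cntlzw idiom above: cntlzw returns 32 only for a zero input, so
   "cntlzw 4, 4" followed by "srwi 4, 4, 5" computes

     r4 = ((high | low) == 0) ? 1 : 0

   which is exactly !TOP for the 64-bit value held in r3:r4.  The same
   trick is reused by ppc_emit_equal below on the XOR of its operands.  */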
2046
2047 /* TOP = stack[--sp] & TOP */
2048
2049 static void
2050 ppc_emit_bit_and (void)
2051 {
2052 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2053 "lwz " TMP_SECOND ", 4(30) \n"
2054 "and 4, 6, 4 \n"
2055 "and 3, 5, 3 \n");
2056 }
2057
2058 /* TOP = stack[--sp] | TOP */
2059
2060 static void
2061 ppc_emit_bit_or (void)
2062 {
2063 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2064 "lwz " TMP_SECOND ", 4(30) \n"
2065 "or 4, 6, 4 \n"
2066 "or 3, 5, 3 \n");
2067 }
2068
2069 /* TOP = stack[--sp] ^ TOP */
2070
2071 static void
2072 ppc_emit_bit_xor (void)
2073 {
2074 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2075 "lwz " TMP_SECOND ", 4(30) \n"
2076 "xor 4, 6, 4 \n"
2077 "xor 3, 5, 3 \n");
2078 }
2079
2080 /* TOP = ~TOP
2081 i.e., TOP = ~(TOP | TOP) */
2082
2083 static void
2084 ppc_emit_bit_not (void)
2085 {
2086 EMIT_ASM ("nor 3, 3, 3 \n"
2087 "nor 4, 4, 4 \n");
2088 }
2089
2090 /* TOP = stack[--sp] == TOP */
2091
2092 static void
2093 ppc_emit_equal (void)
2094 {
2095 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2096 "lwz " TMP_SECOND ", 4(30) \n"
2097 "xor 4, 6, 4 \n"
2098 "xor 3, 5, 3 \n"
2099 "or 4, 3, 4 \n"
2100 "cntlzw 4, 4 \n"
2101 "srwi 4, 4, 5 \n"
2102 "li 3, 0 \n");
2103 }
2104
2105 /* TOP = stack[--sp] < TOP
2106 (Signed comparison) */
2107
2108 static void
2109 ppc_emit_less_signed (void)
2110 {
2111 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2112 "lwz " TMP_SECOND ", 4(30) \n"
2113 "cmplw 6, 6, 4 \n"
2114 "cmpw 7, 5, 3 \n"
2115 /* CR6 bit 0 = low less and high equal */
2116 "crand 6*4+0, 6*4+0, 7*4+2\n"
2117 /* CR7 bit 0 = (low less and high equal) or high less */
2118 "cror 7*4+0, 7*4+0, 6*4+0\n"
2119 "mfcr 4 \n"
2120 "rlwinm 4, 4, 29, 31, 31 \n"
2121 "li 3, 0 \n");
2122 }
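
/* A note on the crand/cror operands above (the comparison goto emitters
   below use the same encoding): condition-register bits are numbered from
   CR0 bit 0, so field F, bit B is written F*4+B with B = 0:LT, 1:GT,
   2:EQ, 3:SO.  After "mfcr 4", CR7 occupies the low nibble of r4, and
   "rlwinm 4, 4, 29, 31, 31" rotates it right by 3 and masks the low bit,
   leaving CR7[LT] in r4 as 0 or 1.  */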
2123
2124 /* TOP = stack[--sp] < TOP
2125 (Unsigned comparison) */
2126
2127 static void
2128 ppc_emit_less_unsigned (void)
2129 {
2130 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2131 "lwz " TMP_SECOND ", 4(30) \n"
2132 "cmplw 6, 6, 4 \n"
2133 "cmplw 7, 5, 3 \n"
2134 /* CR6 bit 0 = low less and high equal */
2135 "crand 6*4+0, 6*4+0, 7*4+2\n"
2136 /* CR7 bit 0 = (low less and high equal) or high less */
2137 "cror 7*4+0, 7*4+0, 6*4+0\n"
2138 "mfcr 4 \n"
2139 "rlwinm 4, 4, 29, 31, 31 \n"
2140 "li 3, 0 \n");
2141 }
2142
2143 /* Dereference the memory address in TOP, reading SIZE bytes.
2144 Zero-extend the value read. */
2145
2146 static void
2147 ppc_emit_ref (int size)
2148 {
2149 switch (size)
2150 {
2151 case 1:
2152 EMIT_ASM ("lbz 4, 0(4)\n"
2153 "li 3, 0");
2154 break;
2155 case 2:
2156 EMIT_ASM ("lhz 4, 0(4)\n"
2157 "li 3, 0");
2158 break;
2159 case 4:
2160 EMIT_ASM ("lwz 4, 0(4)\n"
2161 "li 3, 0");
2162 break;
2163 case 8:
2164 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2165 EMIT_ASM ("lwz 3, 4(4)\n"
2166 "lwz 4, 0(4)");
2167 else
2168 EMIT_ASM ("lwz 3, 0(4)\n"
2169 "lwz 4, 4(4)");
2170 break;
2171 }
2172 }
2173
2174 /* TOP = NUM */
2175
2176 static void
2177 ppc_emit_const (LONGEST num)
2178 {
2179 uint32_t buf[10];
2180 uint32_t *p = buf;
2181
2182 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2183 p += gen_limm (p, 4, num & 0xffffffff, 0);
2184
2185 emit_insns (buf, p - buf);
2186 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2187 }
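
/* Example (illustrative): for NUM = 0x1122334455667788 the code above
   makes two gen_limm calls, one loading r3 with 0x11223344 (the high
   word) and one loading r4 with 0x55667788 (the low word), matching the
   r3:r4 = high:low convention described earlier.  */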
2188
2189 /* Set TOP to the value of register REG by calling the get_raw_reg function
2190 with two arguments, the collected buffer and the register number. */
2191
2192 static void
2193 ppc_emit_reg (int reg)
2194 {
2195 uint32_t buf[13];
2196 uint32_t *p = buf;
2197
2198 /* fctx->regs is passed in r3 and then saved in -16(31). */
2199 p += GEN_LWZ (p, 3, 31, -16);
2200 p += GEN_LI (p, 4, reg); /* li r4, reg */
2201 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2202
2203 emit_insns (buf, p - buf);
2204 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2205
2206 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2207 {
2208 EMIT_ASM ("mr 5, 4\n"
2209 "mr 4, 3\n"
2210 "mr 3, 5\n");
2211 }
2212 }
2213
2214 /* TOP = stack[--sp] */
2215
2216 static void
2217 ppc_emit_pop (void)
2218 {
2219 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2220 "lwz " TOP_SECOND ", 4(30) \n");
2221 }
2222
2223 /* stack[sp++] = TOP
2224
2225 Because we may use up the bytecode stack, expand it by 8 more
2226 doublewords if needed. */
2227
2228 static void
2229 ppc_emit_stack_flush (void)
2230 {
2231 /* Make sure the bytecode stack is big enough before the push.
2232 Otherwise, expand it by 64 more bytes. */
2233
2234 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2235 " stw " TOP_SECOND ", 4(30)\n"
2236 " addi 5, 30, -(8 + 8) \n"
2237 " cmpw 7, 5, 1 \n"
2238 " bgt 7, 1f \n"
2239 " stwu 31, -64(1) \n"
2240 "1:addi 30, 30, -8 \n");
2241 }
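
/* In C-like terms the sequence above is roughly (a sketch, not literal
   code; r1 and r30 stand for the stack pointer and the bytecode stack
   pointer):

     *(uint64_t *) r30 = TOP;
     if (r30 - 16 <= r1)   // not enough headroom left above SP
       r1 -= 64;           // stwu: grow the frame, storing r31 (the
                           // original SP) as the new back chain
     r30 -= 8;             // point at the new next-empty slot
*/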
2242
2243 /* Swap TOP and stack[sp-1] */
2244
2245 static void
2246 ppc_emit_swap (void)
2247 {
2248 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2249 "lwz " TMP_SECOND ", 12(30) \n"
2250 "stw " TOP_FIRST ", 8(30) \n"
2251 "stw " TOP_SECOND ", 12(30) \n"
2252 "mr 3, 5 \n"
2253 "mr 4, 6 \n");
2254 }
2255
2256 /* Discard N elements from the stack. Also used for ppc64. */
2257
2258 static void
2259 ppc_emit_stack_adjust (int n)
2260 {
2261 uint32_t buf[6];
2262 uint32_t *p = buf;
2263
2264 n = n << 3;
2265 if ((n >> 15) != 0)
2266 {
2267 emit_error = 1;
2268 return;
2269 }
2270
2271 p += GEN_ADDI (p, 30, 30, n);
2272
2273 emit_insns (buf, p - buf);
2274 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2275 }
2276
2277 /* Call function FN. */
2278
2279 static void
2280 ppc_emit_call (CORE_ADDR fn)
2281 {
2282 uint32_t buf[11];
2283 uint32_t *p = buf;
2284
2285 p += gen_call (p, fn, 0, 0);
2286
2287 emit_insns (buf, p - buf);
2288 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2289 }
2290
2291 /* FN's prototype is `LONGEST(*fn)(int)'.
2292 TOP = fn (arg1)
2293 */
2294
2295 static void
2296 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2297 {
2298 uint32_t buf[15];
2299 uint32_t *p = buf;
2300
2301 /* Setup argument. arg1 is a 16-bit value. */
2302 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2303 p += gen_call (p, fn, 0, 0);
2304
2305 emit_insns (buf, p - buf);
2306 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2307
2308 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2309 {
2310 EMIT_ASM ("mr 5, 4\n"
2311 "mr 4, 3\n"
2312 "mr 3, 5\n");
2313 }
2314 }
2315
2316 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2317 fn (arg1, TOP)
2318
2319 TOP should be preserved/restored before/after the call. */
2320
2321 static void
2322 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2323 {
2324 uint32_t buf[21];
2325 uint32_t *p = buf;
2326
2327 /* Save TOP. 0(30) is next-empty. */
2328 p += GEN_STW (p, 3, 30, 0);
2329 p += GEN_STW (p, 4, 30, 4);
2330
2331 /* Setup argument. arg1 is a 16-bit value. */
2332 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2333 {
2334 p += GEN_MR (p, 5, 4);
2335 p += GEN_MR (p, 6, 3);
2336 }
2337 else
2338 {
2339 p += GEN_MR (p, 5, 3);
2340 p += GEN_MR (p, 6, 4);
2341 }
2342 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2343 p += gen_call (p, fn, 0, 0);
2344
2345 /* Restore TOP */
2346 p += GEN_LWZ (p, 3, 30, 0);
2347 p += GEN_LWZ (p, 4, 30, 4);
2348
2349 emit_insns (buf, p - buf);
2350 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2351 }
2352
2353 /* Note in the following goto ops:
2354
2355 When emitting goto, the target address is later relocated by
2356 write_goto_address. OFFSET_P is the offset of the branch instruction
2357 in the code sequence, and SIZE_P says how to relocate the instruction,
2358 as recognized by ppc_write_goto_address. In the current implementation,
2359 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2360 */
2361
2362 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2363
2364 static void
2365 ppc_emit_if_goto (int *offset_p, int *size_p)
2366 {
2367 EMIT_ASM ("or. 3, 3, 4 \n"
2368 "lwzu " TOP_FIRST ", 8(30) \n"
2369 "lwz " TOP_SECOND ", 4(30) \n"
2370 "1:bne 0, 1b \n");
2371
2372 if (offset_p)
2373 *offset_p = 12;
2374 if (size_p)
2375 *size_p = 14;
2376 }
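
/* Editorial note: the "1:bne 0, 1b" above is a placeholder branch that
   initially targets itself.  *OFFSET_P = 12 records that the branch is
   the fourth instruction (3 * 4 bytes into the sequence), and
   *SIZE_P = 14 tells ppc_write_goto_address to patch its 14-bit BD field
   once the real target is known.  The goto emitters below follow the
   same pattern.  */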
2377
2378 /* Unconditional goto. Also used for ppc64. */
2379
2380 static void
2381 ppc_emit_goto (int *offset_p, int *size_p)
2382 {
2383 EMIT_ASM ("1:b 1b");
2384
2385 if (offset_p)
2386 *offset_p = 0;
2387 if (size_p)
2388 *size_p = 24;
2389 }
2390
2391 /* Goto if stack[--sp] == TOP */
2392
2393 static void
2394 ppc_emit_eq_goto (int *offset_p, int *size_p)
2395 {
2396 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2397 "lwz " TMP_SECOND ", 4(30) \n"
2398 "xor 4, 6, 4 \n"
2399 "xor 3, 5, 3 \n"
2400 "or. 3, 3, 4 \n"
2401 "lwzu " TOP_FIRST ", 8(30) \n"
2402 "lwz " TOP_SECOND ", 4(30) \n"
2403 "1:beq 0, 1b \n");
2404
2405 if (offset_p)
2406 *offset_p = 28;
2407 if (size_p)
2408 *size_p = 14;
2409 }
2410
2411 /* Goto if stack[--sp] != TOP */
2412
2413 static void
2414 ppc_emit_ne_goto (int *offset_p, int *size_p)
2415 {
2416 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2417 "lwz " TMP_SECOND ", 4(30) \n"
2418 "xor 4, 6, 4 \n"
2419 "xor 3, 5, 3 \n"
2420 "or. 3, 3, 4 \n"
2421 "lwzu " TOP_FIRST ", 8(30) \n"
2422 "lwz " TOP_SECOND ", 4(30) \n"
2423 "1:bne 0, 1b \n");
2424
2425 if (offset_p)
2426 *offset_p = 28;
2427 if (size_p)
2428 *size_p = 14;
2429 }
2430
2431 /* Goto if stack[--sp] < TOP */
2432
2433 static void
2434 ppc_emit_lt_goto (int *offset_p, int *size_p)
2435 {
2436 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2437 "lwz " TMP_SECOND ", 4(30) \n"
2438 "cmplw 6, 6, 4 \n"
2439 "cmpw 7, 5, 3 \n"
2440 /* CR6 bit 0 = low less and high equal */
2441 "crand 6*4+0, 6*4+0, 7*4+2\n"
2442 /* CR7 bit 0 = (low less and high equal) or high less */
2443 "cror 7*4+0, 7*4+0, 6*4+0\n"
2444 "lwzu " TOP_FIRST ", 8(30) \n"
2445 "lwz " TOP_SECOND ", 4(30)\n"
2446 "1:blt 7, 1b \n");
2447
2448 if (offset_p)
2449 *offset_p = 32;
2450 if (size_p)
2451 *size_p = 14;
2452 }
2453
2454 /* Goto if stack[--sp] <= TOP */
2455
2456 static void
2457 ppc_emit_le_goto (int *offset_p, int *size_p)
2458 {
2459 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2460 "lwz " TMP_SECOND ", 4(30) \n"
2461 "cmplw 6, 6, 4 \n"
2462 "cmpw 7, 5, 3 \n"
2463 /* CR6 bit 0 = low less/equal and high equal */
2464 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2465 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2466 "cror 7*4+0, 7*4+0, 6*4+0\n"
2467 "lwzu " TOP_FIRST ", 8(30) \n"
2468 "lwz " TOP_SECOND ", 4(30)\n"
2469 "1:blt 7, 1b \n");
2470
2471 if (offset_p)
2472 *offset_p = 32;
2473 if (size_p)
2474 *size_p = 14;
2475 }
2476
2477 /* Goto if stack[--sp] > TOP */
2478
2479 static void
2480 ppc_emit_gt_goto (int *offset_p, int *size_p)
2481 {
2482 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2483 "lwz " TMP_SECOND ", 4(30) \n"
2484 "cmplw 6, 6, 4 \n"
2485 "cmpw 7, 5, 3 \n"
2486 /* CR6 bit 0 = low greater and high equal */
2487 "crand 6*4+0, 6*4+1, 7*4+2\n"
2488 /* CR7 bit 0 = (low greater and high equal) or high greater */
2489 "cror 7*4+0, 7*4+1, 6*4+0\n"
2490 "lwzu " TOP_FIRST ", 8(30) \n"
2491 "lwz " TOP_SECOND ", 4(30)\n"
2492 "1:blt 7, 1b \n");
2493
2494 if (offset_p)
2495 *offset_p = 32;
2496 if (size_p)
2497 *size_p = 14;
2498 }
2499
2500 /* Goto if stack[--sp] >= TOP */
2501
2502 static void
2503 ppc_emit_ge_goto (int *offset_p, int *size_p)
2504 {
2505 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2506 "lwz " TMP_SECOND ", 4(30) \n"
2507 "cmplw 6, 6, 4 \n"
2508 "cmpw 7, 5, 3 \n"
2509 /* CR6 bit 0 = low ge and high equal */
2510 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2511 /* CR7 bit 0 = (low ge and high equal) or high greater */
2512 "cror 7*4+0, 7*4+1, 6*4+0\n"
2513 "lwzu " TOP_FIRST ", 8(30)\n"
2514 "lwz " TOP_SECOND ", 4(30)\n"
2515 "1:blt 7, 1b \n");
2516
2517 if (offset_p)
2518 *offset_p = 32;
2519 if (size_p)
2520 *size_p = 14;
2521 }
2522
2523 /* Relocate a previously emitted branch instruction. FROM is the address
2524 of the branch instruction, TO is the goto target address, and SIZE
2525 is the value we set via *SIZE_P before. Currently, it is either
2526 24 (branch) or 14 (conditional branch).
2527 Also used for ppc64. */
2528
2529 static void
2530 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2531 {
2532 long rel = to - from;
2533 uint32_t insn;
2534 int opcd;
2535
2536 read_inferior_memory (from, (unsigned char *) &insn, 4);
2537 opcd = (insn >> 26) & 0x3f;
2538
2539 switch (size)
2540 {
2541 case 14:
2542 if (opcd != 16
2543 || (rel >= (1 << 15) || rel < -(1 << 15)))
2544 emit_error = 1;
2545 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2546 break;
2547 case 24:
2548 if (opcd != 18
2549 || (rel >= (1 << 25) || rel < -(1 << 25)))
2550 emit_error = 1;
2551 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2552 break;
2553 default:
2554 emit_error = 1;
2555 }
2556
2557 if (!emit_error)
2558 target_write_memory (from, (unsigned char *) &insn, 4);
2559 }
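
/* Worked example (illustrative): suppose ppc_emit_eq_goto above emitted
   its "beq" placeholder at FROM and the bytecode compiler later resolves
   the target TO.  With SIZE == 14 and REL = TO - FROM fitting in a
   signed 16-bit, word-aligned displacement, the patch keeps the
   opcode/BO/BI bits and rewrites only the displacement field:

     insn = (insn & ~0xfffc) | (rel & 0xfffc);

   The SIZE == 24 case does the same for the 26-bit field of "b".  */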
2560
2561 /* Table of emit ops for 32-bit. */
2562
2563 static struct emit_ops ppc_emit_ops_impl =
2564 {
2565 ppc_emit_prologue,
2566 ppc_emit_epilogue,
2567 ppc_emit_add,
2568 ppc_emit_sub,
2569 ppc_emit_mul,
2570 ppc_emit_lsh,
2571 ppc_emit_rsh_signed,
2572 ppc_emit_rsh_unsigned,
2573 ppc_emit_ext,
2574 ppc_emit_log_not,
2575 ppc_emit_bit_and,
2576 ppc_emit_bit_or,
2577 ppc_emit_bit_xor,
2578 ppc_emit_bit_not,
2579 ppc_emit_equal,
2580 ppc_emit_less_signed,
2581 ppc_emit_less_unsigned,
2582 ppc_emit_ref,
2583 ppc_emit_if_goto,
2584 ppc_emit_goto,
2585 ppc_write_goto_address,
2586 ppc_emit_const,
2587 ppc_emit_call,
2588 ppc_emit_reg,
2589 ppc_emit_pop,
2590 ppc_emit_stack_flush,
2591 ppc_emit_zero_ext,
2592 ppc_emit_swap,
2593 ppc_emit_stack_adjust,
2594 ppc_emit_int_call_1,
2595 ppc_emit_void_call_2,
2596 ppc_emit_eq_goto,
2597 ppc_emit_ne_goto,
2598 ppc_emit_lt_goto,
2599 ppc_emit_le_goto,
2600 ppc_emit_gt_goto,
2601 ppc_emit_ge_goto
2602 };
2603
2604 #ifdef __powerpc64__
2605
2606 /*
2607
2608 Bytecode execution stack frame - 64-bit
2609
2610 | LR save area (SP + 16)
2611 | CR save area (SP + 8)
2612 SP' -> +- Back chain (SP + 0)
2613 | Save r31 for accessing saved arguments
2614 | Save r30 for bytecode stack pointer
2615 | Save r4 for incoming argument *value
2616 | Save r3 for incoming argument regs
2617 r30 -> +- Bytecode execution stack
2618 |
2619 | 64 bytes (8 doublewords) initially.
2620 | Expand stack as needed.
2621 |
2622 +-
2623 | Some padding for minimum stack frame.
2624 | 112 for ELFv1.
2625 SP +- Back-chain (SP')
2626
2627 initial frame size
2628 = 112 + (4 * 8) + 64
2629 = 208
2630
2631 r30 is the stack pointer for the bytecode machine.
2632 It should point to the next empty slot, so we can use LDU for pop.
2633 r3 is used as a cache of the TOP value.
2634 It was the first argument, the pointer to regs.
2635 r4 is the second argument, the pointer to the result.
2636 We should set *result = TOP before leaving this function.
2637
2638 Note:
2639 * To restore the stack in the epilogue
2640 => sp = r31
2641 * To check that the stack is big enough for bytecode execution
2642 => r30 - 8 > SP + 112
2643 * To return the execution result
2644 => 0(r4) = TOP
2645
2646 */
2647
2648 /* Emit prologue in inferior memory. See above comments. */
2649
2650 static void
2651 ppc64v1_emit_prologue (void)
2652 {
2653 /* On ELFv1, function pointers really point to a function descriptor,
2654 so emit one here. We don't care about the contents of words 1 and 2,
2655 so let them just overlap our code. */
2656 uint64_t opd = current_insn_ptr + 8;
2657 uint32_t buf[2];
2658
2659 /* Mind the strict aliasing rules. */
2660 memcpy (buf, &opd, sizeof buf);
2661 emit_insns (buf, 2);
2662 EMIT_ASM (/* Save return address. */
2663 "mflr 0 \n"
2664 "std 0, 16(1) \n"
2665 /* Save r31, r30 and the incoming arguments. */
2666 "std 31, -8(1) \n"
2667 "std 30, -16(1) \n"
2668 "std 4, -24(1) \n"
2669 "std 3, -32(1) \n"
2670 /* Point r31 at the current r1 for accessing the arguments. */
2671 "mr 31, 1 \n"
2672 /* Adjust SP. 208 is the initial frame size. */
2673 "stdu 1, -208(1) \n"
2674 /* Set r30 to point to the stack top. */
2675 "addi 30, 1, 168 \n"
2676 /* Initialize r3/TOP to 0. */
2677 "li 3, 0 \n");
2678 }
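
/* Editorial note: an ELFv1 function descriptor is three doublewords
   (entry address, TOC pointer, environment pointer).  Only the entry
   doubleword is emitted explicitly above, pointing 8 bytes past itself
   at the first real instruction; the TOC and environment slots simply
   overlap that code, which is fine because, as the comment above notes,
   their contents are never relied on here.  */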
2679
2680 /* Emit prologue in inferior memory. See above comments. */
2681
2682 static void
2683 ppc64v2_emit_prologue (void)
2684 {
2685 EMIT_ASM (/* Save return address. */
2686 "mflr 0 \n"
2687 "std 0, 16(1) \n"
2688 /* Save r31, r30 and the incoming arguments. */
2689 "std 31, -8(1) \n"
2690 "std 30, -16(1) \n"
2691 "std 4, -24(1) \n"
2692 "std 3, -32(1) \n"
2693 /* Point r31 at the current r1 for accessing the arguments. */
2694 "mr 31, 1 \n"
2695 /* Adjust SP. 208 is the initial frame size. */
2696 "stdu 1, -208(1) \n"
2697 /* Set r30 to point to the stack top. */
2698 "addi 30, 1, 168 \n"
2699 /* Initialize r3/TOP to 0. */
2700 "li 3, 0 \n");
2701 }
2702
2703 /* Emit epilogue in inferior memory. See above comments. */
2704
2705 static void
2706 ppc64_emit_epilogue (void)
2707 {
2708 EMIT_ASM (/* Restore SP. */
2709 "ld 1, 0(1) \n"
2710 /* *result = TOP */
2711 "ld 4, -24(1) \n"
2712 "std 3, 0(4) \n"
2713 /* Restore registers. */
2714 "ld 31, -8(1) \n"
2715 "ld 30, -16(1) \n"
2716 /* Restore LR. */
2717 "ld 0, 16(1) \n"
2718 /* Return 0 for no-error. */
2719 "li 3, 0 \n"
2720 "mtlr 0 \n"
2721 "blr \n");
2722 }
2723
2724 /* TOP = stack[--sp] + TOP */
2725
2726 static void
2727 ppc64_emit_add (void)
2728 {
2729 EMIT_ASM ("ldu 4, 8(30) \n"
2730 "add 3, 4, 3 \n");
2731 }
2732
2733 /* TOP = stack[--sp] - TOP */
2734
2735 static void
2736 ppc64_emit_sub (void)
2737 {
2738 EMIT_ASM ("ldu 4, 8(30) \n"
2739 "sub 3, 4, 3 \n");
2740 }
2741
2742 /* TOP = stack[--sp] * TOP */
2743
2744 static void
2745 ppc64_emit_mul (void)
2746 {
2747 EMIT_ASM ("ldu 4, 8(30) \n"
2748 "mulld 3, 4, 3 \n");
2749 }
2750
2751 /* TOP = stack[--sp] << TOP */
2752
2753 static void
2754 ppc64_emit_lsh (void)
2755 {
2756 EMIT_ASM ("ldu 4, 8(30) \n"
2757 "sld 3, 4, 3 \n");
2758 }
2759
2760 /* Top = stack[--sp] >> TOP
2761 (Arithmetic shift right) */
2762
2763 static void
2764 ppc64_emit_rsh_signed (void)
2765 {
2766 EMIT_ASM ("ldu 4, 8(30) \n"
2767 "srad 3, 4, 3 \n");
2768 }
2769
2770 /* Top = stack[--sp] >> TOP
2771 (Logical shift right) */
2772
2773 static void
2774 ppc64_emit_rsh_unsigned (void)
2775 {
2776 EMIT_ASM ("ldu 4, 8(30) \n"
2777 "srd 3, 4, 3 \n");
2778 }
2779
2780 /* Emit code for the sign extension specified by ARG. */
2781
2782 static void
2783 ppc64_emit_ext (int arg)
2784 {
2785 switch (arg)
2786 {
2787 case 8:
2788 EMIT_ASM ("extsb 3, 3");
2789 break;
2790 case 16:
2791 EMIT_ASM ("extsh 3, 3");
2792 break;
2793 case 32:
2794 EMIT_ASM ("extsw 3, 3");
2795 break;
2796 default:
2797 emit_error = 1;
2798 }
2799 }
2800
2801 /* Emit code for zero-extension specified by ARG. */
2802
2803 static void
2804 ppc64_emit_zero_ext (int arg)
2805 {
2806 switch (arg)
2807 {
2808 case 8:
2809 EMIT_ASM ("rldicl 3,3,0,56");
2810 break;
2811 case 16:
2812 EMIT_ASM ("rldicl 3,3,0,48");
2813 break;
2814 case 32:
2815 EMIT_ASM ("rldicl 3,3,0,32");
2816 break;
2817 default:
2818 emit_error = 1;
2819 }
2820 }
2821
2822 /* TOP = !TOP
2823 i.e., TOP = (TOP == 0) ? 1 : 0; */
2824
2825 static void
2826 ppc64_emit_log_not (void)
2827 {
2828 EMIT_ASM ("cntlzd 3, 3 \n"
2829 "srdi 3, 3, 6 \n");
2830 }
2831
2832 /* TOP = stack[--sp] & TOP */
2833
2834 static void
2835 ppc64_emit_bit_and (void)
2836 {
2837 EMIT_ASM ("ldu 4, 8(30) \n"
2838 "and 3, 4, 3 \n");
2839 }
2840
2841 /* TOP = stack[--sp] | TOP */
2842
2843 static void
2844 ppc64_emit_bit_or (void)
2845 {
2846 EMIT_ASM ("ldu 4, 8(30) \n"
2847 "or 3, 4, 3 \n");
2848 }
2849
2850 /* TOP = stack[--sp] ^ TOP */
2851
2852 static void
2853 ppc64_emit_bit_xor (void)
2854 {
2855 EMIT_ASM ("ldu 4, 8(30) \n"
2856 "xor 3, 4, 3 \n");
2857 }
2858
2859 /* TOP = ~TOP
2860 i.e., TOP = ~(TOP | TOP) */
2861
2862 static void
2863 ppc64_emit_bit_not (void)
2864 {
2865 EMIT_ASM ("nor 3, 3, 3 \n");
2866 }
2867
2868 /* TOP = stack[--sp] == TOP */
2869
2870 static void
2871 ppc64_emit_equal (void)
2872 {
2873 EMIT_ASM ("ldu 4, 8(30) \n"
2874 "xor 3, 3, 4 \n"
2875 "cntlzd 3, 3 \n"
2876 "srdi 3, 3, 6 \n");
2877 }
2878
2879 /* TOP = stack[--sp] < TOP
2880 (Signed comparison) */
2881
2882 static void
2883 ppc64_emit_less_signed (void)
2884 {
2885 EMIT_ASM ("ldu 4, 8(30) \n"
2886 "cmpd 7, 4, 3 \n"
2887 "mfcr 3 \n"
2888 "rlwinm 3, 3, 29, 31, 31 \n");
2889 }
2890
2891 /* TOP = stack[--sp] < TOP
2892 (Unsigned comparison) */
2893
2894 static void
2895 ppc64_emit_less_unsigned (void)
2896 {
2897 EMIT_ASM ("ldu 4, 8(30) \n"
2898 "cmpld 7, 4, 3 \n"
2899 "mfcr 3 \n"
2900 "rlwinm 3, 3, 29, 31, 31 \n");
2901 }
2902
2903 /* Dereference the memory address in TOP, reading SIZE bytes.
2904 Zero-extend the value read. */
2905
2906 static void
2907 ppc64_emit_ref (int size)
2908 {
2909 switch (size)
2910 {
2911 case 1:
2912 EMIT_ASM ("lbz 3, 0(3)");
2913 break;
2914 case 2:
2915 EMIT_ASM ("lhz 3, 0(3)");
2916 break;
2917 case 4:
2918 EMIT_ASM ("lwz 3, 0(3)");
2919 break;
2920 case 8:
2921 EMIT_ASM ("ld 3, 0(3)");
2922 break;
2923 }
2924 }
2925
2926 /* TOP = NUM */
2927
2928 static void
2929 ppc64_emit_const (LONGEST num)
2930 {
2931 uint32_t buf[5];
2932 uint32_t *p = buf;
2933
2934 p += gen_limm (p, 3, num, 1);
2935
2936 emit_insns (buf, p - buf);
2937 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2938 }
2939
2940 /* Set TOP to the value of register REG by calling the get_raw_reg function
2941 with two arguments, the collected buffer and the register number. */
2942
2943 static void
2944 ppc64v1_emit_reg (int reg)
2945 {
2946 uint32_t buf[15];
2947 uint32_t *p = buf;
2948
2949 /* fctx->regs is passed in r3 and then saved at -32(r31), i.e. 176(r1). */
2950 p += GEN_LD (p, 3, 31, -32);
2951 p += GEN_LI (p, 4, reg);
2952 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2953 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2954 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2955
2956 emit_insns (buf, p - buf);
2957 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2958 }
2959
2960 /* Likewise, for ELFv2. */
2961
2962 static void
2963 ppc64v2_emit_reg (int reg)
2964 {
2965 uint32_t buf[12];
2966 uint32_t *p = buf;
2967
2968 /* fctx->regs is passed in r3 and then saved at -32(r31), i.e. 176(r1). */
2969 p += GEN_LD (p, 3, 31, -32);
2970 p += GEN_LI (p, 4, reg);
2971 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2972 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2973 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2974
2975 emit_insns (buf, p - buf);
2976 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2977 }
2978
2979 /* TOP = stack[--sp] */
2980
2981 static void
2982 ppc64_emit_pop (void)
2983 {
2984 EMIT_ASM ("ldu 3, 8(30)");
2985 }
2986
2987 /* stack[sp++] = TOP
2988
2989 Because we may use up the bytecode stack, expand it by 8 more
2990 doublewords if needed. */
2991
2992 static void
2993 ppc64_emit_stack_flush (void)
2994 {
2995 /* Make sure the bytecode stack is big enough before the push.
2996 Otherwise, expand it by 64 more bytes. */
2997
2998 EMIT_ASM (" std 3, 0(30) \n"
2999 " addi 4, 30, -(112 + 8) \n"
3000 " cmpd 7, 4, 1 \n"
3001 " bgt 7, 1f \n"
3002 " stdu 31, -64(1) \n"
3003 "1:addi 30, 30, -8 \n");
3004 }
3005
3006 /* Swap TOP and stack[sp-1] */
3007
3008 static void
3009 ppc64_emit_swap (void)
3010 {
3011 EMIT_ASM ("ld 4, 8(30) \n"
3012 "std 3, 8(30) \n"
3013 "mr 3, 4 \n");
3014 }
3015
3016 /* Call function FN - ELFv1. */
3017
3018 static void
3019 ppc64v1_emit_call (CORE_ADDR fn)
3020 {
3021 uint32_t buf[13];
3022 uint32_t *p = buf;
3023
3024 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3025 p += gen_call (p, fn, 1, 1);
3026 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3027
3028 emit_insns (buf, p - buf);
3029 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3030 }
3031
3032 /* Call function FN - ELFv2. */
3033
3034 static void
3035 ppc64v2_emit_call (CORE_ADDR fn)
3036 {
3037 uint32_t buf[10];
3038 uint32_t *p = buf;
3039
3040 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3041 p += gen_call (p, fn, 1, 0);
3042 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3043
3044 emit_insns (buf, p - buf);
3045 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3046 }
3047
3048 /* FN's prototype is `LONGEST(*fn)(int)'.
3049 TOP = fn (arg1)
3050 */
3051
3052 static void
3053 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3054 {
3055 uint32_t buf[13];
3056 uint32_t *p = buf;
3057
3058 /* Setup argument. arg1 is a 16-bit value. */
3059 p += gen_limm (p, 3, arg1, 1);
3060 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3061 p += gen_call (p, fn, 1, 1);
3062 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3063
3064 emit_insns (buf, p - buf);
3065 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3066 }
3067
3068 /* Likewise for ELFv2. */
3069
3070 static void
3071 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3072 {
3073 uint32_t buf[10];
3074 uint32_t *p = buf;
3075
3076 /* Setup argument. arg1 is a 16-bit value. */
3077 p += gen_limm (p, 3, arg1, 1);
3078 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3079 p += gen_call (p, fn, 1, 0);
3080 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3081
3082 emit_insns (buf, p - buf);
3083 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3084 }
3085
3086 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3087 fn (arg1, TOP)
3088
3089 TOP should be preserved/restored before/after the call. */
3090
3091 static void
3092 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3093 {
3094 uint32_t buf[17];
3095 uint32_t *p = buf;
3096
3097 /* Save TOP. 0(30) is next-empty. */
3098 p += GEN_STD (p, 3, 30, 0);
3099
3100 /* Setup argument. arg1 is a 16-bit value. */
3101 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3102 p += gen_limm (p, 3, arg1, 1);
3103 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3104 p += gen_call (p, fn, 1, 1);
3105 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3106
3107 /* Restore TOP */
3108 p += GEN_LD (p, 3, 30, 0);
3109
3110 emit_insns (buf, p - buf);
3111 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3112 }
3113
3114 /* Likewise for ELFv2. */
3115
3116 static void
3117 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3118 {
3119 uint32_t buf[14];
3120 uint32_t *p = buf;
3121
3122 /* Save TOP. 0(30) is next-empty. */
3123 p += GEN_STD (p, 3, 30, 0);
3124
3125 /* Setup argument. arg1 is a 16-bit value. */
3126 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3127 p += gen_limm (p, 3, arg1, 1);
3128 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3129 p += gen_call (p, fn, 1, 0);
3130 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3131
3132 /* Restore TOP */
3133 p += GEN_LD (p, 3, 30, 0);
3134
3135 emit_insns (buf, p - buf);
3136 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3137 }
3138
3139 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3140
3141 static void
3142 ppc64_emit_if_goto (int *offset_p, int *size_p)
3143 {
3144 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3145 "ldu 3, 8(30) \n"
3146 "1:bne 7, 1b \n");
3147
3148 if (offset_p)
3149 *offset_p = 8;
3150 if (size_p)
3151 *size_p = 14;
3152 }
3153
3154 /* Goto if stack[--sp] == TOP */
3155
3156 static void
3157 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3158 {
3159 EMIT_ASM ("ldu 4, 8(30) \n"
3160 "cmpd 7, 4, 3 \n"
3161 "ldu 3, 8(30) \n"
3162 "1:beq 7, 1b \n");
3163
3164 if (offset_p)
3165 *offset_p = 12;
3166 if (size_p)
3167 *size_p = 14;
3168 }
3169
3170 /* Goto if stack[--sp] != TOP */
3171
3172 static void
3173 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3174 {
3175 EMIT_ASM ("ldu 4, 8(30) \n"
3176 "cmpd 7, 4, 3 \n"
3177 "ldu 3, 8(30) \n"
3178 "1:bne 7, 1b \n");
3179
3180 if (offset_p)
3181 *offset_p = 12;
3182 if (size_p)
3183 *size_p = 14;
3184 }
3185
3186 /* Goto if stack[--sp] < TOP */
3187
3188 static void
3189 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3190 {
3191 EMIT_ASM ("ldu 4, 8(30) \n"
3192 "cmpd 7, 4, 3 \n"
3193 "ldu 3, 8(30) \n"
3194 "1:blt 7, 1b \n");
3195
3196 if (offset_p)
3197 *offset_p = 12;
3198 if (size_p)
3199 *size_p = 14;
3200 }
3201
3202 /* Goto if stack[--sp] <= TOP */
3203
3204 static void
3205 ppc64_emit_le_goto (int *offset_p, int *size_p)
3206 {
3207 EMIT_ASM ("ldu 4, 8(30) \n"
3208 "cmpd 7, 4, 3 \n"
3209 "ldu 3, 8(30) \n"
3210 "1:ble 7, 1b \n");
3211
3212 if (offset_p)
3213 *offset_p = 12;
3214 if (size_p)
3215 *size_p = 14;
3216 }
3217
3218 /* Goto if stack[--sp] > TOP */
3219
3220 static void
3221 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3222 {
3223 EMIT_ASM ("ldu 4, 8(30) \n"
3224 "cmpd 7, 4, 3 \n"
3225 "ldu 3, 8(30) \n"
3226 "1:bgt 7, 1b \n");
3227
3228 if (offset_p)
3229 *offset_p = 12;
3230 if (size_p)
3231 *size_p = 14;
3232 }
3233
3234 /* Goto if stack[--sp] >= TOP */
3235
3236 static void
3237 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3238 {
3239 EMIT_ASM ("ldu 4, 8(30) \n"
3240 "cmpd 7, 4, 3 \n"
3241 "ldu 3, 8(30) \n"
3242 "1:bge 7, 1b \n");
3243
3244 if (offset_p)
3245 *offset_p = 12;
3246 if (size_p)
3247 *size_p = 14;
3248 }
3249
3250 /* Table of emit ops for 64-bit ELFv1. */
3251
3252 static struct emit_ops ppc64v1_emit_ops_impl =
3253 {
3254 ppc64v1_emit_prologue,
3255 ppc64_emit_epilogue,
3256 ppc64_emit_add,
3257 ppc64_emit_sub,
3258 ppc64_emit_mul,
3259 ppc64_emit_lsh,
3260 ppc64_emit_rsh_signed,
3261 ppc64_emit_rsh_unsigned,
3262 ppc64_emit_ext,
3263 ppc64_emit_log_not,
3264 ppc64_emit_bit_and,
3265 ppc64_emit_bit_or,
3266 ppc64_emit_bit_xor,
3267 ppc64_emit_bit_not,
3268 ppc64_emit_equal,
3269 ppc64_emit_less_signed,
3270 ppc64_emit_less_unsigned,
3271 ppc64_emit_ref,
3272 ppc64_emit_if_goto,
3273 ppc_emit_goto,
3274 ppc_write_goto_address,
3275 ppc64_emit_const,
3276 ppc64v1_emit_call,
3277 ppc64v1_emit_reg,
3278 ppc64_emit_pop,
3279 ppc64_emit_stack_flush,
3280 ppc64_emit_zero_ext,
3281 ppc64_emit_swap,
3282 ppc_emit_stack_adjust,
3283 ppc64v1_emit_int_call_1,
3284 ppc64v1_emit_void_call_2,
3285 ppc64_emit_eq_goto,
3286 ppc64_emit_ne_goto,
3287 ppc64_emit_lt_goto,
3288 ppc64_emit_le_goto,
3289 ppc64_emit_gt_goto,
3290 ppc64_emit_ge_goto
3291 };
3292
3293 /* Table of emit ops for 64-bit ELFv2. */
3294
3295 static struct emit_ops ppc64v2_emit_ops_impl =
3296 {
3297 ppc64v2_emit_prologue,
3298 ppc64_emit_epilogue,
3299 ppc64_emit_add,
3300 ppc64_emit_sub,
3301 ppc64_emit_mul,
3302 ppc64_emit_lsh,
3303 ppc64_emit_rsh_signed,
3304 ppc64_emit_rsh_unsigned,
3305 ppc64_emit_ext,
3306 ppc64_emit_log_not,
3307 ppc64_emit_bit_and,
3308 ppc64_emit_bit_or,
3309 ppc64_emit_bit_xor,
3310 ppc64_emit_bit_not,
3311 ppc64_emit_equal,
3312 ppc64_emit_less_signed,
3313 ppc64_emit_less_unsigned,
3314 ppc64_emit_ref,
3315 ppc64_emit_if_goto,
3316 ppc_emit_goto,
3317 ppc_write_goto_address,
3318 ppc64_emit_const,
3319 ppc64v2_emit_call,
3320 ppc64v2_emit_reg,
3321 ppc64_emit_pop,
3322 ppc64_emit_stack_flush,
3323 ppc64_emit_zero_ext,
3324 ppc64_emit_swap,
3325 ppc_emit_stack_adjust,
3326 ppc64v2_emit_int_call_1,
3327 ppc64v2_emit_void_call_2,
3328 ppc64_emit_eq_goto,
3329 ppc64_emit_ne_goto,
3330 ppc64_emit_lt_goto,
3331 ppc64_emit_le_goto,
3332 ppc64_emit_gt_goto,
3333 ppc64_emit_ge_goto
3334 };
3335
3336 #endif
3337
3338 /* Implementation of linux_target_ops method "emit_ops". */
3339
3340 static struct emit_ops *
3341 ppc_emit_ops (void)
3342 {
3343 #ifdef __powerpc64__
3344 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3345
3346 if (register_size (regcache->tdesc, 0) == 8)
3347 {
3348 if (is_elfv2_inferior ())
3349 return &ppc64v2_emit_ops_impl;
3350 else
3351 return &ppc64v1_emit_ops_impl;
3352 }
3353 #endif
3354 return &ppc_emit_ops_impl;
3355 }
3356
3357 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3358
3359 static int
3360 ppc_get_ipa_tdesc_idx (void)
3361 {
3362 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3363 const struct target_desc *tdesc = regcache->tdesc;
3364
3365 #ifdef __powerpc64__
3366 if (tdesc == tdesc_powerpc_64l)
3367 return PPC_TDESC_BASE;
3368 if (tdesc == tdesc_powerpc_altivec64l)
3369 return PPC_TDESC_ALTIVEC;
3370 if (tdesc == tdesc_powerpc_vsx64l)
3371 return PPC_TDESC_VSX;
3372 if (tdesc == tdesc_powerpc_isa205_64l)
3373 return PPC_TDESC_ISA205;
3374 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3375 return PPC_TDESC_ISA205_ALTIVEC;
3376 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3377 return PPC_TDESC_ISA205_VSX;
3378 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3379 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3380 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3381 return PPC_TDESC_ISA207_VSX;
3382 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3383 return PPC_TDESC_ISA207_HTM_VSX;
3384 #endif
3385
3386 if (tdesc == tdesc_powerpc_32l)
3387 return PPC_TDESC_BASE;
3388 if (tdesc == tdesc_powerpc_altivec32l)
3389 return PPC_TDESC_ALTIVEC;
3390 if (tdesc == tdesc_powerpc_vsx32l)
3391 return PPC_TDESC_VSX;
3392 if (tdesc == tdesc_powerpc_isa205_32l)
3393 return PPC_TDESC_ISA205;
3394 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3395 return PPC_TDESC_ISA205_ALTIVEC;
3396 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3397 return PPC_TDESC_ISA205_VSX;
3398 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3399 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3400 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3401 return PPC_TDESC_ISA207_VSX;
3402 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3403 return PPC_TDESC_ISA207_HTM_VSX;
3404 if (tdesc == tdesc_powerpc_e500l)
3405 return PPC_TDESC_E500;
3406
3407 return 0;
3408 }
3409
3410 struct linux_target_ops the_low_target = {
3411 ppc_supports_z_point_type,
3412 ppc_insert_point,
3413 ppc_remove_point,
3414 NULL,
3415 NULL,
3416 ppc_collect_ptrace_register,
3417 ppc_supply_ptrace_register,
3418 NULL, /* siginfo_fixup */
3419 NULL, /* new_process */
3420 NULL, /* delete_process */
3421 NULL, /* new_thread */
3422 NULL, /* delete_thread */
3423 NULL, /* new_fork */
3424 NULL, /* prepare_to_resume */
3425 NULL, /* process_qsupported */
3426 ppc_supports_tracepoints,
3427 ppc_get_thread_area,
3428 ppc_install_fast_tracepoint_jump_pad,
3429 ppc_emit_ops,
3430 ppc_get_min_fast_tracepoint_insn_len,
3431 NULL, /* supports_range_stepping */
3432 ppc_supports_hardware_single_step,
3433 NULL, /* get_syscall_trapinfo */
3434 ppc_get_ipa_tdesc_idx,
3435 };
3436
3437 /* The linux target ops object. */
3438
3439 linux_process_target *the_linux_target = &the_ppc_target;
3440
3441 void
3442 initialize_low_arch (void)
3443 {
3444 /* Initialize the Linux target descriptions. */
3445
3446 init_registers_powerpc_32l ();
3447 init_registers_powerpc_altivec32l ();
3448 init_registers_powerpc_vsx32l ();
3449 init_registers_powerpc_isa205_32l ();
3450 init_registers_powerpc_isa205_altivec32l ();
3451 init_registers_powerpc_isa205_vsx32l ();
3452 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3453 init_registers_powerpc_isa207_vsx32l ();
3454 init_registers_powerpc_isa207_htm_vsx32l ();
3455 init_registers_powerpc_e500l ();
3456 #if __powerpc64__
3457 init_registers_powerpc_64l ();
3458 init_registers_powerpc_altivec64l ();
3459 init_registers_powerpc_vsx64l ();
3460 init_registers_powerpc_isa205_64l ();
3461 init_registers_powerpc_isa205_altivec64l ();
3462 init_registers_powerpc_isa205_vsx64l ();
3463 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3464 init_registers_powerpc_isa207_vsx64l ();
3465 init_registers_powerpc_isa207_htm_vsx64l ();
3466 #endif
3467
3468 initialize_regsets_info (&ppc_regsets_info);
3469 }