1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
46
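/* For example, for an I-form branch instruction ("b", primary opcode 18),
   PPC_OP6 (insn) extracts the opcode from bits 0..5, and PPC_LI (insn)
   sign-extends the 24-bit LI field and scales it by 4, giving the branch
   displacement in bytes.  */
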
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
56
57 protected:
58
59 void low_arch_setup () override;
60
61 bool low_cannot_fetch_register (int regno) override;
62
63 bool low_cannot_store_register (int regno) override;
64
65 bool low_supports_breakpoints () override;
66
67 CORE_ADDR low_get_pc (regcache *regcache) override;
68
69 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
70 };
71
72 /* The singleton target ops object. */
73
74 static ppc_target the_ppc_target;
75
76 /* Holds the AT_HWCAP auxv entry. */
77
78 static unsigned long ppc_hwcap;
79
80 /* Holds the AT_HWCAP2 auxv entry. */
81
82 static unsigned long ppc_hwcap2;
83
84
85 #define ppc_num_regs 73
86
87 #ifdef __powerpc64__
88 /* We use a constant for FPSCR instead of PT_FPSCR, because
89 many shipped PPC64 kernels had the wrong value in ptrace.h. */
90 static int ppc_regmap[] =
91 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
92 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
93 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
94 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
95 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
96 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
97 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
98 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
99 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
100 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
101 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
102 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
103 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
104 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
105 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
106 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
107 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
108 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
109 PT_ORIG_R3 * 8, PT_TRAP * 8 };
110 #else
111 /* Currently, don't check/send MQ. */
112 static int ppc_regmap[] =
113 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
114 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
115 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
116 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
117 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
118 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
119 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
120 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
121 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
122 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
123 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
124 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
125 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
126 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
127 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
128 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
129 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
130 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
131 PT_ORIG_R3 * 4, PT_TRAP * 4
132 };
133
134 static int ppc_regmap_e500[] =
135 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
136 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
137 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
138 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
139 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
140 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
141 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
142 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
143 -1, -1, -1, -1,
144 -1, -1, -1, -1,
145 -1, -1, -1, -1,
146 -1, -1, -1, -1,
147 -1, -1, -1, -1,
148 -1, -1, -1, -1,
149 -1, -1, -1, -1,
150 -1, -1, -1, -1,
151 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
152 PT_CTR * 4, PT_XER * 4, -1,
153 PT_ORIG_R3 * 4, PT_TRAP * 4
154 };
155 #endif
156
157 /* Check whether the kernel provides a register set with number
158 REGSET_ID of size REGSETSIZE for process/thread TID. */
159
160 static int
161 ppc_check_regset (int tid, int regset_id, int regsetsize)
162 {
163 void *buf = alloca (regsetsize);
164 struct iovec iov;
165
166 iov.iov_base = buf;
167 iov.iov_len = regsetsize;
168
169 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
170 || errno == ENODATA)
171 return 1;
172 return 0;
173 }
174
175 bool
176 ppc_target::low_cannot_store_register (int regno)
177 {
178 const struct target_desc *tdesc = current_process ()->tdesc;
179
180 #ifndef __powerpc64__
181 /* Some kernels do not allow us to store fpscr. */
182 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
183 && regno == find_regno (tdesc, "fpscr"))
184 return true;
185 #endif
186
187 /* Some kernels do not allow us to store orig_r3 or trap. */
188 if (regno == find_regno (tdesc, "orig_r3")
189 || regno == find_regno (tdesc, "trap"))
190 return true;
191
192 return false;
193 }
194
195 bool
196 ppc_target::low_cannot_fetch_register (int regno)
197 {
198 return false;
199 }
200
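/* Collect register REGNO from REGCACHE into BUF, a buffer of sizeof (long)
   bytes, placing the value at whichever end of the slot the host byte
   order requires.  */
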
201 static void
202 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
203 {
204 memset (buf, 0, sizeof (long));
205
206 if (__BYTE_ORDER == __LITTLE_ENDIAN)
207 {
208 /* Little-endian values always sit at the left end of the buffer. */
209 collect_register (regcache, regno, buf);
210 }
211 else if (__BYTE_ORDER == __BIG_ENDIAN)
212 {
213 /* Big-endian values sit at the right end of the buffer. In the case of
214 registers whose size is smaller than sizeof (long), we must use an
215 offset to access them correctly. */
216 int size = register_size (regcache->tdesc, regno);
217
218 if (size < sizeof (long))
219 collect_register (regcache, regno, buf + sizeof (long) - size);
220 else
221 collect_register (regcache, regno, buf);
222 }
223 else
224 perror_with_name ("Unexpected byte order");
225 }
226
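/* Supply register REGNO to REGCACHE from BUF; the converse of
   ppc_collect_ptrace_register above.  */
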
227 static void
228 ppc_supply_ptrace_register (struct regcache *regcache,
229 int regno, const char *buf)
230 {
231 if (__BYTE_ORDER == __LITTLE_ENDIAN)
232 {
233 /* Little-endian values always sit at the left end of the buffer. */
234 supply_register (regcache, regno, buf);
235 }
236 else if (__BYTE_ORDER == __BIG_ENDIAN)
237 {
238 /* Big-endian values sit at the right end of the buffer. In the case of
239 registers whose size is smaller than sizeof (long), we must use an
240 offset to access them correctly. */
241 int size = register_size (regcache->tdesc, regno);
242
243 if (size < sizeof (long))
244 supply_register (regcache, regno, buf + sizeof (long) - size);
245 else
246 supply_register (regcache, regno, buf);
247 }
248 else
249 perror_with_name ("Unexpected byte order");
250 }
251
252 bool
253 ppc_target::low_supports_breakpoints ()
254 {
255 return true;
256 }
257
258 CORE_ADDR
259 ppc_target::low_get_pc (regcache *regcache)
260 {
261 if (register_size (regcache->tdesc, 0) == 4)
262 {
263 unsigned int pc;
264 collect_register_by_name (regcache, "pc", &pc);
265 return (CORE_ADDR) pc;
266 }
267 else
268 {
269 unsigned long pc;
270 collect_register_by_name (regcache, "pc", &pc);
271 return (CORE_ADDR) pc;
272 }
273 }
274
275 void
276 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
277 {
278 if (register_size (regcache->tdesc, 0) == 4)
279 {
280 unsigned int newpc = pc;
281 supply_register_by_name (regcache, "pc", &newpc);
282 }
283 else
284 {
285 unsigned long newpc = pc;
286 supply_register_by_name (regcache, "pc", &newpc);
287 }
288 }
289
290 #ifndef __powerpc64__
291 static int ppc_regmap_adjusted;
292 #endif
293
294
295 /* Correct in either endianness.
296 This instruction is "twge r2, r2", which GDB uses as a software
297 breakpoint. */
298 static const unsigned int ppc_breakpoint = 0x7d821008;
299 #define ppc_breakpoint_len 4
300
301 /* Implementation of target ops method "sw_breakpoint_from_kind". */
302
303 const gdb_byte *
304 ppc_target::sw_breakpoint_from_kind (int kind, int *size)
305 {
306 *size = ppc_breakpoint_len;
307 return (const gdb_byte *) &ppc_breakpoint;
308 }
309
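/* Return 1 if the instruction at WHERE in the inferior is the software
   breakpoint instruction, 0 otherwise.  */
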
310 static int
311 ppc_breakpoint_at (CORE_ADDR where)
312 {
313 unsigned int insn;
314
315 the_target->read_memory (where, (unsigned char *) &insn, 4);
316 if (insn == ppc_breakpoint)
317 return 1;
318 /* If necessary, recognize more trap instructions here. GDB only uses
319 the one. */
320
321 return 0;
322 }
323
324 /* Implement supports_z_point_type target-ops.
325 Returns true if type Z_TYPE breakpoint is supported.
326
327 Software breakpoints are handled on the server side, so tracepoints
328 and breakpoints can be inserted at the same location. */
329
330 static int
331 ppc_supports_z_point_type (char z_type)
332 {
333 switch (z_type)
334 {
335 case Z_PACKET_SW_BP:
336 return 1;
337 case Z_PACKET_HW_BP:
338 case Z_PACKET_WRITE_WP:
339 case Z_PACKET_ACCESS_WP:
340 default:
341 return 0;
342 }
343 }
344
345 /* Implement insert_point target-ops.
346 Returns 0 on success, -1 on failure and 1 on unsupported. */
347
348 static int
349 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
350 int size, struct raw_breakpoint *bp)
351 {
352 switch (type)
353 {
354 case raw_bkpt_type_sw:
355 return insert_memory_breakpoint (bp);
356
357 case raw_bkpt_type_hw:
358 case raw_bkpt_type_write_wp:
359 case raw_bkpt_type_access_wp:
360 default:
361 /* Unsupported. */
362 return 1;
363 }
364 }
365
366 /* Implement remove_point target-ops.
367 Returns 0 on success, -1 on failure and 1 on unsupported. */
368
369 static int
370 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
371 int size, struct raw_breakpoint *bp)
372 {
373 switch (type)
374 {
375 case raw_bkpt_type_sw:
376 return remove_memory_breakpoint (bp);
377
378 case raw_bkpt_type_hw:
379 case raw_bkpt_type_write_wp:
380 case raw_bkpt_type_access_wp:
381 default:
382 /* Unsupported. */
383 return 1;
384 }
385 }
386
387 /* Provide only a fill function for the general register set. ps_lgetregs
388 will use this for NPTL support. */
389
390 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
391 {
392 int i;
393
394 for (i = 0; i < 32; i++)
395 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
396
397 for (i = 64; i < 70; i++)
398 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
399
400 for (i = 71; i < 73; i++)
401 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
402 }
403
404 /* Program Priority Register regset fill function. */
405
406 static void
407 ppc_fill_pprregset (struct regcache *regcache, void *buf)
408 {
409 char *ppr = (char *) buf;
410
411 collect_register_by_name (regcache, "ppr", ppr);
412 }
413
414 /* Program Priority Register regset store function. */
415
416 static void
417 ppc_store_pprregset (struct regcache *regcache, const void *buf)
418 {
419 const char *ppr = (const char *) buf;
420
421 supply_register_by_name (regcache, "ppr", ppr);
422 }
423
424 /* Data Stream Control Register regset fill function. */
425
426 static void
427 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
428 {
429 char *dscr = (char *) buf;
430
431 collect_register_by_name (regcache, "dscr", dscr);
432 }
433
434 /* Data Stream Control Register regset store function. */
435
436 static void
437 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
438 {
439 const char *dscr = (const char *) buf;
440
441 supply_register_by_name (regcache, "dscr", dscr);
442 }
443
444 /* Target Address Register regset fill function. */
445
446 static void
447 ppc_fill_tarregset (struct regcache *regcache, void *buf)
448 {
449 char *tar = (char *) buf;
450
451 collect_register_by_name (regcache, "tar", tar);
452 }
453
454 /* Target Address Register regset store function. */
455
456 static void
457 ppc_store_tarregset (struct regcache *regcache, const void *buf)
458 {
459 const char *tar = (const char *) buf;
460
461 supply_register_by_name (regcache, "tar", tar);
462 }
463
464 /* Event-Based Branching regset store function. Unless the inferior
465 has a perf event open, ptrace can fail with ENODATA when reading or
466 writing the regset. For reading, the registers
467 will correctly show as unavailable. For writing, gdbserver
468 currently only caches any register writes from P and G packets and
469 the stub always tries to write all the regsets when resuming the
470 inferior, which would result in frequent warnings. For this
471 reason, we don't define a fill function. This also means that the
472 client-side regcache will be dirty if the user tries to write to
473 the EBB registers. G packets that the client sends to write to
474 unrelated registers will also include data for EBB registers, even
475 if they are unavailable. */
476
477 static void
478 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
479 {
480 const char *regset = (const char *) buf;
481
482 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
483 .dat file it is BESCR, EBBHR, EBBRR. */
484 supply_register_by_name (regcache, "ebbrr", &regset[0]);
485 supply_register_by_name (regcache, "ebbhr", &regset[8]);
486 supply_register_by_name (regcache, "bescr", &regset[16]);
487 }
488
489 /* Performance Monitoring Unit regset fill function. */
490
491 static void
492 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
493 {
494 char *regset = (char *) buf;
495
496 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
497 In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER. */
498 collect_register_by_name (regcache, "siar", &regset[0]);
499 collect_register_by_name (regcache, "sdar", &regset[8]);
500 collect_register_by_name (regcache, "sier", &regset[16]);
501 collect_register_by_name (regcache, "mmcr2", &regset[24]);
502 collect_register_by_name (regcache, "mmcr0", &regset[32]);
503 }
504
505 /* Performance Monitoring Unit regset store function. */
506
507 static void
508 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
509 {
510 const char *regset = (const char *) buf;
511
512 supply_register_by_name (regcache, "siar", &regset[0]);
513 supply_register_by_name (regcache, "sdar", &regset[8]);
514 supply_register_by_name (regcache, "sier", &regset[16]);
515 supply_register_by_name (regcache, "mmcr2", &regset[24]);
516 supply_register_by_name (regcache, "mmcr0", &regset[32]);
517 }
518
519 /* Hardware Transactional Memory special-purpose register regset fill
520 function. */
521
522 static void
523 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
524 {
525 int i, base;
526 char *regset = (char *) buf;
527
528 base = find_regno (regcache->tdesc, "tfhar");
529 for (i = 0; i < 3; i++)
530 collect_register (regcache, base + i, &regset[i * 8]);
531 }
532
533 /* Hardware Transactional Memory special-purpose register regset store
534 function. */
535
536 static void
537 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
538 {
539 int i, base;
540 const char *regset = (const char *) buf;
541
542 base = find_regno (regcache->tdesc, "tfhar");
543 for (i = 0; i < 3; i++)
544 supply_register (regcache, base + i, &regset[i * 8]);
545 }
546
547 /* For the same reasons as the EBB regset, none of the HTM
548 checkpointed regsets have a fill function. These registers are
549 only available if the inferior is in a transaction. */
550
551 /* Hardware Transactional Memory checkpointed general-purpose regset
552 store function. */
553
554 static void
555 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
556 {
557 int i, base, size, endian_offset;
558 const char *regset = (const char *) buf;
559
560 base = find_regno (regcache->tdesc, "cr0");
561 size = register_size (regcache->tdesc, base);
562
563 gdb_assert (size == 4 || size == 8);
564
565 for (i = 0; i < 32; i++)
566 supply_register (regcache, base + i, &regset[i * size]);
567
568 endian_offset = 0;
569
570 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
571 endian_offset = 4;
572
573 supply_register_by_name (regcache, "ccr",
574 &regset[PT_CCR * size + endian_offset]);
575
576 supply_register_by_name (regcache, "cxer",
577 &regset[PT_XER * size + endian_offset]);
578
579 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
580 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
581 }
582
583 /* Hardware Transactional Memory checkpointed floating-point regset
584 store function. */
585
586 static void
587 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
588 {
589 int i, base;
590 const char *regset = (const char *) buf;
591
592 base = find_regno (regcache->tdesc, "cf0");
593
594 for (i = 0; i < 32; i++)
595 supply_register (regcache, base + i, &regset[i * 8]);
596
597 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
598 }
599
600 /* Hardware Transactional Memory checkpointed vector regset store
601 function. */
602
603 static void
604 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
605 {
606 int i, base;
607 const char *regset = (const char *) buf;
608 int vscr_offset = 0;
609
610 base = find_regno (regcache->tdesc, "cvr0");
611
612 for (i = 0; i < 32; i++)
613 supply_register (regcache, base + i, &regset[i * 16]);
614
615 if (__BYTE_ORDER == __BIG_ENDIAN)
616 vscr_offset = 12;
617
618 supply_register_by_name (regcache, "cvscr",
619 &regset[32 * 16 + vscr_offset]);
620
621 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
622 }
623
624 /* Hardware Transactional Memory checkpointed vector-scalar regset
625 store function. */
626
627 static void
628 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
629 {
630 int i, base;
631 const char *regset = (const char *) buf;
632
633 base = find_regno (regcache->tdesc, "cvs0h");
634 for (i = 0; i < 32; i++)
635 supply_register (regcache, base + i, &regset[i * 8]);
636 }
637
638 /* Hardware Transactional Memory checkpointed Program Priority
639 Register regset store function. */
640
641 static void
642 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
643 {
644 const char *cppr = (const char *) buf;
645
646 supply_register_by_name (regcache, "cppr", cppr);
647 }
648
649 /* Hardware Transactional Memory checkpointed Data Stream Control
650 Register regset store function. */
651
652 static void
653 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
654 {
655 const char *cdscr = (const char *) buf;
656
657 supply_register_by_name (regcache, "cdscr", cdscr);
658 }
659
660 /* Hardware Transactional Memory checkpointed Target Address Register
661 regset store function. */
662
663 static void
664 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
665 {
666 const char *ctar = (const char *) buf;
667
668 supply_register_by_name (regcache, "ctar", ctar);
669 }
670
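/* Vector-Scalar (VSX) registers regset fill function.  Collects the
   vs0h..vs31h registers, the halves of VSR0-VSR31 that are not covered
   by the floating-point regset, into BUF.  */
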
671 static void
672 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
673 {
674 int i, base;
675 char *regset = (char *) buf;
676
677 base = find_regno (regcache->tdesc, "vs0h");
678 for (i = 0; i < 32; i++)
679 collect_register (regcache, base + i, &regset[i * 8]);
680 }
681
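/* Vector-Scalar (VSX) registers regset store function.  */
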
682 static void
683 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
684 {
685 int i, base;
686 const char *regset = (const char *) buf;
687
688 base = find_regno (regcache->tdesc, "vs0h");
689 for (i = 0; i < 32; i++)
690 supply_register (regcache, base + i, &regset[i * 8]);
691 }
692
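/* Altivec (VMX) registers regset fill function.  The kernel layout is
   vr0..vr31 followed by VSCR and VRSAVE, each in a 16-byte slot; VSCR
   occupies the last word of its slot on big-endian and the first word
   on little-endian.  */
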
693 static void
694 ppc_fill_vrregset (struct regcache *regcache, void *buf)
695 {
696 int i, base;
697 char *regset = (char *) buf;
698 int vscr_offset = 0;
699
700 base = find_regno (regcache->tdesc, "vr0");
701 for (i = 0; i < 32; i++)
702 collect_register (regcache, base + i, &regset[i * 16]);
703
704 if (__BYTE_ORDER == __BIG_ENDIAN)
705 vscr_offset = 12;
706
707 collect_register_by_name (regcache, "vscr",
708 &regset[32 * 16 + vscr_offset]);
709
710 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
711 }
712
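/* Altivec (VMX) registers regset store function.  */
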
713 static void
714 ppc_store_vrregset (struct regcache *regcache, const void *buf)
715 {
716 int i, base;
717 const char *regset = (const char *) buf;
718 int vscr_offset = 0;
719
720 base = find_regno (regcache->tdesc, "vr0");
721 for (i = 0; i < 32; i++)
722 supply_register (regcache, base + i, &regset[i * 16]);
723
724 if (__BYTE_ORDER == __BIG_ENDIAN)
725 vscr_offset = 12;
726
727 supply_register_by_name (regcache, "vscr",
728 &regset[32 * 16 + vscr_offset]);
729 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
730 }
731
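/* Layout of the SPE register set transferred with PTRACE_GETEVRREGS /
   PTRACE_SETEVRREGS: the upper halves of the 64-bit GPRs (ev0h..ev31h),
   the 64-bit accumulator, and SPEFSCR.  */
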
732 struct gdb_evrregset_t
733 {
734 unsigned long evr[32];
735 unsigned long long acc;
736 unsigned long spefscr;
737 };
738
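/* SPE (EVR) registers regset fill function.  */
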
739 static void
740 ppc_fill_evrregset (struct regcache *regcache, void *buf)
741 {
742 int i, ev0;
743 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
744
745 ev0 = find_regno (regcache->tdesc, "ev0h");
746 for (i = 0; i < 32; i++)
747 collect_register (regcache, ev0 + i, &regset->evr[i]);
748
749 collect_register_by_name (regcache, "acc", &regset->acc);
750 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
751 }
752
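/* SPE (EVR) registers regset store function.  */
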
753 static void
754 ppc_store_evrregset (struct regcache *regcache, const void *buf)
755 {
756 int i, ev0;
757 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
758
759 ev0 = find_regno (regcache->tdesc, "ev0h");
760 for (i = 0; i < 32; i++)
761 supply_register (regcache, ev0 + i, &regset->evr[i]);
762
763 supply_register_by_name (regcache, "acc", &regset->acc);
764 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
765 }
766
767 /* Support for hardware single step. */
768
769 static int
770 ppc_supports_hardware_single_step (void)
771 {
772 return 1;
773 }
774
775 static struct regset_info ppc_regsets[] = {
776 /* List the extra register sets before GENERAL_REGS. That way we will
777 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
778 general registers. Some kernels support these, but not the newer
779 PPC_PTRACE_GETREGS. */
780 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
781 NULL, ppc_store_tm_ctarregset },
782 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
783 NULL, ppc_store_tm_cdscrregset },
784 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
785 NULL, ppc_store_tm_cpprregset },
786 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
787 NULL, ppc_store_tm_cvsxregset },
788 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
789 NULL, ppc_store_tm_cvrregset },
790 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
791 NULL, ppc_store_tm_cfprregset },
792 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
793 NULL, ppc_store_tm_cgprregset },
794 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
795 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
796 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
797 NULL, ppc_store_ebbregset },
798 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
799 ppc_fill_pmuregset, ppc_store_pmuregset },
800 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
801 ppc_fill_tarregset, ppc_store_tarregset },
802 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
803 ppc_fill_pprregset, ppc_store_pprregset },
804 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
805 ppc_fill_dscrregset, ppc_store_dscrregset },
806 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
807 ppc_fill_vsxregset, ppc_store_vsxregset },
808 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
809 ppc_fill_vrregset, ppc_store_vrregset },
810 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
811 ppc_fill_evrregset, ppc_store_evrregset },
812 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
813 NULL_REGSET
814 };
815
816 static struct usrregs_info ppc_usrregs_info =
817 {
818 ppc_num_regs,
819 ppc_regmap,
820 };
821
822 static struct regsets_info ppc_regsets_info =
823 {
824 ppc_regsets, /* regsets */
825 0, /* num_regsets */
826 NULL, /* disabled_regsets */
827 };
828
829 static struct regs_info myregs_info =
830 {
831 NULL, /* regset_bitmap */
832 &ppc_usrregs_info,
833 &ppc_regsets_info
834 };
835
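/* Implementation of target ops method "get_regs_info".  */
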
836 const regs_info *
837 ppc_target::get_regs_info ()
838 {
839 return &myregs_info;
840 }
841
842 void
843 ppc_target::low_arch_setup ()
844 {
845 const struct target_desc *tdesc;
846 struct regset_info *regset;
847 struct ppc_linux_features features = ppc_linux_no_features;
848
849 int tid = lwpid_of (current_thread);
850
851 features.wordsize = ppc_linux_target_wordsize (tid);
852
853 if (features.wordsize == 4)
854 tdesc = tdesc_powerpc_32l;
855 else
856 tdesc = tdesc_powerpc_64l;
857
858 current_process ()->tdesc = tdesc;
859
860 /* The value of current_process ()->tdesc needs to be set for this
861 call. */
862 ppc_hwcap = linux_get_hwcap (features.wordsize);
863 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
864
865 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
866
867 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
868 features.vsx = true;
869
870 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
871 features.altivec = true;
872
873 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
874 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
875 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
876 {
877 features.ppr_dscr = true;
878 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
879 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
880 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
881 && ppc_check_regset (tid, NT_PPC_TAR,
882 PPC_LINUX_SIZEOF_TARREGSET)
883 && ppc_check_regset (tid, NT_PPC_EBB,
884 PPC_LINUX_SIZEOF_EBBREGSET)
885 && ppc_check_regset (tid, NT_PPC_PMU,
886 PPC_LINUX_SIZEOF_PMUREGSET))
887 {
888 features.isa207 = true;
889 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
890 && ppc_check_regset (tid, NT_PPC_TM_SPR,
891 PPC_LINUX_SIZEOF_TM_SPRREGSET))
892 features.htm = true;
893 }
894 }
895
896 tdesc = ppc_linux_match_description (features);
897
898 /* On 32-bit machines, check for SPE registers.
899 Set the low target's regmap field appropriately. */
900 #ifndef __powerpc64__
901 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
902 tdesc = tdesc_powerpc_e500l;
903
904 if (!ppc_regmap_adjusted)
905 {
906 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
907 ppc_usrregs_info.regmap = ppc_regmap_e500;
908
909 /* If the FPSCR is 64-bit wide, we need to fetch the whole
910 64-bit slot and not just its second word. The PT_FPSCR
911 supplied in a 32-bit GDB compilation doesn't reflect
912 this. */
913 if (register_size (tdesc, 70) == 8)
914 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
915
916 ppc_regmap_adjusted = 1;
917 }
918 #endif
919
920 current_process ()->tdesc = tdesc;
921
922 for (regset = ppc_regsets; regset->size >= 0; regset++)
923 switch (regset->get_request)
924 {
925 case PTRACE_GETVRREGS:
926 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
927 break;
928 case PTRACE_GETVSXREGS:
929 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
930 break;
931 case PTRACE_GETEVRREGS:
932 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
933 regset->size = 32 * 4 + 8 + 4;
934 else
935 regset->size = 0;
936 break;
937 case PTRACE_GETREGSET:
938 switch (regset->nt_type)
939 {
940 case NT_PPC_PPR:
941 regset->size = (features.ppr_dscr ?
942 PPC_LINUX_SIZEOF_PPRREGSET : 0);
943 break;
944 case NT_PPC_DSCR:
945 regset->size = (features.ppr_dscr ?
946 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
947 break;
948 case NT_PPC_TAR:
949 regset->size = (features.isa207 ?
950 PPC_LINUX_SIZEOF_TARREGSET : 0);
951 break;
952 case NT_PPC_EBB:
953 regset->size = (features.isa207 ?
954 PPC_LINUX_SIZEOF_EBBREGSET : 0);
955 break;
956 case NT_PPC_PMU:
957 regset->size = (features.isa207 ?
958 PPC_LINUX_SIZEOF_PMUREGSET : 0);
959 break;
960 case NT_PPC_TM_SPR:
961 regset->size = (features.htm ?
962 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
963 break;
964 case NT_PPC_TM_CGPR:
965 if (features.wordsize == 4)
966 regset->size = (features.htm ?
967 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
968 else
969 regset->size = (features.htm ?
970 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
971 break;
972 case NT_PPC_TM_CFPR:
973 regset->size = (features.htm ?
974 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
975 break;
976 case NT_PPC_TM_CVMX:
977 regset->size = (features.htm ?
978 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
979 break;
980 case NT_PPC_TM_CVSX:
981 regset->size = (features.htm ?
982 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
983 break;
984 case NT_PPC_TM_CPPR:
985 regset->size = (features.htm ?
986 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
987 break;
988 case NT_PPC_TM_CDSCR:
989 regset->size = (features.htm ?
990 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
991 break;
992 case NT_PPC_TM_CTAR:
993 regset->size = (features.htm ?
994 PPC_LINUX_SIZEOF_CTARREGSET : 0);
995 break;
996 default:
997 break;
998 }
999 break;
1000 default:
1001 break;
1002 }
1003 }
1004
1005 /* Implementation of linux_target_ops method "supports_tracepoints". */
1006
1007 static int
1008 ppc_supports_tracepoints (void)
1009 {
1010 return 1;
1011 }
1012
1013 /* Get the thread area address. This is used to recognize which
1014 thread is which when tracing with the in-process agent library. We
1015 don't read anything from the address, and treat it as opaque; it's
1016 the address itself that we assume is unique per-thread. */
1017
1018 static int
1019 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
1020 {
1021 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1022 struct thread_info *thr = get_lwp_thread (lwp);
1023 struct regcache *regcache = get_thread_regcache (thr, 1);
1024 ULONGEST tp = 0;
1025
1026 #ifdef __powerpc64__
1027 if (register_size (regcache->tdesc, 0) == 8)
1028 collect_register_by_name (regcache, "r13", &tp);
1029 else
1030 #endif
1031 collect_register_by_name (regcache, "r2", &tp);
1032
1033 *addr = tp;
1034
1035 return 0;
1036 }
1037
1038 #ifdef __powerpc64__
1039
1040 /* Older glibc doesn't provide this. */
1041
1042 #ifndef EF_PPC64_ABI
1043 #define EF_PPC64_ABI 3
1044 #endif
1045
1046 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1047 inferiors. */
1048
1049 static int
1050 is_elfv2_inferior (void)
1051 {
1052 /* To be used as fallback if we're unable to determine the right result -
1053 assume inferior uses the same ABI as gdbserver. */
1054 #if _CALL_ELF == 2
1055 const int def_res = 1;
1056 #else
1057 const int def_res = 0;
1058 #endif
1059 CORE_ADDR phdr;
1060 Elf64_Ehdr ehdr;
1061
1062 const struct target_desc *tdesc = current_process ()->tdesc;
1063 int wordsize = register_size (tdesc, 0);
1064
1065 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1066 return def_res;
1067
1068 /* Assume ELF header is at the beginning of the page where program headers
1069 are located. If it doesn't look like one, bail. */
1070
1071 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1072 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1073 return def_res;
1074
1075 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1076 }
1077
1078 #endif
1079
1080 /* Generate a ds-form instruction in BUF and return the number of bytes written
1081
1082 0 6 11 16 30 32
1083 | OPCD | RST | RA | DS |XO| */
1084
1085 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1086 static int
1087 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1088 {
1089 uint32_t insn;
1090
1091 gdb_assert ((opcd & ~0x3f) == 0);
1092 gdb_assert ((rst & ~0x1f) == 0);
1093 gdb_assert ((ra & ~0x1f) == 0);
1094 gdb_assert ((xo & ~0x3) == 0);
1095
1096 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1097 *buf = (opcd << 26) | insn;
1098 return 1;
1099 }
1100
1101 /* The following are frequently used ds-form instructions. */
1102
1103 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1104 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1105 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1106 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
1107
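/* For example, GEN_STDU (buf, 1, 1, -64) encodes "stdu r1, -64(r1)", the
   stack-frame allocation idiom used by the jump pad below.  */
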
1108 /* Generate a d-form instruction in BUF.
1109
1110 0 6 11 16 32
1111 | OPCD | RST | RA | D | */
1112
1113 static int
1114 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1115 {
1116 uint32_t insn;
1117
1118 gdb_assert ((opcd & ~0x3f) == 0);
1119 gdb_assert ((rst & ~0x1f) == 0);
1120 gdb_assert ((ra & ~0x1f) == 0);
1121
1122 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1123 *buf = (opcd << 26) | insn;
1124 return 1;
1125 }
1126
1127 /* The following are frequently used d-form instructions. */
1128
1129 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1130 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1131 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1132 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1133 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1134 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1135 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1136 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1137 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1138
1139 /* Generate a xfx-form instruction in BUF and return the number of bytes
1140 written.
1141
1142 0 6 11 21 31 32
1143 | OPCD | RST | RI | XO |/| */
1144
1145 static int
1146 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1147 {
1148 uint32_t insn;
1149 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1150
1151 gdb_assert ((opcd & ~0x3f) == 0);
1152 gdb_assert ((rst & ~0x1f) == 0);
1153 gdb_assert ((xo & ~0x3ff) == 0);
1154
1155 insn = (rst << 21) | (n << 11) | (xo << 1);
1156 *buf = (opcd << 26) | insn;
1157 return 1;
1158 }
1159
1160 /* The following are frequently used xfx-form instructions. */
1161
1162 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1163 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1164 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1165 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1166 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1167 E & 0xf, 598)
1168 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
1169
1170
1171 /* Generate a x-form instruction in BUF and return the number of bytes written.
1172
1173 0 6 11 16 21 31 32
1174 | OPCD | RST | RA | RB | XO |RC| */
1175
1176 static int
1177 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1178 {
1179 uint32_t insn;
1180
1181 gdb_assert ((opcd & ~0x3f) == 0);
1182 gdb_assert ((rst & ~0x1f) == 0);
1183 gdb_assert ((ra & ~0x1f) == 0);
1184 gdb_assert ((rb & ~0x1f) == 0);
1185 gdb_assert ((xo & ~0x3ff) == 0);
1186 gdb_assert ((rc & ~1) == 0);
1187
1188 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1189 *buf = (opcd << 26) | insn;
1190 return 1;
1191 }
1192
1193 /* The following are frequently used x-form instructions. */
1194
1195 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1196 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1197 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1198 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1199 /* Assume bf = cr7. */
1200 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
1201
1202
1203 /* Generate a md-form instruction in BUF and return the number of bytes written.
1204
1205 0 6 11 16 21 27 30 31 32
1206 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1207
1208 static int
1209 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1210 int xo, int rc)
1211 {
1212 uint32_t insn;
1213 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1214 unsigned int sh0_4 = sh & 0x1f;
1215 unsigned int sh5 = (sh >> 5) & 1;
1216
1217 gdb_assert ((opcd & ~0x3f) == 0);
1218 gdb_assert ((rs & ~0x1f) == 0);
1219 gdb_assert ((ra & ~0x1f) == 0);
1220 gdb_assert ((sh & ~0x3f) == 0);
1221 gdb_assert ((mb & ~0x3f) == 0);
1222 gdb_assert ((xo & ~0x7) == 0);
1223 gdb_assert ((rc & ~0x1) == 0);
1224
1225 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1226 | (sh5 << 1) | (xo << 2) | (rc & 1);
1227 *buf = (opcd << 26) | insn;
1228 return 1;
1229 }
1230
1231 /* The following are frequently used md-form instructions. */
1232
1233 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1234 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1235 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1236 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1237
1238 /* Generate a i-form instruction in BUF and return the number of bytes written.
1239
1240 0 6 30 31 32
1241 | OPCD | LI |AA|LK| */
1242
1243 static int
1244 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1245 {
1246 uint32_t insn;
1247
1248 gdb_assert ((opcd & ~0x3f) == 0);
1249
1250 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1251 *buf = (opcd << 26) | insn;
1252 return 1;
1253 }
1254
1255 /* The following are frequently used i-form instructions. */
1256
1257 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1258 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1259
1260 /* Generate a b-form instruction in BUF and return the number of bytes written.
1261
1262 0 6 11 16 30 31 32
1263 | OPCD | BO | BI | BD |AA|LK| */
1264
1265 static int
1266 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1267 int aa, int lk)
1268 {
1269 uint32_t insn;
1270
1271 gdb_assert ((opcd & ~0x3f) == 0);
1272 gdb_assert ((bo & ~0x1f) == 0);
1273 gdb_assert ((bi & ~0x1f) == 0);
1274
1275 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1276 *buf = (opcd << 26) | insn;
1277 return 1;
1278 }
1279
1280 /* The following are frequently used b-form instructions. */
1281 /* Assume bi = cr7. */
1282 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
1283
1284 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1285 respectively. They are primarily used to save/restore GPRs in the jump
1286 pad, not for bytecode compilation. */
1287
1288 #ifdef __powerpc64__
1289 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1290 GEN_LD (buf, rt, ra, si) : \
1291 GEN_LWZ (buf, rt, ra, si))
1292 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1293 GEN_STD (buf, rt, ra, si) : \
1294 GEN_STW (buf, rt, ra, si))
1295 #else
1296 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1297 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1298 #endif
1299
1300 /* Generate a sequence of instructions to load IMM into register REG.
1301 Write the instructions in BUF and return the number of bytes written. */
1302
1303 static int
1304 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1305 {
1306 uint32_t *p = buf;
1307
1308 if ((imm + 32768) < 65536)
1309 {
1310 /* li reg, imm[15:0] */
1311 p += GEN_LI (p, reg, imm);
1312 }
1313 else if ((imm >> 32) == 0)
1314 {
1315 /* lis reg, imm[31:16]
1316 ori reg, reg, imm[15:0]
1317 rldicl reg, reg, 0, 32 */
1318 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1319 if ((imm & 0xffff) != 0)
1320 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1321 /* Clear upper 32-bit if sign-bit is set. */
1322 if (imm & (1u << 31) && is_64)
1323 p += GEN_RLDICL (p, reg, reg, 0, 32);
1324 }
1325 else
1326 {
1327 gdb_assert (is_64);
1328 /* lis reg, <imm[63:48]>
1329 ori reg, reg, <imm[48:32]>
1330 rldicr reg, reg, 32, 31
1331 oris reg, reg, <imm[31:16]>
1332 ori reg, reg, <imm[15:0]> */
1333 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1334 if (((imm >> 32) & 0xffff) != 0)
1335 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1336 p += GEN_RLDICR (p, reg, reg, 32, 31);
1337 if (((imm >> 16) & 0xffff) != 0)
1338 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1339 if ((imm & 0xffff) != 0)
1340 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1341 }
1342
1343 return p - buf;
1344 }
1345
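/* For example, gen_limm (buf, 3, 0x12345678, 0) emits "lis r3, 0x1234"
   followed by "ori r3, r3, 0x5678".  */
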
1346 /* Generate a sequence to atomically exchange the value at location LOCK.
1347 This code sequence clobbers r6, r7, r8. LOCK is the location for
1348 the atomic exchange, OLD_VALUE is the expected old value stored in the
1349 location, and R_NEW is a register for the new value. */
1350
1351 static int
1352 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1353 int is_64)
1354 {
1355 const int r_lock = 6;
1356 const int r_old = 7;
1357 const int r_tmp = 8;
1358 uint32_t *p = buf;
1359
1360 /*
1361 1: lwarx TMP, 0, LOCK
1362 cmpwi TMP, OLD
1363 bne 1b
1364 stwcx. NEW, 0, LOCK
1365 bne 1b */
1366
1367 p += gen_limm (p, r_lock, lock, is_64);
1368 p += gen_limm (p, r_old, old_value, is_64);
1369
1370 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1371 p += GEN_CMPW (p, r_tmp, r_old);
1372 p += GEN_BNE (p, -8);
1373 p += GEN_STWCX (p, r_new, 0, r_lock);
1374 p += GEN_BNE (p, -16);
1375
1376 return p - buf;
1377 }
1378
1379 /* Generate a sequence of instructions for calling a function
1380 at address FN. Return the number of bytes written to BUF. */
1381
1382 static int
1383 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1384 {
1385 uint32_t *p = buf;
1386
1387 /* Must be called through r12 so that the callee can calculate its TOC address. */
1388 p += gen_limm (p, 12, fn, is_64);
1389 if (is_opd)
1390 {
1391 p += GEN_LOAD (p, 11, 12, 16, is_64);
1392 p += GEN_LOAD (p, 2, 12, 8, is_64);
1393 p += GEN_LOAD (p, 12, 12, 0, is_64);
1394 }
1395 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1396 *p++ = 0x4e800421; /* bctrl */
1397
1398 return p - buf;
1399 }
1400
1401 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1402 of instruction. This function is used to adjust pc-relative instructions
1403 when copying. */
1404
1405 static void
1406 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1407 {
1408 uint32_t insn, op6;
1409 long rel, newrel;
1410
1411 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1412 op6 = PPC_OP6 (insn);
1413
1414 if (op6 == 18 && (insn & 2) == 0)
1415 {
1416 /* branch && AA = 0 */
1417 rel = PPC_LI (insn);
1418 newrel = (oldloc - *to) + rel;
1419
1420 /* Out of range. Cannot relocate instruction. */
1421 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1422 return;
1423
1424 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1425 }
1426 else if (op6 == 16 && (insn & 2) == 0)
1427 {
1428 /* conditional branch && AA = 0 */
1429
1430 /* If the new relocation is too big for even a 26-bit unconditional
1431 branch, there is nothing we can do. Just abort.
1432
1433 Otherwise, if it can fit in a 16-bit conditional branch, just
1434 copy the instruction and relocate the address.
1435
1436 If it's too big for a conditional branch (16-bit), try to invert the
1437 condition and jump with 26-bit branch. For example,
1438
1439 beq .Lgoto
1440 INSN1
1441
1442 =>
1443
1444 bne 1f (+8)
1445 b .Lgoto
1446 1:INSN1
1447
1448 After this transform, we actually jump from *TO+4 instead of *TO,
1449 so check the relocation again because it will be 1 insn farther than
1450 before if *TO is after OLDLOC.
1451
1452
1453 A BDNZT (or similar) instruction is transformed from
1454
1455 bdnzt eq, .Lgoto
1456 INSN1
1457
1458 =>
1459
1460 bdz 1f (+12)
1461 bf eq, 1f (+8)
1462 b .Lgoto
1463 1:INSN1
1464
1465 See also "BO field encodings". */
1466
1467 rel = PPC_BD (insn);
1468 newrel = (oldloc - *to) + rel;
1469
1470 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1471 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1472 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1473 {
1474 newrel -= 4;
1475
1476 /* Out of range. Cannot relocate instruction. */
1477 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1478 return;
1479
1480 if ((PPC_BO (insn) & 0x14) == 0x4)
1481 insn ^= (1 << 24);
1482 else if ((PPC_BO (insn) & 0x14) == 0x10)
1483 insn ^= (1 << 22);
1484
1485 /* Jump over the unconditional branch. */
1486 insn = (insn & ~0xfffc) | 0x8;
1487 target_write_memory (*to, (unsigned char *) &insn, 4);
1488 *to += 4;
1489
1490 /* Build an unconditional branch and copy the LK bit. */
1491 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1492 target_write_memory (*to, (unsigned char *) &insn, 4);
1493 *to += 4;
1494
1495 return;
1496 }
1497 else if ((PPC_BO (insn) & 0x14) == 0)
1498 {
1499 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1500 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1501
1502 newrel -= 8;
1503
1504 /* Out of range. Cannot relocate instruction. */
1505 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1506 return;
1507
1508 /* Copy BI field. */
1509 bf_insn |= (insn & 0x1f0000);
1510
1511 /* Invert condition. */
1512 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1513 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1514
1515 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1516 *to += 4;
1517 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1518 *to += 4;
1519
1520 /* Build an unconditional branch and copy the LK bit. */
1521 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1522 target_write_memory (*to, (unsigned char *) &insn, 4);
1523 *to += 4;
1524
1525 return;
1526 }
1527 else /* (BO & 0x14) == 0x14, branch always. */
1528 {
1529 /* Out of range. Cannot relocate instruction. */
1530 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1531 return;
1532
1533 /* Build an unconditional branch and copy the LK bit. */
1534 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1535 target_write_memory (*to, (unsigned char *) &insn, 4);
1536 *to += 4;
1537
1538 return;
1539 }
1540 }
1541
1542 target_write_memory (*to, (unsigned char *) &insn, 4);
1543 *to += 4;
1544 }
1545
1546 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1547 See target.h for details. */
1548
1549 static int
1550 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1551 CORE_ADDR collector,
1552 CORE_ADDR lockaddr,
1553 ULONGEST orig_size,
1554 CORE_ADDR *jump_entry,
1555 CORE_ADDR *trampoline,
1556 ULONGEST *trampoline_size,
1557 unsigned char *jjump_pad_insn,
1558 ULONGEST *jjump_pad_insn_size,
1559 CORE_ADDR *adjusted_insn_addr,
1560 CORE_ADDR *adjusted_insn_addr_end,
1561 char *err)
1562 {
1563 uint32_t buf[256];
1564 uint32_t *p = buf;
1565 int j, offset;
1566 CORE_ADDR buildaddr = *jump_entry;
1567 const CORE_ADDR entryaddr = *jump_entry;
1568 int rsz, min_frame, frame_size, tp_reg;
1569 #ifdef __powerpc64__
1570 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1571 int is_64 = register_size (regcache->tdesc, 0) == 8;
1572 int is_opd = is_64 && !is_elfv2_inferior ();
1573 #else
1574 int is_64 = 0, is_opd = 0;
1575 #endif
1576
1577 #ifdef __powerpc64__
1578 if (is_64)
1579 {
1580 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1581 rsz = 8;
1582 min_frame = 112;
1583 frame_size = (40 * rsz) + min_frame;
1584 tp_reg = 13;
1585 }
1586 else
1587 {
1588 #endif
1589 rsz = 4;
1590 min_frame = 16;
1591 frame_size = (40 * rsz) + min_frame;
1592 tp_reg = 2;
1593 #ifdef __powerpc64__
1594 }
1595 #endif
1596
1597 /* Stack frame layout for this jump pad,
1598
1599 High thread_area (r13/r2) |
1600 tpoint - collecting_t obj
1601 PC/<tpaddr> | +36
1602 CTR | +35
1603 LR | +34
1604 XER | +33
1605 CR | +32
1606 R31 |
1607 R29 |
1608 ... |
1609 R1 | +1
1610 R0 - collected registers
1611 ... |
1612 ... |
1613 Low Back-chain -
1614
1615
1616 The code flow of this jump pad,
1617
1618 1. Adjust SP
1619 2. Save GPR and SPR
1620 3. Prepare argument
1621 4. Call gdb_collector
1622 5. Restore GPR and SPR
1623 6. Restore SP
1624 7. Build a jump back to the program
1625 8. Copy/relocate original instruction
1626 9. Build a jump to replace the original instruction. */
1627
1628 /* Adjust stack pointer. */
1629 if (is_64)
1630 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1631 else
1632 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1633
1634 /* Store GPRs. Save R1 later, because it had just been modified, but
1635 we want the original value. */
1636 for (j = 2; j < 32; j++)
1637 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1638 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1639 /* Set r0 to the original value of r1 before adjusting stack frame,
1640 and then save it. */
1641 p += GEN_ADDI (p, 0, 1, frame_size);
1642 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1643
1644 /* Save CR, XER, LR, and CTR. */
1645 p += GEN_MFCR (p, 3); /* mfcr r3 */
1646 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1647 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1648 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1649 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1650 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1651 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1652 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1653
1654 /* Save PC<tpaddr> */
1655 p += gen_limm (p, 3, tpaddr, is_64);
1656 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1657
1658
1659 /* Setup arguments to collector. */
1660 /* Set r4 to collected registers. */
1661 p += GEN_ADDI (p, 4, 1, min_frame);
1662 /* Set r3 to TPOINT. */
1663 p += gen_limm (p, 3, tpoint, is_64);
1664
1665 /* Prepare collecting_t object for lock. */
1666 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1667 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1668 /* Set R5 to collecting object. */
1669 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1670
1671 p += GEN_LWSYNC (p);
1672 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1673 p += GEN_LWSYNC (p);
1674
1675 /* Call to collector. */
1676 p += gen_call (p, collector, is_64, is_opd);
1677
1678 /* Simply write 0 to release the lock. */
1679 p += gen_limm (p, 3, lockaddr, is_64);
1680 p += gen_limm (p, 4, 0, is_64);
1681 p += GEN_LWSYNC (p);
1682 p += GEN_STORE (p, 4, 3, 0, is_64);
1683
1684 /* Restore stack and registers. */
1685 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1686 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1687 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1688 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1689 p += GEN_MTCR (p, 3); /* mtcr r3 */
1690 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1691 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1692 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1693
1694 /* Restore GPRs. */
1695 for (j = 2; j < 32; j++)
1696 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1697 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1698 /* Restore SP. */
1699 p += GEN_ADDI (p, 1, 1, frame_size);
1700
1701 /* Flush instructions to inferior memory. */
1702 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1703
1704 /* Now, insert the original instruction to execute in the jump pad. */
1705 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1706 *adjusted_insn_addr_end = *adjusted_insn_addr;
1707 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1708
1709 /* Verify the relocation size. It should be 4 for a normal copy, or
1710 8 or 12 for some conditional branches. */
1711 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1712 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1713 {
1714 sprintf (err, "E.Unexpected instruction length = %d "
1715 "when relocating instruction.",
1716 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1717 return 1;
1718 }
1719
1720 buildaddr = *adjusted_insn_addr_end;
1721 p = buf;
1722 /* Finally, write a jump back to the program. */
1723 offset = (tpaddr + 4) - buildaddr;
1724 if (offset >= (1 << 25) || offset < -(1 << 25))
1725 {
1726 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1727 "(offset 0x%x > 26-bit).", offset);
1728 return 1;
1729 }
1730 /* b <tpaddr+4> */
1731 p += GEN_B (p, offset);
1732 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1733 *jump_entry = buildaddr + (p - buf) * 4;
1734
1735 /* The jump pad is now built. Wire in a jump to our jump pad. This
1736 is always done last (by our caller actually), so that we can
1737 install fast tracepoints with threads running. This relies on
1738 the agent's atomic write support. */
1739 offset = entryaddr - tpaddr;
1740 if (offset >= (1 << 25) || offset < -(1 << 25))
1741 {
1742 sprintf (err, "E.Jump to jump pad too far from tracepoint "
1743 "(offset 0x%x > 26-bit).", offset);
1744 return 1;
1745 }
1746 /* b <jentry> */
1747 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1748 *jjump_pad_insn_size = 4;
1749
1750 return 0;
1751 }
1752
1753 /* Returns the minimum instruction length for installing a tracepoint. */
1754
1755 static int
1756 ppc_get_min_fast_tracepoint_insn_len (void)
1757 {
1758 return 4;
1759 }
1760
1761 /* Emits a given buffer into the target at current_insn_ptr. Length
1762 is in units of 32-bit words. */
1763
1764 static void
1765 emit_insns (uint32_t *buf, int n)
1766 {
1767 n = n * sizeof (uint32_t);
1768 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1769 current_insn_ptr += n;
1770 }
1771
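/* __EMIT_ASM assembles INSNS into gdbserver's own binary at build time, in
   the .text.__ppcbcax section between the start_bcax_/end_bcax_ labels,
   and emit_insns then copies that machine code into the inferior at
   current_insn_ptr.  */
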
1772 #define __EMIT_ASM(NAME, INSNS) \
1773 do \
1774 { \
1775 extern uint32_t start_bcax_ ## NAME []; \
1776 extern uint32_t end_bcax_ ## NAME []; \
1777 emit_insns (start_bcax_ ## NAME, \
1778 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1779 __asm__ (".section .text.__ppcbcax\n\t" \
1780 "start_bcax_" #NAME ":\n\t" \
1781 INSNS "\n\t" \
1782 "end_bcax_" #NAME ":\n\t" \
1783 ".previous\n\t"); \
1784 } while (0)
1785
1786 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1787 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1788
1789 /*
1790
1791 Bytecode execution stack frame - 32-bit
1792
1793 | LR save area (SP + 4)
1794 SP' -> +- Back chain (SP + 0)
1795 | Save r31 for accessing saved arguments
1796 | Save r30 for bytecode stack pointer
1797 | Save r4 for incoming argument *value
1798 | Save r3 for incoming argument regs
1799 r30 -> +- Bytecode execution stack
1800 |
1801 | 64 bytes (8 doublewords) initially.
1802 | Expand stack as needed.
1803 |
1804 +-
1805 | Some padding for minimum stack frame and 16-byte alignment.
1806 | 16 bytes.
1807 SP +- Back-chain (SP')
1808
1809 initial frame size
1810 = 16 + (4 * 4) + 64
1811 = 96
1812
1813 r30 is the stack pointer for the bytecode machine.
1814 It should point to the next empty slot, so we can use LDU for pop.
1815 r3 is used to cache the high part of the TOP value.
1816 It was the first argument, the pointer to regs.
1817 r4 is used to cache the low part of the TOP value.
1818 It was the second argument, the pointer to the result.
1819 We should set *result = TOP before leaving this function.
1820
1821 Note:
1822 * To restore stack at epilogue
1823 => sp = r31
1824 * To check stack is big enough for bytecode execution.
1825 => r30 - 8 > SP + 8
1826 * To return execution result.
1827 => 0(r4) = TOP
1828
1829 */
1830
1831 /* Regardless of endianness, register 3 is always the high part and 4 the
1832 low part. These defines are used when the register pair is stored/loaded.
1833 Likewise, to simplify the code, there is a similar define for 5:6. */
1834
1835 #if __BYTE_ORDER == __LITTLE_ENDIAN
1836 #define TOP_FIRST "4"
1837 #define TOP_SECOND "3"
1838 #define TMP_FIRST "6"
1839 #define TMP_SECOND "5"
1840 #else
1841 #define TOP_FIRST "3"
1842 #define TOP_SECOND "4"
1843 #define TMP_FIRST "5"
1844 #define TMP_SECOND "6"
1845 #endif
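
/* As a sketch of how these defines are used: the epilogue below stores
   TOP to *result with

     "stw " TOP_FIRST ", 0(5)"
     "stw " TOP_SECOND ", 4(5)"

   which always writes the 64-bit value in target memory order -- the low
   word (r4) lands at the lower address on little-endian, the high word
   (r3) does on big-endian -- so the caller can read *result as a single
   64-bit value regardless of byte order.  */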
1846
1847 /* Emit prologue in inferior memory. See above comments. */
1848
1849 static void
1850 ppc_emit_prologue (void)
1851 {
1852 EMIT_ASM (/* Save return address. */
1853 "mflr 0 \n"
1854 "stw 0, 4(1) \n"
1855 /* Adjust SP. 96 is the initial frame size. */
1856 "stwu 1, -96(1) \n"
1857 /* Save r30 and incoming arguments. */
1858 "stw 31, 96-4(1) \n"
1859 "stw 30, 96-8(1) \n"
1860 "stw 4, 96-12(1) \n"
1861 "stw 3, 96-16(1) \n"
1862 /* Point r31 at the original r1 so the saved arguments can be accessed. */
1863 "addi 31, 1, 96 \n"
1864 /* Set r30 to point at the stack top. */
1865 "addi 30, 1, 64 \n"
1866 /* Initialize r3/TOP to 0. */
1867 "li 3, 0 \n"
1868 "li 4, 0 \n");
1869 }
1870
1871 /* Emit epilogue in inferior memory. See above comments. */
1872
1873 static void
1874 ppc_emit_epilogue (void)
1875 {
1876 EMIT_ASM (/* *result = TOP */
1877 "lwz 5, -12(31) \n"
1878 "stw " TOP_FIRST ", 0(5) \n"
1879 "stw " TOP_SECOND ", 4(5) \n"
1880 /* Restore registers. */
1881 "lwz 31, -4(31) \n"
1882 "lwz 30, -8(31) \n"
1883 /* Restore SP. */
1884 "lwz 1, 0(1) \n"
1885 /* Restore LR. */
1886 "lwz 0, 4(1) \n"
1887 /* Return 0 for no-error. */
1888 "li 3, 0 \n"
1889 "mtlr 0 \n"
1890 "blr \n");
1891 }
1892
1893 /* TOP = stack[--sp] + TOP */
1894
1895 static void
1896 ppc_emit_add (void)
1897 {
1898 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1899 "lwz " TMP_SECOND ", 4(30)\n"
1900 "addc 4, 6, 4 \n"
1901 "adde 3, 5, 3 \n");
1902 }
1903
1904 /* TOP = stack[--sp] - TOP */
1905
1906 static void
1907 ppc_emit_sub (void)
1908 {
1909 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1910 "lwz " TMP_SECOND ", 4(30) \n"
1911 "subfc 4, 4, 6 \n"
1912 "subfe 3, 3, 5 \n");
1913 }
1914
1915 /* TOP = stack[--sp] * TOP */
1916
1917 static void
1918 ppc_emit_mul (void)
1919 {
1920 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1921 "lwz " TMP_SECOND ", 4(30) \n"
1922 "mulhwu 7, 6, 4 \n"
1923 "mullw 3, 6, 3 \n"
1924 "mullw 5, 4, 5 \n"
1925 "mullw 4, 6, 4 \n"
1926 "add 3, 5, 3 \n"
1927 "add 3, 7, 3 \n");
1928 }
1929
1930 /* TOP = stack[--sp] << TOP */
1931
1932 static void
1933 ppc_emit_lsh (void)
1934 {
1935 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1936 "lwz " TMP_SECOND ", 4(30) \n"
1937 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1938 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1939 "slw 5, 5, 4\n" /* Shift high part left */
1940 "slw 4, 6, 4\n" /* Shift low part left */
1941 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1942 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1943 "or 3, 5, 3\n"
1944 "or 3, 7, 3\n"); /* Assemble high part */
1945 }
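
/* A note on the trick above (ppc_emit_rsh_unsigned below uses the same
   idea): slw/srw take their shift count from the low six bits of the
   register, and any count in the range 32..63 yields zero.  For counts
   below 32 the "TOP - 32" term is therefore forced to zero and the
   "32 - TOP" term carries the bits that cross the word boundary; for
   counts above 32 the roles are reversed (and at exactly 32 both terms
   coincide), so OR-ing the terms assembles the correct word for any
   shift count.  */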
1946
1947 /* TOP = stack[--sp] >> TOP
1948 (Arithmetic shift right) */
1949
1950 static void
1951 ppc_emit_rsh_signed (void)
1952 {
1953 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1954 "lwz " TMP_SECOND ", 4(30) \n"
1955 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1956 "sraw 3, 5, 4\n" /* Shift high part right */
1957 "cmpwi 7, 1\n"
1958 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1959 "sraw 4, 5, 7\n" /* Shift high to low */
1960 "b 2f\n"
1961 "1:\n"
1962 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1963 "srw 4, 6, 4\n" /* Shift low part right */
1964 "slw 5, 5, 7\n" /* Shift high to low */
1965 "or 4, 4, 5\n" /* Assemble low part */
1966 "2:\n");
1967 }
1968
1969 /* TOP = stack[--sp] >> TOP
1970 (Logical shift right) */
1971
1972 static void
1973 ppc_emit_rsh_unsigned (void)
1974 {
1975 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1976 "lwz " TMP_SECOND ", 4(30) \n"
1977 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1978 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1979 "srw 6, 6, 4\n" /* Shift low part right */
1980 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1981 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1982 "or 6, 6, 3\n"
1983 "srw 3, 5, 4\n" /* Shift high part right */
1984 "or 4, 6, 7\n"); /* Assemble low part */
1985 }
1986
1987 /* Emit code for signed-extension specified by ARG. */
1988
1989 static void
1990 ppc_emit_ext (int arg)
1991 {
1992 switch (arg)
1993 {
1994 case 8:
1995 EMIT_ASM ("extsb 4, 4\n"
1996 "srawi 3, 4, 31");
1997 break;
1998 case 16:
1999 EMIT_ASM ("extsh 4, 4\n"
2000 "srawi 3, 4, 31");
2001 break;
2002 case 32:
2003 EMIT_ASM ("srawi 3, 4, 31");
2004 break;
2005 default:
2006 emit_error = 1;
2007 }
2008 }
2009
2010 /* Emit code for zero-extension specified by ARG. */
2011
2012 static void
2013 ppc_emit_zero_ext (int arg)
2014 {
2015 switch (arg)
2016 {
2017 case 8:
2018 EMIT_ASM ("clrlwi 4,4,24\n"
2019 "li 3, 0\n");
2020 break;
2021 case 16:
2022 EMIT_ASM ("clrlwi 4,4,16\n"
2023 "li 3, 0\n");
2024 break;
2025 case 32:
2026 EMIT_ASM ("li 3, 0");
2027 break;
2028 default:
2029 emit_error = 1;
2030 }
2031 }
2032
2033 /* TOP = !TOP
2034 i.e., TOP = (TOP == 0) ? 1 : 0; */
2035
2036 static void
2037 ppc_emit_log_not (void)
2038 {
2039 EMIT_ASM ("or 4, 3, 4 \n"
2040 "cntlzw 4, 4 \n"
2041 "srwi 4, 4, 5 \n"
2042 "li 3, 0 \n");
2043 }
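
/* A note on the cntlzw trick above (ppc_emit_equal below uses it too):
   cntlzw returns 32 only for a zero input and 0..31 otherwise, so
   shifting the count right by 5 yields exactly 1 when the input was zero
   and 0 when it was not.  The 64-bit variants do the same with cntlzd
   and a shift by 6.  */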
2044
2045 /* TOP = stack[--sp] & TOP */
2046
2047 static void
2048 ppc_emit_bit_and (void)
2049 {
2050 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2051 "lwz " TMP_SECOND ", 4(30) \n"
2052 "and 4, 6, 4 \n"
2053 "and 3, 5, 3 \n");
2054 }
2055
2056 /* TOP = stack[--sp] | TOP */
2057
2058 static void
2059 ppc_emit_bit_or (void)
2060 {
2061 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2062 "lwz " TMP_SECOND ", 4(30) \n"
2063 "or 4, 6, 4 \n"
2064 "or 3, 5, 3 \n");
2065 }
2066
2067 /* TOP = stack[--sp] ^ TOP */
2068
2069 static void
2070 ppc_emit_bit_xor (void)
2071 {
2072 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2073 "lwz " TMP_SECOND ", 4(30) \n"
2074 "xor 4, 6, 4 \n"
2075 "xor 3, 5, 3 \n");
2076 }
2077
2078 /* TOP = ~TOP
2079 i.e., TOP = ~(TOP | TOP) */
2080
2081 static void
2082 ppc_emit_bit_not (void)
2083 {
2084 EMIT_ASM ("nor 3, 3, 3 \n"
2085 "nor 4, 4, 4 \n");
2086 }
2087
2088 /* TOP = stack[--sp] == TOP */
2089
2090 static void
2091 ppc_emit_equal (void)
2092 {
2093 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2094 "lwz " TMP_SECOND ", 4(30) \n"
2095 "xor 4, 6, 4 \n"
2096 "xor 3, 5, 3 \n"
2097 "or 4, 3, 4 \n"
2098 "cntlzw 4, 4 \n"
2099 "srwi 4, 4, 5 \n"
2100 "li 3, 0 \n");
2101 }
2102
2103 /* TOP = stack[--sp] < TOP
2104 (Signed comparison) */
2105
2106 static void
2107 ppc_emit_less_signed (void)
2108 {
2109 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2110 "lwz " TMP_SECOND ", 4(30) \n"
2111 "cmplw 6, 6, 4 \n"
2112 "cmpw 7, 5, 3 \n"
2113 /* CR6 bit 0 = low less and high equal */
2114 "crand 6*4+0, 6*4+0, 7*4+2\n"
2115 /* CR7 bit 0 = (low less and high equal) or high less */
2116 "cror 7*4+0, 7*4+0, 6*4+0\n"
2117 "mfcr 4 \n"
2118 "rlwinm 4, 4, 29, 31, 31 \n"
2119 "li 3, 0 \n");
2120 }
2121
2122 /* TOP = stack[--sp] < TOP
2123 (Unsigned comparison) */
2124
2125 static void
2126 ppc_emit_less_unsigned (void)
2127 {
2128 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2129 "lwz " TMP_SECOND ", 4(30) \n"
2130 "cmplw 6, 6, 4 \n"
2131 "cmplw 7, 5, 3 \n"
2132 /* CR6 bit 0 = low less and high equal */
2133 "crand 6*4+0, 6*4+0, 7*4+2\n"
2134 /* CR7 bit 0 = (low less and high equal) or high less */
2135 "cror 7*4+0, 7*4+0, 6*4+0\n"
2136 "mfcr 4 \n"
2137 "rlwinm 4, 4, 29, 31, 31 \n"
2138 "li 3, 0 \n");
2139 }
2140
2141 /* Access the memory address in TOP in size of SIZE.
2142 Zero-extend the read value. */
2143
2144 static void
2145 ppc_emit_ref (int size)
2146 {
2147 switch (size)
2148 {
2149 case 1:
2150 EMIT_ASM ("lbz 4, 0(4)\n"
2151 "li 3, 0");
2152 break;
2153 case 2:
2154 EMIT_ASM ("lhz 4, 0(4)\n"
2155 "li 3, 0");
2156 break;
2157 case 4:
2158 EMIT_ASM ("lwz 4, 0(4)\n"
2159 "li 3, 0");
2160 break;
2161 case 8:
2162 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2163 EMIT_ASM ("lwz 3, 4(4)\n"
2164 "lwz 4, 0(4)");
2165 else
2166 EMIT_ASM ("lwz 3, 0(4)\n"
2167 "lwz 4, 4(4)");
2168 break;
2169 }
2170 }
2171
2172 /* TOP = NUM */
2173
2174 static void
2175 ppc_emit_const (LONGEST num)
2176 {
2177 uint32_t buf[10];
2178 uint32_t *p = buf;
2179
2180 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2181 p += gen_limm (p, 4, num & 0xffffffff, 0);
2182
2183 emit_insns (buf, p - buf);
2184 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2185 }
2186
2187 /* Set TOP to the value of register REG by calling the get_raw_reg function
2188 with two arguments, the collected register buffer and the register number. */
2189
2190 static void
2191 ppc_emit_reg (int reg)
2192 {
2193 uint32_t buf[13];
2194 uint32_t *p = buf;
2195
2196 /* fctx->regs is passed in r3 and then saved in -16(31). */
2197 p += GEN_LWZ (p, 3, 31, -16);
2198 p += GEN_LI (p, 4, reg); /* li r4, reg */
2199 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2200
2201 emit_insns (buf, p - buf);
2202 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2203
2204 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2205 {
2206 EMIT_ASM ("mr 5, 4\n"
2207 "mr 4, 3\n"
2208 "mr 3, 5\n");
2209 }
2210 }
2211
2212 /* TOP = stack[--sp] */
2213
2214 static void
2215 ppc_emit_pop (void)
2216 {
2217 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2218 "lwz " TOP_SECOND ", 4(30) \n");
2219 }
2220
2221 /* stack[sp++] = TOP
2222
2223 Because we may run out of bytecode stack, expand it by 8 more
2224 doublewords if needed. */
2225
2226 static void
2227 ppc_emit_stack_flush (void)
2228 {
2229 /* Make sure the bytecode stack is big enough before the push.
2230 Otherwise, expand it by 64 more bytes. */
2231
2232 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2233 " stw " TOP_SECOND ", 4(30)\n"
2234 " addi 5, 30, -(8 + 8) \n"
2235 " cmpw 7, 5, 1 \n"
2236 " bgt 7, 1f \n"
2237 " stwu 31, -64(1) \n"
2238 "1:addi 30, 30, -8 \n");
2239 }
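
/* A sketch of the check above: r5 = r30 - 16 is compared against the real
   stack pointer r1.  If the bytecode stack pointer would come within 16
   bytes of r1, "stwu 31, -64(1)" grows the frame by another 64 bytes,
   storing r31 (the caller's SP, i.e. the back chain) at the new r1.  In
   either case r30 is then moved down by one 8-byte slot.  */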
2240
2241 /* Swap TOP and stack[sp-1] */
2242
2243 static void
2244 ppc_emit_swap (void)
2245 {
2246 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2247 "lwz " TMP_SECOND ", 12(30) \n"
2248 "stw " TOP_FIRST ", 8(30) \n"
2249 "stw " TOP_SECOND ", 12(30) \n"
2250 "mr 3, 5 \n"
2251 "mr 4, 6 \n");
2252 }
2253
2254 /* Discard N elements in the stack. Also used for ppc64. */
2255
2256 static void
2257 ppc_emit_stack_adjust (int n)
2258 {
2259 uint32_t buf[6];
2260 uint32_t *p = buf;
2261
2262 n = n << 3;
2263 if ((n >> 15) != 0)
2264 {
2265 emit_error = 1;
2266 return;
2267 }
2268
2269 p += GEN_ADDI (p, 30, 30, n);
2270
2271 emit_insns (buf, p - buf);
2272 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2273 }
2274
2275 /* Call function FN. */
2276
2277 static void
2278 ppc_emit_call (CORE_ADDR fn)
2279 {
2280 uint32_t buf[11];
2281 uint32_t *p = buf;
2282
2283 p += gen_call (p, fn, 0, 0);
2284
2285 emit_insns (buf, p - buf);
2286 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2287 }
2288
2289 /* FN's prototype is `LONGEST(*fn)(int)'.
2290 TOP = fn (arg1)
2291 */
2292
2293 static void
2294 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2295 {
2296 uint32_t buf[15];
2297 uint32_t *p = buf;
2298
2299 /* Setup argument. arg1 is a 16-bit value. */
2300 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2301 p += gen_call (p, fn, 0, 0);
2302
2303 emit_insns (buf, p - buf);
2304 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2305
2306 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2307 {
2308 EMIT_ASM ("mr 5, 4\n"
2309 "mr 4, 3\n"
2310 "mr 3, 5\n");
2311 }
2312 }
2313
2314 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2315 fn (arg1, TOP)
2316
2317 TOP should be preserved/restored before/after the call. */
2318
2319 static void
2320 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2321 {
2322 uint32_t buf[21];
2323 uint32_t *p = buf;
2324
2325 /* Save TOP. 0(30) is next-empty. */
2326 p += GEN_STW (p, 3, 30, 0);
2327 p += GEN_STW (p, 4, 30, 4);
2328
2329 /* Setup argument. arg1 is a 16-bit value. */
2330 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2331 {
2332 p += GEN_MR (p, 5, 4);
2333 p += GEN_MR (p, 6, 3);
2334 }
2335 else
2336 {
2337 p += GEN_MR (p, 5, 3);
2338 p += GEN_MR (p, 6, 4);
2339 }
2340 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2341 p += gen_call (p, fn, 0, 0);
2342
2343 /* Restore TOP */
2344 p += GEN_LWZ (p, 3, 30, 0);
2345 p += GEN_LWZ (p, 4, 30, 4);
2346
2347 emit_insns (buf, p - buf);
2348 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2349 }
2350
2351 /* Note on the following goto ops:
2352
2353 When emitting a goto, the target address is relocated later by
2354 write_goto_address. OFFSET_P is the offset of the branch instruction
2355 in the code sequence, and SIZE_P tells ppc_write_goto_address how to
2356 relocate that instruction. In the current implementation, SIZE can be
2357 either 24 or 14, for a branch or a conditional-branch instruction.
2358 */
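
/* For example, in ppc_emit_if_goto below the "bne" placeholder is the
   fourth 4-byte instruction emitted, so *OFFSET_P is 12, and since it is
   a conditional branch (14-bit displacement field) *SIZE_P is 14.  The
   "1: ... 1b" form makes each branch initially target itself; the real
   destination is patched in afterwards by ppc_write_goto_address.  */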
2359
2360 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2361
2362 static void
2363 ppc_emit_if_goto (int *offset_p, int *size_p)
2364 {
2365 EMIT_ASM ("or. 3, 3, 4 \n"
2366 "lwzu " TOP_FIRST ", 8(30) \n"
2367 "lwz " TOP_SECOND ", 4(30) \n"
2368 "1:bne 0, 1b \n");
2369
2370 if (offset_p)
2371 *offset_p = 12;
2372 if (size_p)
2373 *size_p = 14;
2374 }
2375
2376 /* Unconditional goto. Also used for ppc64. */
2377
2378 static void
2379 ppc_emit_goto (int *offset_p, int *size_p)
2380 {
2381 EMIT_ASM ("1:b 1b");
2382
2383 if (offset_p)
2384 *offset_p = 0;
2385 if (size_p)
2386 *size_p = 24;
2387 }
2388
2389 /* Goto if stack[--sp] == TOP */
2390
2391 static void
2392 ppc_emit_eq_goto (int *offset_p, int *size_p)
2393 {
2394 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2395 "lwz " TMP_SECOND ", 4(30) \n"
2396 "xor 4, 6, 4 \n"
2397 "xor 3, 5, 3 \n"
2398 "or. 3, 3, 4 \n"
2399 "lwzu " TOP_FIRST ", 8(30) \n"
2400 "lwz " TOP_SECOND ", 4(30) \n"
2401 "1:beq 0, 1b \n");
2402
2403 if (offset_p)
2404 *offset_p = 28;
2405 if (size_p)
2406 *size_p = 14;
2407 }
2408
2409 /* Goto if stack[--sp] != TOP */
2410
2411 static void
2412 ppc_emit_ne_goto (int *offset_p, int *size_p)
2413 {
2414 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2415 "lwz " TMP_SECOND ", 4(30) \n"
2416 "xor 4, 6, 4 \n"
2417 "xor 3, 5, 3 \n"
2418 "or. 3, 3, 4 \n"
2419 "lwzu " TOP_FIRST ", 8(30) \n"
2420 "lwz " TOP_SECOND ", 4(30) \n"
2421 "1:bne 0, 1b \n");
2422
2423 if (offset_p)
2424 *offset_p = 28;
2425 if (size_p)
2426 *size_p = 14;
2427 }
2428
2429 /* Goto if stack[--sp] < TOP */
2430
2431 static void
2432 ppc_emit_lt_goto (int *offset_p, int *size_p)
2433 {
2434 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2435 "lwz " TMP_SECOND ", 4(30) \n"
2436 "cmplw 6, 6, 4 \n"
2437 "cmpw 7, 5, 3 \n"
2438 /* CR6 bit 0 = low less and high equal */
2439 "crand 6*4+0, 6*4+0, 7*4+2\n"
2440 /* CR7 bit 0 = (low less and high equal) or high less */
2441 "cror 7*4+0, 7*4+0, 6*4+0\n"
2442 "lwzu " TOP_FIRST ", 8(30) \n"
2443 "lwz " TOP_SECOND ", 4(30)\n"
2444 "1:blt 7, 1b \n");
2445
2446 if (offset_p)
2447 *offset_p = 32;
2448 if (size_p)
2449 *size_p = 14;
2450 }
2451
2452 /* Goto if stack[--sp] <= TOP */
2453
2454 static void
2455 ppc_emit_le_goto (int *offset_p, int *size_p)
2456 {
2457 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2458 "lwz " TMP_SECOND ", 4(30) \n"
2459 "cmplw 6, 6, 4 \n"
2460 "cmpw 7, 5, 3 \n"
2461 /* CR6 bit 0 = low less/equal and high equal */
2462 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2463 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2464 "cror 7*4+0, 7*4+0, 6*4+0\n"
2465 "lwzu " TOP_FIRST ", 8(30) \n"
2466 "lwz " TOP_SECOND ", 4(30)\n"
2467 "1:blt 7, 1b \n");
2468
2469 if (offset_p)
2470 *offset_p = 32;
2471 if (size_p)
2472 *size_p = 14;
2473 }
2474
2475 /* Goto if stack[--sp] > TOP */
2476
2477 static void
2478 ppc_emit_gt_goto (int *offset_p, int *size_p)
2479 {
2480 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2481 "lwz " TMP_SECOND ", 4(30) \n"
2482 "cmplw 6, 6, 4 \n"
2483 "cmpw 7, 5, 3 \n"
2484 /* CR6 bit 0 = low greater and high equal */
2485 "crand 6*4+0, 6*4+1, 7*4+2\n"
2486 /* CR7 bit 0 = (low greater and high equal) or high greater */
2487 "cror 7*4+0, 7*4+1, 6*4+0\n"
2488 "lwzu " TOP_FIRST ", 8(30) \n"
2489 "lwz " TOP_SECOND ", 4(30)\n"
2490 "1:blt 7, 1b \n");
2491
2492 if (offset_p)
2493 *offset_p = 32;
2494 if (size_p)
2495 *size_p = 14;
2496 }
2497
2498 /* Goto if stack[--sp] >= TOP */
2499
2500 static void
2501 ppc_emit_ge_goto (int *offset_p, int *size_p)
2502 {
2503 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2504 "lwz " TMP_SECOND ", 4(30) \n"
2505 "cmplw 6, 6, 4 \n"
2506 "cmpw 7, 5, 3 \n"
2507 /* CR6 bit 0 = low ge and high equal */
2508 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2509 /* CR7 bit 0 = (low ge and high equal) or high greater */
2510 "cror 7*4+0, 7*4+1, 6*4+0\n"
2511 "lwzu " TOP_FIRST ", 8(30)\n"
2512 "lwz " TOP_SECOND ", 4(30)\n"
2513 "1:blt 7, 1b \n");
2514
2515 if (offset_p)
2516 *offset_p = 32;
2517 if (size_p)
2518 *size_p = 14;
2519 }
2520
2521 /* Relocate a previously emitted branch instruction. FROM is the address
2522 of the branch instruction, TO is the goto target address, and SIZE
2523 is the value we set via *SIZE_P before. Currently, it is either
2524 24 (branch) or 14 (conditional branch).
2525 Also used for ppc64. */
2526
2527 static void
2528 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2529 {
2530 long rel = to - from;
2531 uint32_t insn;
2532 int opcd;
2533
2534 read_inferior_memory (from, (unsigned char *) &insn, 4);
2535 opcd = (insn >> 26) & 0x3f;
2536
2537 switch (size)
2538 {
2539 case 14:
2540 if (opcd != 16
2541 || (rel >= (1 << 15) || rel < -(1 << 15)))
2542 emit_error = 1;
2543 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2544 break;
2545 case 24:
2546 if (opcd != 18
2547 || (rel >= (1 << 25) || rel < -(1 << 25)))
2548 emit_error = 1;
2549 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2550 break;
2551 default:
2552 emit_error = 1;
2553 }
2554
2555 if (!emit_error)
2556 target_write_memory (from, (unsigned char *) &insn, 4);
2557 }
2558
2559 /* Table of emit ops for 32-bit. */
2560
2561 static struct emit_ops ppc_emit_ops_impl =
2562 {
2563 ppc_emit_prologue,
2564 ppc_emit_epilogue,
2565 ppc_emit_add,
2566 ppc_emit_sub,
2567 ppc_emit_mul,
2568 ppc_emit_lsh,
2569 ppc_emit_rsh_signed,
2570 ppc_emit_rsh_unsigned,
2571 ppc_emit_ext,
2572 ppc_emit_log_not,
2573 ppc_emit_bit_and,
2574 ppc_emit_bit_or,
2575 ppc_emit_bit_xor,
2576 ppc_emit_bit_not,
2577 ppc_emit_equal,
2578 ppc_emit_less_signed,
2579 ppc_emit_less_unsigned,
2580 ppc_emit_ref,
2581 ppc_emit_if_goto,
2582 ppc_emit_goto,
2583 ppc_write_goto_address,
2584 ppc_emit_const,
2585 ppc_emit_call,
2586 ppc_emit_reg,
2587 ppc_emit_pop,
2588 ppc_emit_stack_flush,
2589 ppc_emit_zero_ext,
2590 ppc_emit_swap,
2591 ppc_emit_stack_adjust,
2592 ppc_emit_int_call_1,
2593 ppc_emit_void_call_2,
2594 ppc_emit_eq_goto,
2595 ppc_emit_ne_goto,
2596 ppc_emit_lt_goto,
2597 ppc_emit_le_goto,
2598 ppc_emit_gt_goto,
2599 ppc_emit_ge_goto
2600 };
2601
2602 #ifdef __powerpc64__
2603
2604 /*
2605
2606 Bytecode execution stack frame - 64-bit
2607
2608 | LR save area (SP + 16)
2609 | CR save area (SP + 8)
2610 SP' -> +- Back chain (SP + 0)
2611 | Save r31 for accessing saved arguments
2612 | Save r30 for the bytecode stack pointer
2613 | Save r4 for incoming argument *value
2614 | Save r3 for incoming argument regs
2615 r30 -> +- Bytecode execution stack
2616 |
2617 | 64-byte (8 doublewords) at initial.
2618 | Expand stack as needed.
2619 |
2620 +-
2621 | Some padding for minimum stack frame.
2622 | 112 for ELFv1.
2623 SP +- Back-chain (SP')
2624
2625 initial frame size
2626 = 112 + (4 * 8) + 64
2627 = 208
2628
2629 r30 is the stack pointer for the bytecode machine.
2630 It should point to the next empty slot, so we can use ldu (load with update) for pop.
2631 r3 caches the TOP value.
2632 It originally held the first argument, the pointer to regs.
2633 r4 is the second argument, the pointer to the result.
2634 We must store *result = TOP before returning from this function.
2635
2636 Note:
2637 * To restore stack at epilogue
2638 => sp = r31
2639 * To check stack is big enough for bytecode execution.
2640 => r30 - 8 > SP + 112
2641 * To return execution result.
2642 => 0(r4) = TOP
2643
2644 */
2645
2646 /* Emit prologue in inferior memory. See above comments. */
2647
2648 static void
2649 ppc64v1_emit_prologue (void)
2650 {
2651 /* On ELFv1, function pointers really point to a function descriptor,
2652 so emit one here. We don't care about the contents of words 1 and 2,
2653 so let them just overlap our code. */
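  /* (An ELFv1 function descriptor is nominally three doublewords: the
     entry address, the TOC pointer and the environment pointer.  Only
     the entry-address word is emitted below; since words 1 and 2 are
     never read here, they simply overlap the first instructions of the
     emitted code.)  */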
2654 uint64_t opd = current_insn_ptr + 8;
2655 uint32_t buf[2];
2656
2657 /* Mind the strict aliasing rules. */
2658 memcpy (buf, &opd, sizeof buf);
2659 emit_insns (buf, 2);
2660 EMIT_ASM (/* Save return address. */
2661 "mflr 0 \n"
2662 "std 0, 16(1) \n"
2663 /* Save r30 and incoming arguments. */
2664 "std 31, -8(1) \n"
2665 "std 30, -16(1) \n"
2666 "std 4, -24(1) \n"
2667 "std 3, -32(1) \n"
2668 /* Point r31 at the current r1 so the saved arguments can be accessed. */
2669 "mr 31, 1 \n"
2670 /* Adjust SP. 208 is the initial frame size. */
2671 "stdu 1, -208(1) \n"
2672 /* Set r30 to point at the stack top. */
2673 "addi 30, 1, 168 \n"
2674 /* Initialize r3/TOP to 0. */
2675 "li 3, 0 \n");
2676 }
2677
2678 /* Emit prologue in inferior memory. See above comments. */
2679
2680 static void
2681 ppc64v2_emit_prologue (void)
2682 {
2683 EMIT_ASM (/* Save return address. */
2684 "mflr 0 \n"
2685 "std 0, 16(1) \n"
2686 /* Save r30 and incoming arguments. */
2687 "std 31, -8(1) \n"
2688 "std 30, -16(1) \n"
2689 "std 4, -24(1) \n"
2690 "std 3, -32(1) \n"
2691 /* Point r31 at the current r1 so the saved arguments can be accessed. */
2692 "mr 31, 1 \n"
2693 /* Adjust SP. 208 is the initial frame size. */
2694 "stdu 1, -208(1) \n"
2695 /* Set r30 to point at the stack top. */
2696 "addi 30, 1, 168 \n"
2697 /* Initialize r3/TOP to 0. */
2698 "li 3, 0 \n");
2699 }
2700
2701 /* Emit epilogue in inferior memory. See above comments. */
2702
2703 static void
2704 ppc64_emit_epilogue (void)
2705 {
2706 EMIT_ASM (/* Restore SP. */
2707 "ld 1, 0(1) \n"
2708 /* *result = TOP */
2709 "ld 4, -24(1) \n"
2710 "std 3, 0(4) \n"
2711 /* Restore registers. */
2712 "ld 31, -8(1) \n"
2713 "ld 30, -16(1) \n"
2714 /* Restore LR. */
2715 "ld 0, 16(1) \n"
2716 /* Return 0 for no-error. */
2717 "li 3, 0 \n"
2718 "mtlr 0 \n"
2719 "blr \n");
2720 }
2721
2722 /* TOP = stack[--sp] + TOP */
2723
2724 static void
2725 ppc64_emit_add (void)
2726 {
2727 EMIT_ASM ("ldu 4, 8(30) \n"
2728 "add 3, 4, 3 \n");
2729 }
2730
2731 /* TOP = stack[--sp] - TOP */
2732
2733 static void
2734 ppc64_emit_sub (void)
2735 {
2736 EMIT_ASM ("ldu 4, 8(30) \n"
2737 "sub 3, 4, 3 \n");
2738 }
2739
2740 /* TOP = stack[--sp] * TOP */
2741
2742 static void
2743 ppc64_emit_mul (void)
2744 {
2745 EMIT_ASM ("ldu 4, 8(30) \n"
2746 "mulld 3, 4, 3 \n");
2747 }
2748
2749 /* TOP = stack[--sp] << TOP */
2750
2751 static void
2752 ppc64_emit_lsh (void)
2753 {
2754 EMIT_ASM ("ldu 4, 8(30) \n"
2755 "sld 3, 4, 3 \n");
2756 }
2757
2758 /* TOP = stack[--sp] >> TOP
2759 (Arithmetic shift right) */
2760
2761 static void
2762 ppc64_emit_rsh_signed (void)
2763 {
2764 EMIT_ASM ("ldu 4, 8(30) \n"
2765 "srad 3, 4, 3 \n");
2766 }
2767
2768 /* TOP = stack[--sp] >> TOP
2769 (Logical shift right) */
2770
2771 static void
2772 ppc64_emit_rsh_unsigned (void)
2773 {
2774 EMIT_ASM ("ldu 4, 8(30) \n"
2775 "srd 3, 4, 3 \n");
2776 }
2777
2778 /* Emit code for signed-extension specified by ARG. */
2779
2780 static void
2781 ppc64_emit_ext (int arg)
2782 {
2783 switch (arg)
2784 {
2785 case 8:
2786 EMIT_ASM ("extsb 3, 3");
2787 break;
2788 case 16:
2789 EMIT_ASM ("extsh 3, 3");
2790 break;
2791 case 32:
2792 EMIT_ASM ("extsw 3, 3");
2793 break;
2794 default:
2795 emit_error = 1;
2796 }
2797 }
2798
2799 /* Emit code for zero-extension specified by ARG. */
2800
2801 static void
2802 ppc64_emit_zero_ext (int arg)
2803 {
2804 switch (arg)
2805 {
2806 case 8:
2807 EMIT_ASM ("rldicl 3,3,0,56");
2808 break;
2809 case 16:
2810 EMIT_ASM ("rldicl 3,3,0,48");
2811 break;
2812 case 32:
2813 EMIT_ASM ("rldicl 3,3,0,32");
2814 break;
2815 default:
2816 emit_error = 1;
2817 }
2818 }
2819
2820 /* TOP = !TOP
2821 i.e., TOP = (TOP == 0) ? 1 : 0; */
2822
2823 static void
2824 ppc64_emit_log_not (void)
2825 {
2826 EMIT_ASM ("cntlzd 3, 3 \n"
2827 "srdi 3, 3, 6 \n");
2828 }
2829
2830 /* TOP = stack[--sp] & TOP */
2831
2832 static void
2833 ppc64_emit_bit_and (void)
2834 {
2835 EMIT_ASM ("ldu 4, 8(30) \n"
2836 "and 3, 4, 3 \n");
2837 }
2838
2839 /* TOP = stack[--sp] | TOP */
2840
2841 static void
2842 ppc64_emit_bit_or (void)
2843 {
2844 EMIT_ASM ("ldu 4, 8(30) \n"
2845 "or 3, 4, 3 \n");
2846 }
2847
2848 /* TOP = stack[--sp] ^ TOP */
2849
2850 static void
2851 ppc64_emit_bit_xor (void)
2852 {
2853 EMIT_ASM ("ldu 4, 8(30) \n"
2854 "xor 3, 4, 3 \n");
2855 }
2856
2857 /* TOP = ~TOP
2858 i.e., TOP = ~(TOP | TOP) */
2859
2860 static void
2861 ppc64_emit_bit_not (void)
2862 {
2863 EMIT_ASM ("nor 3, 3, 3 \n");
2864 }
2865
2866 /* TOP = stack[--sp] == TOP */
2867
2868 static void
2869 ppc64_emit_equal (void)
2870 {
2871 EMIT_ASM ("ldu 4, 8(30) \n"
2872 "xor 3, 3, 4 \n"
2873 "cntlzd 3, 3 \n"
2874 "srdi 3, 3, 6 \n");
2875 }
2876
2877 /* TOP = stack[--sp] < TOP
2878 (Signed comparison) */
2879
2880 static void
2881 ppc64_emit_less_signed (void)
2882 {
2883 EMIT_ASM ("ldu 4, 8(30) \n"
2884 "cmpd 7, 4, 3 \n"
2885 "mfcr 3 \n"
2886 "rlwinm 3, 3, 29, 31, 31 \n");
2887 }
2888
2889 /* TOP = stack[--sp] < TOP
2890 (Unsigned comparison) */
2891
2892 static void
2893 ppc64_emit_less_unsigned (void)
2894 {
2895 EMIT_ASM ("ldu 4, 8(30) \n"
2896 "cmpld 7, 4, 3 \n"
2897 "mfcr 3 \n"
2898 "rlwinm 3, 3, 29, 31, 31 \n");
2899 }
2900
2901 /* Access the memory address in TOP in size of SIZE.
2902 Zero-extend the read value. */
2903
2904 static void
2905 ppc64_emit_ref (int size)
2906 {
2907 switch (size)
2908 {
2909 case 1:
2910 EMIT_ASM ("lbz 3, 0(3)");
2911 break;
2912 case 2:
2913 EMIT_ASM ("lhz 3, 0(3)");
2914 break;
2915 case 4:
2916 EMIT_ASM ("lwz 3, 0(3)");
2917 break;
2918 case 8:
2919 EMIT_ASM ("ld 3, 0(3)");
2920 break;
2921 }
2922 }
2923
2924 /* TOP = NUM */
2925
2926 static void
2927 ppc64_emit_const (LONGEST num)
2928 {
2929 uint32_t buf[5];
2930 uint32_t *p = buf;
2931
2932 p += gen_limm (p, 3, num, 1);
2933
2934 emit_insns (buf, p - buf);
2935 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2936 }
2937
2938 /* Set TOP to the value of register REG by calling the get_raw_reg function
2939 with two arguments, the collected register buffer and the register number. */
2940
2941 static void
2942 ppc64v1_emit_reg (int reg)
2943 {
2944 uint32_t buf[15];
2945 uint32_t *p = buf;
2946
2947 /* fctx->regs is passed in r3 and then saved in 176(1). */
2948 p += GEN_LD (p, 3, 31, -32);
2949 p += GEN_LI (p, 4, reg);
2950 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2951 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2952 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2953
2954 emit_insns (buf, p - buf);
2955 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2956 }
2957
2958 /* Likewise, for ELFv2. */
2959
2960 static void
2961 ppc64v2_emit_reg (int reg)
2962 {
2963 uint32_t buf[12];
2964 uint32_t *p = buf;
2965
2966 /* fctx->regs is passed in r3 and then saved in 176(1). */
2967 p += GEN_LD (p, 3, 31, -32);
2968 p += GEN_LI (p, 4, reg);
2969 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2970 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2971 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2972
2973 emit_insns (buf, p - buf);
2974 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2975 }
2976
2977 /* TOP = stack[--sp] */
2978
2979 static void
2980 ppc64_emit_pop (void)
2981 {
2982 EMIT_ASM ("ldu 3, 8(30)");
2983 }
2984
2985 /* stack[sp++] = TOP
2986
2987 Because we may run out of bytecode stack, expand it by 8 more
2988 doublewords if needed. */
2989
2990 static void
2991 ppc64_emit_stack_flush (void)
2992 {
2993 /* Make sure the bytecode stack is big enough before the push.
2994 Otherwise, expand it by 64 more bytes. */
2995
2996 EMIT_ASM (" std 3, 0(30) \n"
2997 " addi 4, 30, -(112 + 8) \n"
2998 " cmpd 7, 4, 1 \n"
2999 " bgt 7, 1f \n"
3000 " stdu 31, -64(1) \n"
3001 "1:addi 30, 30, -8 \n");
3002 }
3003
3004 /* Swap TOP and stack[sp-1] */
3005
3006 static void
3007 ppc64_emit_swap (void)
3008 {
3009 EMIT_ASM ("ld 4, 8(30) \n"
3010 "std 3, 8(30) \n"
3011 "mr 3, 4 \n");
3012 }
3013
3014 /* Call function FN - ELFv1. */
3015
3016 static void
3017 ppc64v1_emit_call (CORE_ADDR fn)
3018 {
3019 uint32_t buf[13];
3020 uint32_t *p = buf;
3021
3022 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3023 p += gen_call (p, fn, 1, 1);
3024 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3025
3026 emit_insns (buf, p - buf);
3027 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3028 }
3029
3030 /* Call function FN - ELFv2. */
3031
3032 static void
3033 ppc64v2_emit_call (CORE_ADDR fn)
3034 {
3035 uint32_t buf[10];
3036 uint32_t *p = buf;
3037
3038 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3039 p += gen_call (p, fn, 1, 0);
3040 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3041
3042 emit_insns (buf, p - buf);
3043 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3044 }
3045
3046 /* FN's prototype is `LONGEST(*fn)(int)'.
3047 TOP = fn (arg1)
3048 */
3049
3050 static void
3051 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3052 {
3053 uint32_t buf[13];
3054 uint32_t *p = buf;
3055
3056 /* Setup argument. arg1 is a 16-bit value. */
3057 p += gen_limm (p, 3, arg1, 1);
3058 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3059 p += gen_call (p, fn, 1, 1);
3060 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3061
3062 emit_insns (buf, p - buf);
3063 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3064 }
3065
3066 /* Likewise for ELFv2. */
3067
3068 static void
3069 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3070 {
3071 uint32_t buf[10];
3072 uint32_t *p = buf;
3073
3074 /* Setup argument. arg1 is a 16-bit value. */
3075 p += gen_limm (p, 3, arg1, 1);
3076 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3077 p += gen_call (p, fn, 1, 0);
3078 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3079
3080 emit_insns (buf, p - buf);
3081 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3082 }
3083
3084 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3085 fn (arg1, TOP)
3086
3087 TOP should be preserved/restored before/after the call. */
3088
3089 static void
3090 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3091 {
3092 uint32_t buf[17];
3093 uint32_t *p = buf;
3094
3095 /* Save TOP. 0(30) is next-empty. */
3096 p += GEN_STD (p, 3, 30, 0);
3097
3098 /* Setup argument. arg1 is a 16-bit value. */
3099 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3100 p += gen_limm (p, 3, arg1, 1);
3101 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3102 p += gen_call (p, fn, 1, 1);
3103 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3104
3105 /* Restore TOP */
3106 p += GEN_LD (p, 3, 30, 0);
3107
3108 emit_insns (buf, p - buf);
3109 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3110 }
3111
3112 /* Likewise for ELFv2. */
3113
3114 static void
3115 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3116 {
3117 uint32_t buf[14];
3118 uint32_t *p = buf;
3119
3120 /* Save TOP. 0(30) is next-empty. */
3121 p += GEN_STD (p, 3, 30, 0);
3122
3123 /* Setup argument. arg1 is a 16-bit value. */
3124 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3125 p += gen_limm (p, 3, arg1, 1);
3126 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3127 p += gen_call (p, fn, 1, 0);
3128 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3129
3130 /* Restore TOP */
3131 p += GEN_LD (p, 3, 30, 0);
3132
3133 emit_insns (buf, p - buf);
3134 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3135 }
3136
3137 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3138
3139 static void
3140 ppc64_emit_if_goto (int *offset_p, int *size_p)
3141 {
3142 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3143 "ldu 3, 8(30) \n"
3144 "1:bne 7, 1b \n");
3145
3146 if (offset_p)
3147 *offset_p = 8;
3148 if (size_p)
3149 *size_p = 14;
3150 }
3151
3152 /* Goto if stack[--sp] == TOP */
3153
3154 static void
3155 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3156 {
3157 EMIT_ASM ("ldu 4, 8(30) \n"
3158 "cmpd 7, 4, 3 \n"
3159 "ldu 3, 8(30) \n"
3160 "1:beq 7, 1b \n");
3161
3162 if (offset_p)
3163 *offset_p = 12;
3164 if (size_p)
3165 *size_p = 14;
3166 }
3167
3168 /* Goto if stack[--sp] != TOP */
3169
3170 static void
3171 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3172 {
3173 EMIT_ASM ("ldu 4, 8(30) \n"
3174 "cmpd 7, 4, 3 \n"
3175 "ldu 3, 8(30) \n"
3176 "1:bne 7, 1b \n");
3177
3178 if (offset_p)
3179 *offset_p = 12;
3180 if (size_p)
3181 *size_p = 14;
3182 }
3183
3184 /* Goto if stack[--sp] < TOP */
3185
3186 static void
3187 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3188 {
3189 EMIT_ASM ("ldu 4, 8(30) \n"
3190 "cmpd 7, 4, 3 \n"
3191 "ldu 3, 8(30) \n"
3192 "1:blt 7, 1b \n");
3193
3194 if (offset_p)
3195 *offset_p = 12;
3196 if (size_p)
3197 *size_p = 14;
3198 }
3199
3200 /* Goto if stack[--sp] <= TOP */
3201
3202 static void
3203 ppc64_emit_le_goto (int *offset_p, int *size_p)
3204 {
3205 EMIT_ASM ("ldu 4, 8(30) \n"
3206 "cmpd 7, 4, 3 \n"
3207 "ldu 3, 8(30) \n"
3208 "1:ble 7, 1b \n");
3209
3210 if (offset_p)
3211 *offset_p = 12;
3212 if (size_p)
3213 *size_p = 14;
3214 }
3215
3216 /* Goto if stack[--sp] > TOP */
3217
3218 static void
3219 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3220 {
3221 EMIT_ASM ("ldu 4, 8(30) \n"
3222 "cmpd 7, 4, 3 \n"
3223 "ldu 3, 8(30) \n"
3224 "1:bgt 7, 1b \n");
3225
3226 if (offset_p)
3227 *offset_p = 12;
3228 if (size_p)
3229 *size_p = 14;
3230 }
3231
3232 /* Goto if stack[--sp] >= TOP */
3233
3234 static void
3235 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3236 {
3237 EMIT_ASM ("ldu 4, 8(30) \n"
3238 "cmpd 7, 4, 3 \n"
3239 "ldu 3, 8(30) \n"
3240 "1:bge 7, 1b \n");
3241
3242 if (offset_p)
3243 *offset_p = 12;
3244 if (size_p)
3245 *size_p = 14;
3246 }
3247
3248 /* Table of emit ops for 64-bit ELFv1. */
3249
3250 static struct emit_ops ppc64v1_emit_ops_impl =
3251 {
3252 ppc64v1_emit_prologue,
3253 ppc64_emit_epilogue,
3254 ppc64_emit_add,
3255 ppc64_emit_sub,
3256 ppc64_emit_mul,
3257 ppc64_emit_lsh,
3258 ppc64_emit_rsh_signed,
3259 ppc64_emit_rsh_unsigned,
3260 ppc64_emit_ext,
3261 ppc64_emit_log_not,
3262 ppc64_emit_bit_and,
3263 ppc64_emit_bit_or,
3264 ppc64_emit_bit_xor,
3265 ppc64_emit_bit_not,
3266 ppc64_emit_equal,
3267 ppc64_emit_less_signed,
3268 ppc64_emit_less_unsigned,
3269 ppc64_emit_ref,
3270 ppc64_emit_if_goto,
3271 ppc_emit_goto,
3272 ppc_write_goto_address,
3273 ppc64_emit_const,
3274 ppc64v1_emit_call,
3275 ppc64v1_emit_reg,
3276 ppc64_emit_pop,
3277 ppc64_emit_stack_flush,
3278 ppc64_emit_zero_ext,
3279 ppc64_emit_swap,
3280 ppc_emit_stack_adjust,
3281 ppc64v1_emit_int_call_1,
3282 ppc64v1_emit_void_call_2,
3283 ppc64_emit_eq_goto,
3284 ppc64_emit_ne_goto,
3285 ppc64_emit_lt_goto,
3286 ppc64_emit_le_goto,
3287 ppc64_emit_gt_goto,
3288 ppc64_emit_ge_goto
3289 };
3290
3291 /* Table of emit ops for 64-bit ELFv2. */
3292
3293 static struct emit_ops ppc64v2_emit_ops_impl =
3294 {
3295 ppc64v2_emit_prologue,
3296 ppc64_emit_epilogue,
3297 ppc64_emit_add,
3298 ppc64_emit_sub,
3299 ppc64_emit_mul,
3300 ppc64_emit_lsh,
3301 ppc64_emit_rsh_signed,
3302 ppc64_emit_rsh_unsigned,
3303 ppc64_emit_ext,
3304 ppc64_emit_log_not,
3305 ppc64_emit_bit_and,
3306 ppc64_emit_bit_or,
3307 ppc64_emit_bit_xor,
3308 ppc64_emit_bit_not,
3309 ppc64_emit_equal,
3310 ppc64_emit_less_signed,
3311 ppc64_emit_less_unsigned,
3312 ppc64_emit_ref,
3313 ppc64_emit_if_goto,
3314 ppc_emit_goto,
3315 ppc_write_goto_address,
3316 ppc64_emit_const,
3317 ppc64v2_emit_call,
3318 ppc64v2_emit_reg,
3319 ppc64_emit_pop,
3320 ppc64_emit_stack_flush,
3321 ppc64_emit_zero_ext,
3322 ppc64_emit_swap,
3323 ppc_emit_stack_adjust,
3324 ppc64v2_emit_int_call_1,
3325 ppc64v2_emit_void_call_2,
3326 ppc64_emit_eq_goto,
3327 ppc64_emit_ne_goto,
3328 ppc64_emit_lt_goto,
3329 ppc64_emit_le_goto,
3330 ppc64_emit_gt_goto,
3331 ppc64_emit_ge_goto
3332 };
3333
3334 #endif
3335
3336 /* Implementation of linux_target_ops method "emit_ops". */
3337
3338 static struct emit_ops *
3339 ppc_emit_ops (void)
3340 {
3341 #ifdef __powerpc64__
3342 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3343
3344 if (register_size (regcache->tdesc, 0) == 8)
3345 {
3346 if (is_elfv2_inferior ())
3347 return &ppc64v2_emit_ops_impl;
3348 else
3349 return &ppc64v1_emit_ops_impl;
3350 }
3351 #endif
3352 return &ppc_emit_ops_impl;
3353 }
3354
3355 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3356
3357 static int
3358 ppc_get_ipa_tdesc_idx (void)
3359 {
3360 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3361 const struct target_desc *tdesc = regcache->tdesc;
3362
3363 #ifdef __powerpc64__
3364 if (tdesc == tdesc_powerpc_64l)
3365 return PPC_TDESC_BASE;
3366 if (tdesc == tdesc_powerpc_altivec64l)
3367 return PPC_TDESC_ALTIVEC;
3368 if (tdesc == tdesc_powerpc_vsx64l)
3369 return PPC_TDESC_VSX;
3370 if (tdesc == tdesc_powerpc_isa205_64l)
3371 return PPC_TDESC_ISA205;
3372 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3373 return PPC_TDESC_ISA205_ALTIVEC;
3374 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3375 return PPC_TDESC_ISA205_VSX;
3376 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3377 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3378 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3379 return PPC_TDESC_ISA207_VSX;
3380 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3381 return PPC_TDESC_ISA207_HTM_VSX;
3382 #endif
3383
3384 if (tdesc == tdesc_powerpc_32l)
3385 return PPC_TDESC_BASE;
3386 if (tdesc == tdesc_powerpc_altivec32l)
3387 return PPC_TDESC_ALTIVEC;
3388 if (tdesc == tdesc_powerpc_vsx32l)
3389 return PPC_TDESC_VSX;
3390 if (tdesc == tdesc_powerpc_isa205_32l)
3391 return PPC_TDESC_ISA205;
3392 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3393 return PPC_TDESC_ISA205_ALTIVEC;
3394 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3395 return PPC_TDESC_ISA205_VSX;
3396 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3397 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3398 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3399 return PPC_TDESC_ISA207_VSX;
3400 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3401 return PPC_TDESC_ISA207_HTM_VSX;
3402 if (tdesc == tdesc_powerpc_e500l)
3403 return PPC_TDESC_E500;
3404
3405 return 0;
3406 }
3407
3408 struct linux_target_ops the_low_target = {
3409 0,
3410 ppc_breakpoint_at,
3411 ppc_supports_z_point_type,
3412 ppc_insert_point,
3413 ppc_remove_point,
3414 NULL,
3415 NULL,
3416 ppc_collect_ptrace_register,
3417 ppc_supply_ptrace_register,
3418 NULL, /* siginfo_fixup */
3419 NULL, /* new_process */
3420 NULL, /* delete_process */
3421 NULL, /* new_thread */
3422 NULL, /* delete_thread */
3423 NULL, /* new_fork */
3424 NULL, /* prepare_to_resume */
3425 NULL, /* process_qsupported */
3426 ppc_supports_tracepoints,
3427 ppc_get_thread_area,
3428 ppc_install_fast_tracepoint_jump_pad,
3429 ppc_emit_ops,
3430 ppc_get_min_fast_tracepoint_insn_len,
3431 NULL, /* supports_range_stepping */
3432 ppc_supports_hardware_single_step,
3433 NULL, /* get_syscall_trapinfo */
3434 ppc_get_ipa_tdesc_idx,
3435 };
3436
3437 /* The linux target ops object. */
3438
3439 linux_process_target *the_linux_target = &the_ppc_target;
3440
3441 void
3442 initialize_low_arch (void)
3443 {
3444 /* Initialize the Linux target descriptions. */
3445
3446 init_registers_powerpc_32l ();
3447 init_registers_powerpc_altivec32l ();
3448 init_registers_powerpc_vsx32l ();
3449 init_registers_powerpc_isa205_32l ();
3450 init_registers_powerpc_isa205_altivec32l ();
3451 init_registers_powerpc_isa205_vsx32l ();
3452 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3453 init_registers_powerpc_isa207_vsx32l ();
3454 init_registers_powerpc_isa207_htm_vsx32l ();
3455 init_registers_powerpc_e500l ();
3456 #if __powerpc64__
3457 init_registers_powerpc_64l ();
3458 init_registers_powerpc_altivec64l ();
3459 init_registers_powerpc_vsx64l ();
3460 init_registers_powerpc_isa205_64l ();
3461 init_registers_powerpc_isa205_altivec64l ();
3462 init_registers_powerpc_isa205_vsx64l ();
3463 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3464 init_registers_powerpc_isa207_vsx64l ();
3465 init_registers_powerpc_isa207_htm_vsx64l ();
3466 #endif
3467
3468 initialize_regsets_info (&ppc_regsets_info);
3469 }