1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
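/* A worked example of the field macros above: for the instruction word
   0x4bfffff0 ("b .-16"), PPC_OP6 extracts the top 6 bits and yields 18,
   and PPC_LI extracts the 24-bit LI field, sign-extends it, and shifts
   it left by 2, yielding the byte displacement -16.  */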
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
56
57 bool supports_z_point_type (char z_type) override;
58
59
60 void low_collect_ptrace_register (regcache *regcache, int regno,
61 char *buf) override;
62
63 void low_supply_ptrace_register (regcache *regcache, int regno,
64 const char *buf) override;
65 protected:
66
67 void low_arch_setup () override;
68
69 bool low_cannot_fetch_register (int regno) override;
70
71 bool low_cannot_store_register (int regno) override;
72
73 bool low_supports_breakpoints () override;
74
75 CORE_ADDR low_get_pc (regcache *regcache) override;
76
77 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
78
79 bool low_breakpoint_at (CORE_ADDR pc) override;
80
81 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
82 int size, raw_breakpoint *bp) override;
83
84 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
85 int size, raw_breakpoint *bp) override;
86 };
87
88 /* The singleton target ops object. */
89
90 static ppc_target the_ppc_target;
91
92 /* Holds the AT_HWCAP auxv entry. */
93
94 static unsigned long ppc_hwcap;
95
96 /* Holds the AT_HWCAP2 auxv entry. */
97
98 static unsigned long ppc_hwcap2;
99
100
101 #define ppc_num_regs 73
102
103 #ifdef __powerpc64__
104 /* We use a constant for FPSCR instead of PT_FPSCR, because
105 many shipped PPC64 kernels had the wrong value in ptrace.h. */
106 static int ppc_regmap[] =
107 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
108 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
109 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
110 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
111 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
112 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
113 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
114 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
115 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
116 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
117 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
118 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
119 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
120 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
121 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
122 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
123 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
124 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
125 PT_ORIG_R3 * 8, PT_TRAP * 8 };
126 #else
127 /* Currently, don't check/send MQ. */
128 static int ppc_regmap[] =
129 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
130 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
131 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
132 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
133 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
134 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
135 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
136 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
137 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
138 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
139 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
140 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
141 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
142 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
143 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
144 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
145 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
146 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
147 PT_ORIG_R3 * 4, PT_TRAP * 4
148 };
149
150 static int ppc_regmap_e500[] =
151 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
152 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
153 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
154 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
155 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
156 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
157 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
158 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
159 -1, -1, -1, -1,
160 -1, -1, -1, -1,
161 -1, -1, -1, -1,
162 -1, -1, -1, -1,
163 -1, -1, -1, -1,
164 -1, -1, -1, -1,
165 -1, -1, -1, -1,
166 -1, -1, -1, -1,
167 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
168 PT_CTR * 4, PT_XER * 4, -1,
169 PT_ORIG_R3 * 4, PT_TRAP * 4
170 };
171 #endif
172
173 /* Check whether the kernel provides a register set with number
174 REGSET_ID of size REGSETSIZE for process/thread TID. */
175
176 static int
177 ppc_check_regset (int tid, int regset_id, int regsetsize)
178 {
179 void *buf = alloca (regsetsize);
180 struct iovec iov;
181
182 iov.iov_base = buf;
183 iov.iov_len = regsetsize;
184
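  /* Note: ENODATA means the kernel knows this regset but currently has no
     data for it (e.g. the HTM regsets outside a transaction), so it still
     counts as available.  */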
185 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
186 || errno == ENODATA)
187 return 1;
188 return 0;
189 }
190
191 bool
192 ppc_target::low_cannot_store_register (int regno)
193 {
194 const struct target_desc *tdesc = current_process ()->tdesc;
195
196 #ifndef __powerpc64__
197 /* Some kernels do not allow us to store fpscr. */
198 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
199 && regno == find_regno (tdesc, "fpscr"))
200 return true;
201 #endif
202
203 /* Some kernels do not allow us to store orig_r3 or trap. */
204 if (regno == find_regno (tdesc, "orig_r3")
205 || regno == find_regno (tdesc, "trap"))
206 return true;
207
208 return false;
209 }
210
211 bool
212 ppc_target::low_cannot_fetch_register (int regno)
213 {
214 return false;
215 }
216
217 void
218 ppc_target::low_collect_ptrace_register (regcache *regcache, int regno,
219 char *buf)
220 {
221 memset (buf, 0, sizeof (long));
222
223 if (__BYTE_ORDER == __LITTLE_ENDIAN)
224 {
225 /* Little-endian values always sit at the left end of the buffer. */
226 collect_register (regcache, regno, buf);
227 }
228 else if (__BYTE_ORDER == __BIG_ENDIAN)
229 {
230       /* Big-endian values sit at the right end of the buffer.  Registers
231 	 smaller than sizeof (long) must therefore be accessed at an offset
232 	 into the buffer (e.g. bytes 4-7 of an 8-byte slot for a 4-byte register).  */
233 int size = register_size (regcache->tdesc, regno);
234
235 if (size < sizeof (long))
236 collect_register (regcache, regno, buf + sizeof (long) - size);
237 else
238 collect_register (regcache, regno, buf);
239 }
240 else
241 perror_with_name ("Unexpected byte order");
242 }
243
244 void
245 ppc_target::low_supply_ptrace_register (regcache *regcache, int regno,
246 const char *buf)
247 {
248 if (__BYTE_ORDER == __LITTLE_ENDIAN)
249 {
250 /* Little-endian values always sit at the left end of the buffer. */
251 supply_register (regcache, regno, buf);
252 }
253 else if (__BYTE_ORDER == __BIG_ENDIAN)
254 {
255       /* Big-endian values sit at the right end of the buffer.  Registers
256 	 smaller than sizeof (long) must therefore be accessed at an offset
257 	 into the buffer.  */
258 int size = register_size (regcache->tdesc, regno);
259
260 if (size < sizeof (long))
261 supply_register (regcache, regno, buf + sizeof (long) - size);
262 else
263 supply_register (regcache, regno, buf);
264 }
265 else
266 perror_with_name ("Unexpected byte order");
267 }
268
269 bool
270 ppc_target::low_supports_breakpoints ()
271 {
272 return true;
273 }
274
275 CORE_ADDR
276 ppc_target::low_get_pc (regcache *regcache)
277 {
278 if (register_size (regcache->tdesc, 0) == 4)
279 {
280 unsigned int pc;
281 collect_register_by_name (regcache, "pc", &pc);
282 return (CORE_ADDR) pc;
283 }
284 else
285 {
286 unsigned long pc;
287 collect_register_by_name (regcache, "pc", &pc);
288 return (CORE_ADDR) pc;
289 }
290 }
291
292 void
293 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
294 {
295 if (register_size (regcache->tdesc, 0) == 4)
296 {
297 unsigned int newpc = pc;
298 supply_register_by_name (regcache, "pc", &newpc);
299 }
300 else
301 {
302 unsigned long newpc = pc;
303 supply_register_by_name (regcache, "pc", &newpc);
304 }
305 }
306
307 #ifndef __powerpc64__
308 static int ppc_regmap_adjusted;
309 #endif
310
311
312 /* Correct in either endianness.
313 This instruction is "twge r2, r2", which GDB uses as a software
314 breakpoint. */
315 static const unsigned int ppc_breakpoint = 0x7d821008;
316 #define ppc_breakpoint_len 4
317
318 /* Implementation of target ops method "sw_breakpoint_from_kind". */
319
320 const gdb_byte *
321 ppc_target::sw_breakpoint_from_kind (int kind, int *size)
322 {
323 *size = ppc_breakpoint_len;
324 return (const gdb_byte *) &ppc_breakpoint;
325 }
326
327 bool
328 ppc_target::low_breakpoint_at (CORE_ADDR where)
329 {
330 unsigned int insn;
331
332 read_memory (where, (unsigned char *) &insn, 4);
333 if (insn == ppc_breakpoint)
334 return true;
335 /* If necessary, recognize more trap instructions here. GDB only uses
336 the one. */
337
338 return false;
339 }
340
341 /* Implement supports_z_point_type target-ops.
342 Returns true if type Z_TYPE breakpoint is supported.
343
344    Software breakpoints are handled on the server side, so tracepoints
345    and breakpoints can be inserted at the same location.  */
346
347 bool
348 ppc_target::supports_z_point_type (char z_type)
349 {
350 switch (z_type)
351 {
352 case Z_PACKET_SW_BP:
353 return true;
354 case Z_PACKET_HW_BP:
355 case Z_PACKET_WRITE_WP:
356 case Z_PACKET_ACCESS_WP:
357 default:
358 return false;
359 }
360 }
361
362 /* Implement the low_insert_point linux target op.
363 Returns 0 on success, -1 on failure and 1 on unsupported. */
364
365 int
366 ppc_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
367 int size, raw_breakpoint *bp)
368 {
369 switch (type)
370 {
371 case raw_bkpt_type_sw:
372 return insert_memory_breakpoint (bp);
373
374 case raw_bkpt_type_hw:
375 case raw_bkpt_type_write_wp:
376 case raw_bkpt_type_access_wp:
377 default:
378 /* Unsupported. */
379 return 1;
380 }
381 }
382
383 /* Implement the low_remove_point linux target op.
384 Returns 0 on success, -1 on failure and 1 on unsupported. */
385
386 int
387 ppc_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
388 int size, raw_breakpoint *bp)
389 {
390 switch (type)
391 {
392 case raw_bkpt_type_sw:
393 return remove_memory_breakpoint (bp);
394
395 case raw_bkpt_type_hw:
396 case raw_bkpt_type_write_wp:
397 case raw_bkpt_type_access_wp:
398 default:
399 /* Unsupported. */
400 return 1;
401 }
402 }
403
404 /* Provide only a fill function for the general register set. ps_lgetregs
405 will use this for NPTL support. */
406
407 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
408 {
409 int i;
410
411 ppc_target *my_ppc_target = (ppc_target *) the_linux_target;
412
413 for (i = 0; i < 32; i++)
414 my_ppc_target->low_collect_ptrace_register (regcache, i,
415 (char *) buf + ppc_regmap[i]);
416
417 for (i = 64; i < 70; i++)
418 my_ppc_target->low_collect_ptrace_register (regcache, i,
419 (char *) buf + ppc_regmap[i]);
420
421 for (i = 71; i < 73; i++)
422 my_ppc_target->low_collect_ptrace_register (regcache, i,
423 (char *) buf + ppc_regmap[i]);
424 }
425
426 /* Program Priority Register regset fill function. */
427
428 static void
429 ppc_fill_pprregset (struct regcache *regcache, void *buf)
430 {
431 char *ppr = (char *) buf;
432
433 collect_register_by_name (regcache, "ppr", ppr);
434 }
435
436 /* Program Priority Register regset store function. */
437
438 static void
439 ppc_store_pprregset (struct regcache *regcache, const void *buf)
440 {
441 const char *ppr = (const char *) buf;
442
443 supply_register_by_name (regcache, "ppr", ppr);
444 }
445
446 /* Data Stream Control Register regset fill function. */
447
448 static void
449 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
450 {
451 char *dscr = (char *) buf;
452
453 collect_register_by_name (regcache, "dscr", dscr);
454 }
455
456 /* Data Stream Control Register regset store function. */
457
458 static void
459 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
460 {
461 const char *dscr = (const char *) buf;
462
463 supply_register_by_name (regcache, "dscr", dscr);
464 }
465
466 /* Target Address Register regset fill function. */
467
468 static void
469 ppc_fill_tarregset (struct regcache *regcache, void *buf)
470 {
471 char *tar = (char *) buf;
472
473 collect_register_by_name (regcache, "tar", tar);
474 }
475
476 /* Target Address Register regset store function. */
477
478 static void
479 ppc_store_tarregset (struct regcache *regcache, const void *buf)
480 {
481 const char *tar = (const char *) buf;
482
483 supply_register_by_name (regcache, "tar", tar);
484 }
485
486 /* Event-Based Branching regset store function.  Unless the inferior
487    has a perf event open, ptrace can fail with ENODATA when reading
488    or writing the regset.  For reading, the registers
489 will correctly show as unavailable. For writing, gdbserver
490 currently only caches any register writes from P and G packets and
491 the stub always tries to write all the regsets when resuming the
492 inferior, which would result in frequent warnings. For this
493 reason, we don't define a fill function. This also means that the
494 client-side regcache will be dirty if the user tries to write to
495 the EBB registers. G packets that the client sends to write to
496 unrelated registers will also include data for EBB registers, even
497 if they are unavailable. */
498
499 static void
500 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
501 {
502 const char *regset = (const char *) buf;
503
504   /* The order in the kernel regset is: EBBRR, EBBHR, BESCR.  The order
505      in the .dat file is BESCR, EBBHR, EBBRR.  */
506 supply_register_by_name (regcache, "ebbrr", &regset[0]);
507 supply_register_by_name (regcache, "ebbhr", &regset[8]);
508 supply_register_by_name (regcache, "bescr", &regset[16]);
509 }
510
511 /* Performance Monitoring Unit regset fill function. */
512
513 static void
514 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
515 {
516 char *regset = (char *) buf;
517
518   /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
519      The order in the .dat file is MMCR0, MMCR2, SIAR, SDAR, SIER.  */
520 collect_register_by_name (regcache, "siar", &regset[0]);
521 collect_register_by_name (regcache, "sdar", &regset[8]);
522 collect_register_by_name (regcache, "sier", &regset[16]);
523 collect_register_by_name (regcache, "mmcr2", &regset[24]);
524 collect_register_by_name (regcache, "mmcr0", &regset[32]);
525 }
526
527 /* Performance Monitoring Unit regset store function. */
528
529 static void
530 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
531 {
532 const char *regset = (const char *) buf;
533
534 supply_register_by_name (regcache, "siar", &regset[0]);
535 supply_register_by_name (regcache, "sdar", &regset[8]);
536 supply_register_by_name (regcache, "sier", &regset[16]);
537 supply_register_by_name (regcache, "mmcr2", &regset[24]);
538 supply_register_by_name (regcache, "mmcr0", &regset[32]);
539 }
540
541 /* Hardware Transactional Memory special-purpose register regset fill
542 function. */
543
544 static void
545 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
546 {
547 int i, base;
548 char *regset = (char *) buf;
549
550 base = find_regno (regcache->tdesc, "tfhar");
551 for (i = 0; i < 3; i++)
552 collect_register (regcache, base + i, &regset[i * 8]);
553 }
554
555 /* Hardware Transactional Memory special-purpose register regset store
556 function. */
557
558 static void
559 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
560 {
561 int i, base;
562 const char *regset = (const char *) buf;
563
564 base = find_regno (regcache->tdesc, "tfhar");
565 for (i = 0; i < 3; i++)
566 supply_register (regcache, base + i, &regset[i * 8]);
567 }
568
569 /* For the same reasons as the EBB regset, none of the HTM
570 checkpointed regsets have a fill function. These registers are
571 only available if the inferior is in a transaction. */
572
573 /* Hardware Transactional Memory checkpointed general-purpose regset
574 store function. */
575
576 static void
577 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
578 {
579 int i, base, size, endian_offset;
580 const char *regset = (const char *) buf;
581
582 base = find_regno (regcache->tdesc, "cr0");
583 size = register_size (regcache->tdesc, base);
584
585 gdb_assert (size == 4 || size == 8);
586
587 for (i = 0; i < 32; i++)
588 supply_register (regcache, base + i, &regset[i * size]);
589
590 endian_offset = 0;
591
592 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
593 endian_offset = 4;
594
595 supply_register_by_name (regcache, "ccr",
596 &regset[PT_CCR * size + endian_offset]);
597
598 supply_register_by_name (regcache, "cxer",
599 &regset[PT_XER * size + endian_offset]);
600
601 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
602 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
603 }
604
605 /* Hardware Transactional Memory checkpointed floating-point regset
606 store function. */
607
608 static void
609 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
610 {
611 int i, base;
612 const char *regset = (const char *) buf;
613
614 base = find_regno (regcache->tdesc, "cf0");
615
616 for (i = 0; i < 32; i++)
617 supply_register (regcache, base + i, &regset[i * 8]);
618
619 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
620 }
621
622 /* Hardware Transactional Memory checkpointed vector regset store
623 function. */
624
625 static void
626 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
627 {
628 int i, base;
629 const char *regset = (const char *) buf;
630 int vscr_offset = 0;
631
632 base = find_regno (regcache->tdesc, "cvr0");
633
634 for (i = 0; i < 32; i++)
635 supply_register (regcache, base + i, &regset[i * 16]);
636
637 if (__BYTE_ORDER == __BIG_ENDIAN)
638 vscr_offset = 12;
639
640 supply_register_by_name (regcache, "cvscr",
641 &regset[32 * 16 + vscr_offset]);
642
643 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
644 }
645
646 /* Hardware Transactional Memory checkpointed vector-scalar regset
647 store function. */
648
649 static void
650 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
651 {
652 int i, base;
653 const char *regset = (const char *) buf;
654
655 base = find_regno (regcache->tdesc, "cvs0h");
656 for (i = 0; i < 32; i++)
657 supply_register (regcache, base + i, &regset[i * 8]);
658 }
659
660 /* Hardware Transactional Memory checkpointed Program Priority
661 Register regset store function. */
662
663 static void
664 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
665 {
666 const char *cppr = (const char *) buf;
667
668 supply_register_by_name (regcache, "cppr", cppr);
669 }
670
671 /* Hardware Transactional Memory checkpointed Data Stream Control
672 Register regset store function. */
673
674 static void
675 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
676 {
677 const char *cdscr = (const char *) buf;
678
679 supply_register_by_name (regcache, "cdscr", cdscr);
680 }
681
682 /* Hardware Transactional Memory checkpointed Target Address Register
683 regset store function. */
684
685 static void
686 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
687 {
688 const char *ctar = (const char *) buf;
689
690 supply_register_by_name (regcache, "ctar", ctar);
691 }
692
693 static void
694 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
695 {
696 int i, base;
697 char *regset = (char *) buf;
698
699 base = find_regno (regcache->tdesc, "vs0h");
700 for (i = 0; i < 32; i++)
701 collect_register (regcache, base + i, &regset[i * 8]);
702 }
703
704 static void
705 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
706 {
707 int i, base;
708 const char *regset = (const char *) buf;
709
710 base = find_regno (regcache->tdesc, "vs0h");
711 for (i = 0; i < 32; i++)
712 supply_register (regcache, base + i, &regset[i * 8]);
713 }
714
715 static void
716 ppc_fill_vrregset (struct regcache *regcache, void *buf)
717 {
718 int i, base;
719 char *regset = (char *) buf;
720 int vscr_offset = 0;
721
722 base = find_regno (regcache->tdesc, "vr0");
723 for (i = 0; i < 32; i++)
724 collect_register (regcache, base + i, &regset[i * 16]);
725
726 if (__BYTE_ORDER == __BIG_ENDIAN)
727 vscr_offset = 12;
728
729 collect_register_by_name (regcache, "vscr",
730 &regset[32 * 16 + vscr_offset]);
731
732 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
733 }
734
735 static void
736 ppc_store_vrregset (struct regcache *regcache, const void *buf)
737 {
738 int i, base;
739 const char *regset = (const char *) buf;
740 int vscr_offset = 0;
741
742 base = find_regno (regcache->tdesc, "vr0");
743 for (i = 0; i < 32; i++)
744 supply_register (regcache, base + i, &regset[i * 16]);
745
746 if (__BYTE_ORDER == __BIG_ENDIAN)
747 vscr_offset = 12;
748
749 supply_register_by_name (regcache, "vscr",
750 &regset[32 * 16 + vscr_offset]);
751 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
752 }
753
754 struct gdb_evrregset_t
755 {
756 unsigned long evr[32];
757 unsigned long long acc;
758 unsigned long spefscr;
759 };
760
761 static void
762 ppc_fill_evrregset (struct regcache *regcache, void *buf)
763 {
764 int i, ev0;
765 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
766
767 ev0 = find_regno (regcache->tdesc, "ev0h");
768 for (i = 0; i < 32; i++)
769 collect_register (regcache, ev0 + i, &regset->evr[i]);
770
771 collect_register_by_name (regcache, "acc", &regset->acc);
772 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
773 }
774
775 static void
776 ppc_store_evrregset (struct regcache *regcache, const void *buf)
777 {
778 int i, ev0;
779 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
780
781 ev0 = find_regno (regcache->tdesc, "ev0h");
782 for (i = 0; i < 32; i++)
783 supply_register (regcache, ev0 + i, &regset->evr[i]);
784
785 supply_register_by_name (regcache, "acc", &regset->acc);
786 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
787 }
788
789 /* Support for hardware single step. */
790
791 static int
792 ppc_supports_hardware_single_step (void)
793 {
794 return 1;
795 }
796
797 static struct regset_info ppc_regsets[] = {
798 /* List the extra register sets before GENERAL_REGS. That way we will
799 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
800 general registers. Some kernels support these, but not the newer
801 PPC_PTRACE_GETREGS. */
802 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
803 NULL, ppc_store_tm_ctarregset },
804 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
805 NULL, ppc_store_tm_cdscrregset },
806 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
807 NULL, ppc_store_tm_cpprregset },
808 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
809 NULL, ppc_store_tm_cvsxregset },
810 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
811 NULL, ppc_store_tm_cvrregset },
812 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
813 NULL, ppc_store_tm_cfprregset },
814 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
815 NULL, ppc_store_tm_cgprregset },
816 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
817 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
818 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
819 NULL, ppc_store_ebbregset },
820 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
821 ppc_fill_pmuregset, ppc_store_pmuregset },
822 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
823 ppc_fill_tarregset, ppc_store_tarregset },
824 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
825 ppc_fill_pprregset, ppc_store_pprregset },
826 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
827 ppc_fill_dscrregset, ppc_store_dscrregset },
828 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
829 ppc_fill_vsxregset, ppc_store_vsxregset },
830 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
831 ppc_fill_vrregset, ppc_store_vrregset },
832 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
833 ppc_fill_evrregset, ppc_store_evrregset },
834 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
835 NULL_REGSET
836 };
837
838 static struct usrregs_info ppc_usrregs_info =
839 {
840 ppc_num_regs,
841 ppc_regmap,
842 };
843
844 static struct regsets_info ppc_regsets_info =
845 {
846 ppc_regsets, /* regsets */
847 0, /* num_regsets */
848 NULL, /* disabled_regsets */
849 };
850
851 static struct regs_info myregs_info =
852 {
853 NULL, /* regset_bitmap */
854 &ppc_usrregs_info,
855 &ppc_regsets_info
856 };
857
858 const regs_info *
859 ppc_target::get_regs_info ()
860 {
861 return &myregs_info;
862 }
863
864 void
865 ppc_target::low_arch_setup ()
866 {
867 const struct target_desc *tdesc;
868 struct regset_info *regset;
869 struct ppc_linux_features features = ppc_linux_no_features;
870
871 int tid = lwpid_of (current_thread);
872
873 features.wordsize = ppc_linux_target_wordsize (tid);
874
875 if (features.wordsize == 4)
876 tdesc = tdesc_powerpc_32l;
877 else
878 tdesc = tdesc_powerpc_64l;
879
880 current_process ()->tdesc = tdesc;
881
882 /* The value of current_process ()->tdesc needs to be set for this
883 call. */
884 ppc_hwcap = linux_get_hwcap (features.wordsize);
885 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
886
887 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
888
889 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
890 features.vsx = true;
891
892 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
893 features.altivec = true;
894
895 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
896 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
897 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
898 {
899 features.ppr_dscr = true;
900 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
901 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
902 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
903 && ppc_check_regset (tid, NT_PPC_TAR,
904 PPC_LINUX_SIZEOF_TARREGSET)
905 && ppc_check_regset (tid, NT_PPC_EBB,
906 PPC_LINUX_SIZEOF_EBBREGSET)
907 && ppc_check_regset (tid, NT_PPC_PMU,
908 PPC_LINUX_SIZEOF_PMUREGSET))
909 {
910 features.isa207 = true;
911 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
912 && ppc_check_regset (tid, NT_PPC_TM_SPR,
913 PPC_LINUX_SIZEOF_TM_SPRREGSET))
914 features.htm = true;
915 }
916 }
917
918 tdesc = ppc_linux_match_description (features);
919
920 /* On 32-bit machines, check for SPE registers.
921      Set the low target's regmap field appropriately.  */
922 #ifndef __powerpc64__
923 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
924 tdesc = tdesc_powerpc_e500l;
925
926 if (!ppc_regmap_adjusted)
927 {
928 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
929 ppc_usrregs_info.regmap = ppc_regmap_e500;
930
931 /* If the FPSCR is 64-bit wide, we need to fetch the whole
932 64-bit slot and not just its second word. The PT_FPSCR
933 supplied in a 32-bit GDB compilation doesn't reflect
934 this. */
935 if (register_size (tdesc, 70) == 8)
936 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
937
938 ppc_regmap_adjusted = 1;
939 }
940 #endif
941
942 current_process ()->tdesc = tdesc;
943
944 for (regset = ppc_regsets; regset->size >= 0; regset++)
945 switch (regset->get_request)
946 {
947 case PTRACE_GETVRREGS:
948 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
949 break;
950 case PTRACE_GETVSXREGS:
951 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
952 break;
953 case PTRACE_GETEVRREGS:
954 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
955 regset->size = 32 * 4 + 8 + 4;
956 else
957 regset->size = 0;
958 break;
959 case PTRACE_GETREGSET:
960 switch (regset->nt_type)
961 {
962 case NT_PPC_PPR:
963 regset->size = (features.ppr_dscr ?
964 PPC_LINUX_SIZEOF_PPRREGSET : 0);
965 break;
966 case NT_PPC_DSCR:
967 regset->size = (features.ppr_dscr ?
968 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
969 break;
970 case NT_PPC_TAR:
971 regset->size = (features.isa207 ?
972 PPC_LINUX_SIZEOF_TARREGSET : 0);
973 break;
974 case NT_PPC_EBB:
975 regset->size = (features.isa207 ?
976 PPC_LINUX_SIZEOF_EBBREGSET : 0);
977 break;
978 case NT_PPC_PMU:
979 regset->size = (features.isa207 ?
980 PPC_LINUX_SIZEOF_PMUREGSET : 0);
981 break;
982 case NT_PPC_TM_SPR:
983 regset->size = (features.htm ?
984 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
985 break;
986 case NT_PPC_TM_CGPR:
987 if (features.wordsize == 4)
988 regset->size = (features.htm ?
989 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
990 else
991 regset->size = (features.htm ?
992 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
993 break;
994 case NT_PPC_TM_CFPR:
995 regset->size = (features.htm ?
996 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
997 break;
998 case NT_PPC_TM_CVMX:
999 regset->size = (features.htm ?
1000 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
1001 break;
1002 case NT_PPC_TM_CVSX:
1003 regset->size = (features.htm ?
1004 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
1005 break;
1006 case NT_PPC_TM_CPPR:
1007 regset->size = (features.htm ?
1008 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
1009 break;
1010 case NT_PPC_TM_CDSCR:
1011 regset->size = (features.htm ?
1012 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
1013 break;
1014 case NT_PPC_TM_CTAR:
1015 regset->size = (features.htm ?
1016 PPC_LINUX_SIZEOF_CTARREGSET : 0);
1017 break;
1018 default:
1019 break;
1020 }
1021 break;
1022 default:
1023 break;
1024 }
1025 }
1026
1027 /* Implementation of linux_target_ops method "supports_tracepoints". */
1028
1029 static int
1030 ppc_supports_tracepoints (void)
1031 {
1032 return 1;
1033 }
1034
1035 /* Get the thread area address. This is used to recognize which
1036 thread is which when tracing with the in-process agent library. We
1037 don't read anything from the address, and treat it as opaque; it's
1038 the address itself that we assume is unique per-thread. */
1039
1040 static int
1041 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
1042 {
1043 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1044 struct thread_info *thr = get_lwp_thread (lwp);
1045 struct regcache *regcache = get_thread_regcache (thr, 1);
1046 ULONGEST tp = 0;
1047
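  /* The thread pointer lives in r13 on 64-bit and in r2 on 32-bit, per the
     respective Power Architecture ELF ABIs.  */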
1048 #ifdef __powerpc64__
1049 if (register_size (regcache->tdesc, 0) == 8)
1050 collect_register_by_name (regcache, "r13", &tp);
1051 else
1052 #endif
1053 collect_register_by_name (regcache, "r2", &tp);
1054
1055 *addr = tp;
1056
1057 return 0;
1058 }
1059
1060 #ifdef __powerpc64__
1061
1062 /* Older glibc doesn't provide this. */
1063
1064 #ifndef EF_PPC64_ABI
1065 #define EF_PPC64_ABI 3
1066 #endif
1067
1068 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1069 inferiors. */
1070
1071 static int
1072 is_elfv2_inferior (void)
1073 {
1074 /* To be used as fallback if we're unable to determine the right result -
1075 assume inferior uses the same ABI as gdbserver. */
1076 #if _CALL_ELF == 2
1077 const int def_res = 1;
1078 #else
1079 const int def_res = 0;
1080 #endif
1081 CORE_ADDR phdr;
1082 Elf64_Ehdr ehdr;
1083
1084 const struct target_desc *tdesc = current_process ()->tdesc;
1085 int wordsize = register_size (tdesc, 0);
1086
1087 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1088 return def_res;
1089
1090 /* Assume ELF header is at the beginning of the page where program headers
1091 are located. If it doesn't look like one, bail. */
1092
1093 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1094   if (memcmp (ehdr.e_ident, ELFMAG, SELFMAG) != 0)
1095 return def_res;
1096
1097 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1098 }
1099
1100 #endif
1101
1102 /* Generate a ds-form instruction in BUF and return the number of 32-bit instructions written.
1103
1104 0 6 11 16 30 32
1105 | OPCD | RST | RA | DS |XO| */
1106
1107 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1108 static int
1109 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1110 {
1111 uint32_t insn;
1112
1113 gdb_assert ((opcd & ~0x3f) == 0);
1114 gdb_assert ((rst & ~0x1f) == 0);
1115 gdb_assert ((ra & ~0x1f) == 0);
1116 gdb_assert ((xo & ~0x3) == 0);
1117
1118 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1119 *buf = (opcd << 26) | insn;
1120 return 1;
1121 }
1122
1123 /* The following are frequently used ds-form instructions.  */
1124
1125 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1126 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1127 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1128 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
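/* For example, GEN_STDU (buf, 1, 1, -32) encodes 0xf821ffe1, i.e. the
   common prologue instruction "stdu r1, -32(r1)".  */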
1129
1130 /* Generate a d-form instruction in BUF.
1131
1132 0 6 11 16 32
1133 | OPCD | RST | RA | D | */
1134
1135 static int
1136 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1137 {
1138 uint32_t insn;
1139
1140 gdb_assert ((opcd & ~0x3f) == 0);
1141 gdb_assert ((rst & ~0x1f) == 0);
1142 gdb_assert ((ra & ~0x1f) == 0);
1143
1144 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1145 *buf = (opcd << 26) | insn;
1146 return 1;
1147 }
1148
1149 /* The following are frequently used d-form instructions.  */
1150
1151 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1152 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1153 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1154 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1155 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1156 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1157 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1158 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1159 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1160
1161 /* Generate an xfx-form instruction in BUF and return the number of
1162    32-bit instructions written.
1163
1164 0 6 11 21 31 32
1165 | OPCD | RST | RI | XO |/| */
1166
1167 static int
1168 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1169 {
1170 uint32_t insn;
1171 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1172
1173 gdb_assert ((opcd & ~0x3f) == 0);
1174 gdb_assert ((rst & ~0x1f) == 0);
1175 gdb_assert ((xo & ~0x3ff) == 0);
1176
1177 insn = (rst << 21) | (n << 11) | (xo << 1);
1178 *buf = (opcd << 26) | insn;
1179 return 1;
1180 }
1181
1182 /* The following are frequently used xfx-form instructions.  */
1183
1184 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1185 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1186 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1187 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1188 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1189 E & 0xf, 598)
1190 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
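/* For example, GEN_MFSPR (buf, 5, 8) encodes 0x7ca802a6, i.e. "mflr r5"
   (SPR 8 is the link register).  */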
1191
1192
1193 /* Generate an x-form instruction in BUF and return the number of 32-bit instructions written.
1194
1195 0 6 11 16 21 31 32
1196 | OPCD | RST | RA | RB | XO |RC| */
1197
1198 static int
1199 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1200 {
1201 uint32_t insn;
1202
1203 gdb_assert ((opcd & ~0x3f) == 0);
1204 gdb_assert ((rst & ~0x1f) == 0);
1205 gdb_assert ((ra & ~0x1f) == 0);
1206 gdb_assert ((rb & ~0x1f) == 0);
1207 gdb_assert ((xo & ~0x3ff) == 0);
1208 gdb_assert ((rc & ~1) == 0);
1209
1210 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1211 *buf = (opcd << 26) | insn;
1212 return 1;
1213 }
1214
1215 /* The following are frequently used x-form instructions.  */
1216
1217 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1218 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1219 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1220 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1221 /* Assume bf = cr7. */
1222 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
1223
1224
1225 /* Generate an md-form instruction in BUF and return the number of 32-bit instructions written.
1226
1227 0 6 11 16 21 27 30 31 32
1228 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1229
1230 static int
1231 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1232 int xo, int rc)
1233 {
1234 uint32_t insn;
1235 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1236 unsigned int sh0_4 = sh & 0x1f;
1237 unsigned int sh5 = (sh >> 5) & 1;
1238
1239 gdb_assert ((opcd & ~0x3f) == 0);
1240 gdb_assert ((rs & ~0x1f) == 0);
1241 gdb_assert ((ra & ~0x1f) == 0);
1242 gdb_assert ((sh & ~0x3f) == 0);
1243 gdb_assert ((mb & ~0x3f) == 0);
1244 gdb_assert ((xo & ~0x7) == 0);
1245 gdb_assert ((rc & ~0x1) == 0);
1246
1247 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1248 | (sh5 << 1) | (xo << 2) | (rc & 1);
1249 *buf = (opcd << 26) | insn;
1250 return 1;
1251 }
1252
1253 /* The following are frequently used md-form instructions. */
1254
1255 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1256 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1257 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1258 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1259
1260 /* Generate an i-form instruction in BUF and return the number of 32-bit instructions written.
1261
1262 0 6 30 31 32
1263 | OPCD | LI |AA|LK| */
1264
1265 static int
1266 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1267 {
1268 uint32_t insn;
1269
1270 gdb_assert ((opcd & ~0x3f) == 0);
1271
1272 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1273 *buf = (opcd << 26) | insn;
1274 return 1;
1275 }
1276
1277 /* The following are frequently used i-form instructions. */
1278
1279 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1280 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1281
1282 /* Generate a b-form instruction in BUF and return the number of 32-bit instructions written.
1283
1284 0 6 11 16 30 31 32
1285 | OPCD | BO | BI | BD |AA|LK| */
1286
1287 static int
1288 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1289 int aa, int lk)
1290 {
1291 uint32_t insn;
1292
1293 gdb_assert ((opcd & ~0x3f) == 0);
1294 gdb_assert ((bo & ~0x1f) == 0);
1295 gdb_assert ((bi & ~0x1f) == 0);
1296
1297 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1298 *buf = (opcd << 26) | insn;
1299 return 1;
1300 }
1301
1302 /* The following are frequently used b-form instructions. */
1303 /* Assume bi = cr7. */
1304 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
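/* BO = 0x4 means "branch if the CR bit is clear" and BI = (7 << 2) | 2
   selects the EQ bit of CR7, so this is effectively "bne cr7, bd".  */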
1305
1306 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1307    respectively.  They are primarily used to save/restore GPRs in the
1308    jump pad, not for bytecode compilation.  */
1309
1310 #ifdef __powerpc64__
1311 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1312 GEN_LD (buf, rt, ra, si) : \
1313 GEN_LWZ (buf, rt, ra, si))
1314 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1315 GEN_STD (buf, rt, ra, si) : \
1316 GEN_STW (buf, rt, ra, si))
1317 #else
1318 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1319 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1320 #endif
1321
1322 /* Generate a sequence of instructions to load IMM into register REG.
1323    Write the instructions in BUF and return the number of 32-bit instructions written.  */
1324
1325 static int
1326 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1327 {
1328 uint32_t *p = buf;
1329
1330 if ((imm + 32768) < 65536)
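  /* A single "li" suffices when IMM fits in a signed 16-bit value.  */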
1331 {
1332 /* li reg, imm[15:0] */
1333 p += GEN_LI (p, reg, imm);
1334 }
1335 else if ((imm >> 32) == 0)
1336 {
1337 /* lis reg, imm[31:16]
1338 ori reg, reg, imm[15:0]
1339 rldicl reg, reg, 0, 32 */
1340 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1341 if ((imm & 0xffff) != 0)
1342 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1343 /* Clear upper 32-bit if sign-bit is set. */
1344 if (imm & (1u << 31) && is_64)
1345 p += GEN_RLDICL (p, reg, reg, 0, 32);
1346 }
1347 else
1348 {
1349 gdb_assert (is_64);
1350 /* lis reg, <imm[63:48]>
1351 ori reg, reg, <imm[48:32]>
1352 rldicr reg, reg, 32, 31
1353 oris reg, reg, <imm[31:16]>
1354 ori reg, reg, <imm[15:0]> */
1355 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1356 if (((imm >> 32) & 0xffff) != 0)
1357 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1358 p += GEN_RLDICR (p, reg, reg, 32, 31);
1359 if (((imm >> 16) & 0xffff) != 0)
1360 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1361 if ((imm & 0xffff) != 0)
1362 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1363 }
1364
1365 return p - buf;
1366 }
1367
1368 /* Generate a sequence for an atomic exchange at location LOCK.
1369    This code sequence clobbers r6, r7, r8.  LOCK is the location for
1370    the atomic exchange, OLD_VALUE is the expected old value stored at
1371    that location, and R_NEW is the register holding the new value.  */
1372
1373 static int
1374 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1375 int is_64)
1376 {
1377 const int r_lock = 6;
1378 const int r_old = 7;
1379 const int r_tmp = 8;
1380 uint32_t *p = buf;
1381
1382 /*
1383 1: lwarx TMP, 0, LOCK
1384 cmpwi TMP, OLD
1385 bne 1b
1386 stwcx. NEW, 0, LOCK
1387 bne 1b */
1388
1389 p += gen_limm (p, r_lock, lock, is_64);
1390 p += gen_limm (p, r_old, old_value, is_64);
1391
1392 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1393 p += GEN_CMPW (p, r_tmp, r_old);
1394 p += GEN_BNE (p, -8);
1395 p += GEN_STWCX (p, r_new, 0, r_lock);
1396 p += GEN_BNE (p, -16);
1397
1398 return p - buf;
1399 }
1400
1401 /* Generate a sequence of instructions for calling a function
1402    at address FN.  Return the number of 32-bit instructions written to BUF.  */
1403
1404 static int
1405 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1406 {
1407 uint32_t *p = buf;
1408
1409   /* The call must go through r12 so the callee can calculate its TOC address.  */
1410 p += gen_limm (p, 12, fn, is_64);
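  /* For ELFv1, FN points to a function descriptor: the entry point is at
     offset 0, the TOC pointer at offset 8 and the static chain at offset 16,
     which is what the three loads below pick up.  */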
1411 if (is_opd)
1412 {
1413 p += GEN_LOAD (p, 11, 12, 16, is_64);
1414 p += GEN_LOAD (p, 2, 12, 8, is_64);
1415 p += GEN_LOAD (p, 12, 12, 0, is_64);
1416 }
1417 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1418 *p++ = 0x4e800421; /* bctrl */
1419
1420 return p - buf;
1421 }
1422
1423 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1424 of instruction. This function is used to adjust pc-relative instructions
1425 when copying. */
1426
1427 static void
1428 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1429 {
1430 uint32_t insn, op6;
1431 long rel, newrel;
1432
1433 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1434 op6 = PPC_OP6 (insn);
1435
1436 if (op6 == 18 && (insn & 2) == 0)
1437 {
1438 /* branch && AA = 0 */
1439 rel = PPC_LI (insn);
1440 newrel = (oldloc - *to) + rel;
1441
1442 /* Out of range. Cannot relocate instruction. */
1443 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1444 return;
1445
1446 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1447 }
1448 else if (op6 == 16 && (insn & 2) == 0)
1449 {
1450 /* conditional branch && AA = 0 */
1451
1452 /* If the new relocation is too big for even a 26-bit unconditional
1453 branch, there is nothing we can do. Just abort.
1454
1455 	 Otherwise, if it fits in a 16-bit conditional branch, just
1456 	 copy the instruction and relocate the address.
1457
1458 	 If it is too big for a 16-bit conditional branch, try to invert the
1459 	 condition and jump with a 26-bit branch.  For example,
1460
1461 beq .Lgoto
1462 INSN1
1463
1464 =>
1465
1466 bne 1f (+8)
1467 b .Lgoto
1468 1:INSN1
1469
1470 	 After this transform, we actually jump from *TO+4 instead of *TO,
1471 	 so check the relocation again because it will be one insn farther
1472 	 than before if *TO is after OLDLOC.
1473
1474
1475 	 BDNZT (and the like) is transformed from
1476
1477 bdnzt eq, .Lgoto
1478 INSN1
1479
1480 =>
1481
1482 bdz 1f (+12)
1483 bf eq, 1f (+8)
1484 b .Lgoto
1485 1:INSN1
1486
1487 See also "BO field encodings". */
1488
1489 rel = PPC_BD (insn);
1490 newrel = (oldloc - *to) + rel;
1491
1492 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1493 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1494 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1495 {
1496 newrel -= 4;
1497
1498 /* Out of range. Cannot relocate instruction. */
1499 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1500 return;
1501
1502 if ((PPC_BO (insn) & 0x14) == 0x4)
1503 insn ^= (1 << 24);
1504 else if ((PPC_BO (insn) & 0x14) == 0x10)
1505 insn ^= (1 << 22);
1506
1507 /* Jump over the unconditional branch. */
1508 insn = (insn & ~0xfffc) | 0x8;
1509 target_write_memory (*to, (unsigned char *) &insn, 4);
1510 *to += 4;
1511
1512 	  /* Build an unconditional branch and copy the LK bit.  */
1513 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1514 target_write_memory (*to, (unsigned char *) &insn, 4);
1515 *to += 4;
1516
1517 return;
1518 }
1519 else if ((PPC_BO (insn) & 0x14) == 0)
1520 {
1521 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1522 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1523
1524 newrel -= 8;
1525
1526 /* Out of range. Cannot relocate instruction. */
1527 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1528 return;
1529
1530 /* Copy BI field. */
1531 bf_insn |= (insn & 0x1f0000);
1532
1533 /* Invert condition. */
1534 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1535 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1536
1537 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1538 *to += 4;
1539 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1540 *to += 4;
1541
1542 	  /* Build an unconditional branch and copy the LK bit.  */
1543 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1544 target_write_memory (*to, (unsigned char *) &insn, 4);
1545 *to += 4;
1546
1547 return;
1548 }
1549 else /* (BO & 0x14) == 0x14, branch always. */
1550 {
1551 /* Out of range. Cannot relocate instruction. */
1552 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1553 return;
1554
1555 	  /* Build an unconditional branch and copy the LK bit.  */
1556 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1557 target_write_memory (*to, (unsigned char *) &insn, 4);
1558 *to += 4;
1559
1560 return;
1561 }
1562 }
1563
1564 target_write_memory (*to, (unsigned char *) &insn, 4);
1565 *to += 4;
1566 }
1567
1568 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1569 See target.h for details. */
1570
1571 static int
1572 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1573 CORE_ADDR collector,
1574 CORE_ADDR lockaddr,
1575 ULONGEST orig_size,
1576 CORE_ADDR *jump_entry,
1577 CORE_ADDR *trampoline,
1578 ULONGEST *trampoline_size,
1579 unsigned char *jjump_pad_insn,
1580 ULONGEST *jjump_pad_insn_size,
1581 CORE_ADDR *adjusted_insn_addr,
1582 CORE_ADDR *adjusted_insn_addr_end,
1583 char *err)
1584 {
1585 uint32_t buf[256];
1586 uint32_t *p = buf;
1587 int j, offset;
1588 CORE_ADDR buildaddr = *jump_entry;
1589 const CORE_ADDR entryaddr = *jump_entry;
1590 int rsz, min_frame, frame_size, tp_reg;
1591 #ifdef __powerpc64__
1592 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1593 int is_64 = register_size (regcache->tdesc, 0) == 8;
1594 int is_opd = is_64 && !is_elfv2_inferior ();
1595 #else
1596 int is_64 = 0, is_opd = 0;
1597 #endif
1598
1599 #ifdef __powerpc64__
1600 if (is_64)
1601 {
1602 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1603 rsz = 8;
1604 min_frame = 112;
1605 frame_size = (40 * rsz) + min_frame;
1606 tp_reg = 13;
1607 }
1608 else
1609 {
1610 #endif
1611 rsz = 4;
1612 min_frame = 16;
1613 frame_size = (40 * rsz) + min_frame;
1614 tp_reg = 2;
1615 #ifdef __powerpc64__
1616 }
1617 #endif
1618
1619 /* Stack frame layout for this jump pad,
1620
1621 High thread_area (r13/r2) |
1622 tpoint - collecting_t obj
1623 PC/<tpaddr> | +36
1624 CTR | +35
1625 LR | +34
1626 XER | +33
1627 CR | +32
1628 R31 |
1629 	R30	|
1630 ... |
1631 R1 | +1
1632 R0 - collected registers
1633 ... |
1634 ... |
1635 Low Back-chain -
1636
1637
1638 The code flow of this jump pad,
1639
1640 1. Adjust SP
1641 2. Save GPR and SPR
1642 3. Prepare argument
1643 4. Call gdb_collector
1644 5. Restore GPR and SPR
1645 6. Restore SP
1646 7. Build a jump for back to the program
1647 8. Copy/relocate original instruction
1648 9. Build a jump for replacing original instruction. */
1649
1650 /* Adjust stack pointer. */
1651 if (is_64)
1652 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1653 else
1654 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1655
1656 /* Store GPRs. Save R1 later, because it had just been modified, but
1657 we want the original value. */
1658 for (j = 2; j < 32; j++)
1659 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1660 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1661 /* Set r0 to the original value of r1 before adjusting stack frame,
1662 and then save it. */
1663 p += GEN_ADDI (p, 0, 1, frame_size);
1664 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1665
1666 /* Save CR, XER, LR, and CTR. */
1667 p += GEN_MFCR (p, 3); /* mfcr r3 */
1668 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1669 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1670 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1671 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1672 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1673 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1674 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1675
1676 /* Save PC<tpaddr> */
1677 p += gen_limm (p, 3, tpaddr, is_64);
1678 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1679
1680
1681 /* Setup arguments to collector. */
1682 /* Set r4 to collected registers. */
1683 p += GEN_ADDI (p, 4, 1, min_frame);
1684 /* Set r3 to TPOINT. */
1685 p += gen_limm (p, 3, tpoint, is_64);
1686
1687 /* Prepare collecting_t object for lock. */
1688 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1689 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1690 /* Set R5 to collecting object. */
1691 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1692
1693 p += GEN_LWSYNC (p);
1694 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1695 p += GEN_LWSYNC (p);
1696
1697 /* Call to collector. */
1698 p += gen_call (p, collector, is_64, is_opd);
1699
1700 /* Simply write 0 to release the lock. */
1701 p += gen_limm (p, 3, lockaddr, is_64);
1702 p += gen_limm (p, 4, 0, is_64);
1703 p += GEN_LWSYNC (p);
1704 p += GEN_STORE (p, 4, 3, 0, is_64);
1705
1706 /* Restore stack and registers. */
1707 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1708 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1709 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1710 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1711 p += GEN_MTCR (p, 3); /* mtcr r3 */
1712 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1713 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1714 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1715
1716 /* Restore GPRs. */
1717 for (j = 2; j < 32; j++)
1718 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1719 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1720 /* Restore SP. */
1721 p += GEN_ADDI (p, 1, 1, frame_size);
1722
1723 /* Flush instructions to inferior memory. */
1724 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1725
1726 /* Now, insert the original instruction to execute in the jump pad. */
1727 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1728 *adjusted_insn_addr_end = *adjusted_insn_addr;
1729 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1730
1731   /* Verify the relocation size.  It should be 4 for a normal copy, or
1732      8 or 12 for some conditional branches.  */
1733 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1734 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1735 {
1736       sprintf (err, "E.Unexpected instruction length = %d "
1737 		    "when relocating instruction.",
1738 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1739 return 1;
1740 }
1741
1742 buildaddr = *adjusted_insn_addr_end;
1743 p = buf;
1744 /* Finally, write a jump back to the program. */
1745 offset = (tpaddr + 4) - buildaddr;
1746 if (offset >= (1 << 25) || offset < -(1 << 25))
1747 {
1748 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1749 "(offset 0x%x > 26-bit).", offset);
1750 return 1;
1751 }
1752 /* b <tpaddr+4> */
1753 p += GEN_B (p, offset);
1754 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1755 *jump_entry = buildaddr + (p - buf) * 4;
1756
1757 /* The jump pad is now built. Wire in a jump to our jump pad. This
1758 is always done last (by our caller actually), so that we can
1759 install fast tracepoints with threads running. This relies on
1760 the agent's atomic write support. */
1761 offset = entryaddr - tpaddr;
1762 if (offset >= (1 << 25) || offset < -(1 << 25))
1763 {
1764       sprintf (err, "E.Jump pad too far from tracepoint "
1765 "(offset 0x%x > 26-bit).", offset);
1766 return 1;
1767 }
1768 /* b <jentry> */
1769 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1770 *jjump_pad_insn_size = 4;
1771
1772 return 0;
1773 }
1774
1775 /* Returns the minimum instruction length for installing a tracepoint. */
1776
1777 static int
1778 ppc_get_min_fast_tracepoint_insn_len (void)
1779 {
1780 return 4;
1781 }
1782
1783 /* Emits a given buffer into the target at current_insn_ptr. Length
1784 is in units of 32-bit words. */
1785
1786 static void
1787 emit_insns (uint32_t *buf, int n)
1788 {
1789 n = n * sizeof (uint32_t);
1790 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1791 current_insn_ptr += n;
1792 }
1793
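/* EMIT_ASM assembles INSNS into gdbserver itself, in a dedicated text
   section bracketed by start/end labels, and then copies the machine code
   between those labels into the inferior at current_insn_ptr.  */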
1794 #define __EMIT_ASM(NAME, INSNS) \
1795 do \
1796 { \
1797 extern uint32_t start_bcax_ ## NAME []; \
1798 extern uint32_t end_bcax_ ## NAME []; \
1799 emit_insns (start_bcax_ ## NAME, \
1800 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1801 __asm__ (".section .text.__ppcbcax\n\t" \
1802 "start_bcax_" #NAME ":\n\t" \
1803 INSNS "\n\t" \
1804 "end_bcax_" #NAME ":\n\t" \
1805 ".previous\n\t"); \
1806 } while (0)
1807
1808 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1809 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1810
1811 /*
1812
1813 Bytecode execution stack frame - 32-bit
1814
1815 | LR save area (SP + 4)
1816 SP' -> +- Back chain (SP + 0)
1817 | Save r31 for access saved arguments
1818 | Save r30 for bytecode stack pointer
1819 | Save r4 for incoming argument *value
1820 | Save r3 for incoming argument regs
1821 r30 -> +- Bytecode execution stack
1822 |
1823    | 64 bytes (8 doublewords) initially.
1824 | Expand stack as needed.
1825 |
1826 +-
1827 | Some padding for minimum stack frame and 16-byte alignment.
1828 | 16 bytes.
1829 SP +- Back-chain (SP')
1830
1831 initial frame size
1832 = 16 + (4 * 4) + 64
1833 = 96
1834
1835    r30 is the stack pointer for the bytecode machine.
1836    It should point to the next empty slot, so we can use LDU for pop.
1837    r3 caches the high part of the TOP value.
1838    It originally held the first argument, a pointer to regs.
1839    r4 caches the low part of the TOP value.
1840    It originally held the second argument, a pointer to the result.
1841    We must set *result = TOP before returning from this function.
1842
1843 Note:
1844 * To restore stack at epilogue
1845 => sp = r31
1846 * To check stack is big enough for bytecode execution.
1847 => r30 - 8 > SP + 8
1848 * To return execution result.
1849 => 0(r4) = TOP
1850
1851 */
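/* With the initial frame size of 96, the prologue below saves r31, r30,
   *value (r4) and regs (r3) at offsets 92, 88, 84 and 80 above the new
   SP, i.e. at -4, -8, -12 and -16 relative to the caller's SP, which is
   what the epilogue addresses through r31.  */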
1852
1853 /* Regardless of endianness, register 3 is always the high part and 4 the
1854 low part. These defines are used when the register pair is stored/loaded.
1855 Likewise, to simplify code, there is a similar define for the 5:6 pair. */
1856
1857 #if __BYTE_ORDER == __LITTLE_ENDIAN
1858 #define TOP_FIRST "4"
1859 #define TOP_SECOND "3"
1860 #define TMP_FIRST "6"
1861 #define TMP_SECOND "5"
1862 #else
1863 #define TOP_FIRST "3"
1864 #define TOP_SECOND "4"
1865 #define TMP_FIRST "5"
1866 #define TMP_SECOND "6"
1867 #endif
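/* For example, pushing TOP with "stw TOP_FIRST, 0(30)" followed by
   "stw TOP_SECOND, 4(30)" places the low word at the lower address on
   little-endian and the high word at the lower address on big-endian,
   so the pair at 0(30) always forms a 64-bit value in target byte
   order.  */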
1868
1869 /* Emit prologue in inferior memory. See above comments. */
1870
1871 static void
1872 ppc_emit_prologue (void)
1873 {
1874 EMIT_ASM (/* Save return address. */
1875 "mflr 0 \n"
1876 "stw 0, 4(1) \n"
1877 /* Adjust SP. 96 is the initial frame size. */
1878 "stwu 1, -96(1) \n"
1879 /* Save r31, r30 and the incoming arguments. */
1880 "stw 31, 96-4(1) \n"
1881 "stw 30, 96-8(1) \n"
1882 "stw 4, 96-12(1) \n"
1883 "stw 3, 96-16(1) \n"
1884 /* Point r31 to the original r1 for accessing arguments. */
1885 "addi 31, 1, 96 \n"
1886 /* Set r30 to point to the stack top. */
1887 "addi 30, 1, 64 \n"
1888 /* Initialize r3/TOP to 0. */
1889 "li 3, 0 \n"
1890 "li 4, 0 \n");
1891 }
1892
1893 /* Emit epilogue in inferior memory. See above comments. */
1894
1895 static void
1896 ppc_emit_epilogue (void)
1897 {
1898 EMIT_ASM (/* *result = TOP */
1899 "lwz 5, -12(31) \n"
1900 "stw " TOP_FIRST ", 0(5) \n"
1901 "stw " TOP_SECOND ", 4(5) \n"
1902 /* Restore registers. */
1903 "lwz 31, -4(31) \n"
1904 "lwz 30, -8(31) \n"
1905 /* Restore SP. */
1906 "lwz 1, 0(1) \n"
1907 /* Restore LR. */
1908 "lwz 0, 4(1) \n"
1909 /* Return 0 for no-error. */
1910 "li 3, 0 \n"
1911 "mtlr 0 \n"
1912 "blr \n");
1913 }
1914
1915 /* TOP = stack[--sp] + TOP */
1916
1917 static void
1918 ppc_emit_add (void)
1919 {
1920 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1921 "lwz " TMP_SECOND ", 4(30)\n"
1922 "addc 4, 6, 4 \n"
1923 "adde 3, 5, 3 \n");
1924 }
1925
1926 /* TOP = stack[--sp] - TOP */
1927
1928 static void
1929 ppc_emit_sub (void)
1930 {
1931 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1932 "lwz " TMP_SECOND ", 4(30) \n"
1933 "subfc 4, 4, 6 \n"
1934 "subfe 3, 3, 5 \n");
1935 }
1936
1937 /* TOP = stack[--sp] * TOP */
1938
1939 static void
1940 ppc_emit_mul (void)
1941 {
1942 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1943 "lwz " TMP_SECOND ", 4(30) \n"
1944 "mulhwu 7, 6, 4 \n"
1945 "mullw 3, 6, 3 \n"
1946 "mullw 5, 4, 5 \n"
1947 "mullw 4, 6, 4 \n"
1948 "add 3, 5, 3 \n"
1949 "add 3, 7, 3 \n");
1950 }
1951
1952 /* TOP = stack[--sp] << TOP */
1953
1954 static void
1955 ppc_emit_lsh (void)
1956 {
1957 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1958 "lwz " TMP_SECOND ", 4(30) \n"
1959 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1960 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1961 "slw 5, 5, 4\n" /* Shift high part left */
1962 "slw 4, 6, 4\n" /* Shift low part left */
1963 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1964 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1965 "or 3, 5, 3\n"
1966 "or 3, 7, 3\n"); /* Assemble high part */
1967 }
1968
1969 /* Top = stack[--sp] >> TOP
1970 (Arithmetic shift right) */
1971
1972 static void
1973 ppc_emit_rsh_signed (void)
1974 {
1975 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1976 "lwz " TMP_SECOND ", 4(30) \n"
1977 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1978 "sraw 3, 5, 4\n" /* Shift high part right */
1979 "cmpwi 7, 1\n"
1980 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1981 "sraw 4, 5, 7\n" /* Shift high to low */
1982 "b 2f\n"
1983 "1:\n"
1984 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1985 "srw 4, 6, 4\n" /* Shift low part right */
1986 "slw 5, 5, 7\n" /* Shift high to low */
1987 "or 4, 4, 5\n" /* Assemble low part */
1988 "2:\n");
1989 }
1990
1991 /* Top = stack[--sp] >> TOP
1992 (Logical shift right) */
1993
1994 static void
1995 ppc_emit_rsh_unsigned (void)
1996 {
1997 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1998 "lwz " TMP_SECOND ", 4(30) \n"
1999 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2000 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2001 "srw 6, 6, 4\n" /* Shift low part right */
2002 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
2003 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
2004 "or 6, 6, 3\n"
2005 "srw 3, 5, 4\n" /* Shift high part right */
2006 "or 4, 6, 7\n"); /* Assemble low part */
2007 }
2008
2009 /* Emit code for the sign extension specified by ARG. */
2010
2011 static void
2012 ppc_emit_ext (int arg)
2013 {
2014 switch (arg)
2015 {
2016 case 8:
2017 EMIT_ASM ("extsb 4, 4\n"
2018 "srawi 3, 4, 31");
2019 break;
2020 case 16:
2021 EMIT_ASM ("extsh 4, 4\n"
2022 "srawi 3, 4, 31");
2023 break;
2024 case 32:
2025 EMIT_ASM ("srawi 3, 4, 31");
2026 break;
2027 default:
2028 emit_error = 1;
2029 }
2030 }
2031
2032 /* Emit code for zero-extension specified by ARG. */
2033
2034 static void
2035 ppc_emit_zero_ext (int arg)
2036 {
2037 switch (arg)
2038 {
2039 case 8:
2040 EMIT_ASM ("clrlwi 4,4,24\n"
2041 "li 3, 0\n");
2042 break;
2043 case 16:
2044 EMIT_ASM ("clrlwi 4,4,16\n"
2045 "li 3, 0\n");
2046 break;
2047 case 32:
2048 EMIT_ASM ("li 3, 0");
2049 break;
2050 default:
2051 emit_error = 1;
2052 }
2053 }
2054
2055 /* TOP = !TOP
2056 i.e., TOP = (TOP == 0) ? 1 : 0; */
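/* cntlzw of zero is 32, and of any non-zero word is at most 31, so
   shifting the count right by 5 yields 1 exactly when the 64-bit TOP
   (r3 and r4 OR'ed together) is zero.  The same trick is used by
   ppc_emit_equal below.  */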
2057
2058 static void
2059 ppc_emit_log_not (void)
2060 {
2061 EMIT_ASM ("or 4, 3, 4 \n"
2062 "cntlzw 4, 4 \n"
2063 "srwi 4, 4, 5 \n"
2064 "li 3, 0 \n");
2065 }
2066
2067 /* TOP = stack[--sp] & TOP */
2068
2069 static void
2070 ppc_emit_bit_and (void)
2071 {
2072 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2073 "lwz " TMP_SECOND ", 4(30) \n"
2074 "and 4, 6, 4 \n"
2075 "and 3, 5, 3 \n");
2076 }
2077
2078 /* TOP = stack[--sp] | TOP */
2079
2080 static void
2081 ppc_emit_bit_or (void)
2082 {
2083 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2084 "lwz " TMP_SECOND ", 4(30) \n"
2085 "or 4, 6, 4 \n"
2086 "or 3, 5, 3 \n");
2087 }
2088
2089 /* TOP = stack[--sp] ^ TOP */
2090
2091 static void
2092 ppc_emit_bit_xor (void)
2093 {
2094 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2095 "lwz " TMP_SECOND ", 4(30) \n"
2096 "xor 4, 6, 4 \n"
2097 "xor 3, 5, 3 \n");
2098 }
2099
2100 /* TOP = ~TOP
2101 i.e., TOP = ~(TOP | TOP) */
2102
2103 static void
2104 ppc_emit_bit_not (void)
2105 {
2106 EMIT_ASM ("nor 3, 3, 3 \n"
2107 "nor 4, 4, 4 \n");
2108 }
2109
2110 /* TOP = stack[--sp] == TOP */
2111
2112 static void
2113 ppc_emit_equal (void)
2114 {
2115 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2116 "lwz " TMP_SECOND ", 4(30) \n"
2117 "xor 4, 6, 4 \n"
2118 "xor 3, 5, 3 \n"
2119 "or 4, 3, 4 \n"
2120 "cntlzw 4, 4 \n"
2121 "srwi 4, 4, 5 \n"
2122 "li 3, 0 \n");
2123 }
2124
2125 /* TOP = stack[--sp] < TOP
2126 (Signed comparison) */
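/* The 64-bit comparison is synthesized from two 32-bit ones: cr6 gets
   the unsigned comparison of the low words and cr7 the signed comparison
   of the high words.  crand/cror then compute
   cr7.LT := cr7.LT || (cr7.EQ && cr6.LT), i.e. "less" when the high
   words compare less, or compare equal with the low words less.
   mfcr plus rlwinm extracts that bit into r4.  The other 64-bit
   comparisons below use the same scheme.  */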
2127
2128 static void
2129 ppc_emit_less_signed (void)
2130 {
2131 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2132 "lwz " TMP_SECOND ", 4(30) \n"
2133 "cmplw 6, 6, 4 \n"
2134 "cmpw 7, 5, 3 \n"
2135 /* CR6 bit 0 = low less and high equal */
2136 "crand 6*4+0, 6*4+0, 7*4+2\n"
2137 /* CR7 bit 0 = (low less and high equal) or high less */
2138 "cror 7*4+0, 7*4+0, 6*4+0\n"
2139 "mfcr 4 \n"
2140 "rlwinm 4, 4, 29, 31, 31 \n"
2141 "li 3, 0 \n");
2142 }
2143
2144 /* TOP = stack[--sp] < TOP
2145 (Unsigned comparison) */
2146
2147 static void
2148 ppc_emit_less_unsigned (void)
2149 {
2150 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2151 "lwz " TMP_SECOND ", 4(30) \n"
2152 "cmplw 6, 6, 4 \n"
2153 "cmplw 7, 5, 3 \n"
2154 /* CR6 bit 0 = low less and high equal */
2155 "crand 6*4+0, 6*4+0, 7*4+2\n"
2156 /* CR7 bit 0 = (low less and high equal) or high less */
2157 "cror 7*4+0, 7*4+0, 6*4+0\n"
2158 "mfcr 4 \n"
2159 "rlwinm 4, 4, 29, 31, 31 \n"
2160 "li 3, 0 \n");
2161 }
2162
2163 /* Access the memory at the address in TOP, with access size SIZE.
2164 Zero-extend the read value. */
2165
2166 static void
2167 ppc_emit_ref (int size)
2168 {
2169 switch (size)
2170 {
2171 case 1:
2172 EMIT_ASM ("lbz 4, 0(4)\n"
2173 "li 3, 0");
2174 break;
2175 case 2:
2176 EMIT_ASM ("lhz 4, 0(4)\n"
2177 "li 3, 0");
2178 break;
2179 case 4:
2180 EMIT_ASM ("lwz 4, 0(4)\n"
2181 "li 3, 0");
2182 break;
2183 case 8:
2184 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2185 EMIT_ASM ("lwz 3, 4(4)\n"
2186 "lwz 4, 0(4)");
2187 else
2188 EMIT_ASM ("lwz 3, 0(4)\n"
2189 "lwz 4, 4(4)");
2190 break;
2191 }
2192 }
2193
2194 /* TOP = NUM */
2195
2196 static void
2197 ppc_emit_const (LONGEST num)
2198 {
2199 uint32_t buf[10];
2200 uint32_t *p = buf;
2201
2202 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2203 p += gen_limm (p, 4, num & 0xffffffff, 0);
2204
2205 emit_insns (buf, p - buf);
2206 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2207 }
2208
2209 /* Set TOP to the value of register REG by calling the get_raw_reg function
2210 with two arguments, the register collect buffer and the register number. */
2211
2212 static void
2213 ppc_emit_reg (int reg)
2214 {
2215 uint32_t buf[13];
2216 uint32_t *p = buf;
2217
2218 /* fctx->regs is passed in r3 and then saved in -16(31). */
2219 p += GEN_LWZ (p, 3, 31, -16);
2220 p += GEN_LI (p, 4, reg); /* li r4, reg */
2221 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2222
2223 emit_insns (buf, p - buf);
2224 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2225
2226 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2227 {
2228 EMIT_ASM ("mr 5, 4\n"
2229 "mr 4, 3\n"
2230 "mr 3, 5\n");
2231 }
2232 }
2233
2234 /* TOP = stack[--sp] */
2235
2236 static void
2237 ppc_emit_pop (void)
2238 {
2239 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2240 "lwz " TOP_SECOND ", 4(30) \n");
2241 }
2242
2243 /* stack[sp++] = TOP
2244
2245 Because we may run out of bytecode stack, expand it by 8 more
2246 doublewords if needed. */
2247
2248 static void
2249 ppc_emit_stack_flush (void)
2250 {
2251 /* Make sure the bytecode stack is big enough before the push.
2252 Otherwise, expand it by 64 more bytes. */
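/* The check below computes r5 = r30 - 16 and compares it with SP: if
   r5 is not above SP, i.e. the bytecode stack pointer has come within
   16 bytes of the frame, "stwu 31, -64(1)" grows the frame by 64 bytes,
   storing the saved original SP (r31) as the new back chain.  */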
2253
2254 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2255 " stw " TOP_SECOND ", 4(30)\n"
2256 " addi 5, 30, -(8 + 8) \n"
2257 " cmpw 7, 5, 1 \n"
2258 " bgt 7, 1f \n"
2259 " stwu 31, -64(1) \n"
2260 "1:addi 30, 30, -8 \n");
2261 }
2262
2263 /* Swap TOP and stack[sp-1] */
2264
2265 static void
2266 ppc_emit_swap (void)
2267 {
2268 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2269 "lwz " TMP_SECOND ", 12(30) \n"
2270 "stw " TOP_FIRST ", 8(30) \n"
2271 "stw " TOP_SECOND ", 12(30) \n"
2272 "mr 3, 5 \n"
2273 "mr 4, 6 \n");
2274 }
2275
2276 /* Discard N elements in the stack. Also used for ppc64. */
2277
2278 static void
2279 ppc_emit_stack_adjust (int n)
2280 {
2281 uint32_t buf[6];
2282 uint32_t *p = buf;
2283
2284 n = n << 3;
2285 if ((n >> 15) != 0)
2286 {
2287 emit_error = 1;
2288 return;
2289 }
2290
2291 p += GEN_ADDI (p, 30, 30, n);
2292
2293 emit_insns (buf, p - buf);
2294 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2295 }
2296
2297 /* Call function FN. */
2298
2299 static void
2300 ppc_emit_call (CORE_ADDR fn)
2301 {
2302 uint32_t buf[11];
2303 uint32_t *p = buf;
2304
2305 p += gen_call (p, fn, 0, 0);
2306
2307 emit_insns (buf, p - buf);
2308 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2309 }
2310
2311 /* FN's prototype is `LONGEST(*fn)(int)'.
2312 TOP = fn (arg1)
2313 */
2314
2315 static void
2316 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2317 {
2318 uint32_t buf[15];
2319 uint32_t *p = buf;
2320
2321 /* Setup argument. arg1 is a 16-bit value. */
2322 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2323 p += gen_call (p, fn, 0, 0);
2324
2325 emit_insns (buf, p - buf);
2326 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2327
2328 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2329 {
2330 EMIT_ASM ("mr 5, 4\n"
2331 "mr 4, 3\n"
2332 "mr 3, 5\n");
2333 }
2334 }
2335
2336 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2337 fn (arg1, TOP)
2338
2339 TOP should be preserved/restored before/after the call. */
2340
2341 static void
2342 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2343 {
2344 uint32_t buf[21];
2345 uint32_t *p = buf;
2346
2347 /* Save TOP. 0(30) is next-empty. */
2348 p += GEN_STW (p, 3, 30, 0);
2349 p += GEN_STW (p, 4, 30, 4);
2350
2351 /* Setup argument. arg1 is a 16-bit value. */
2352 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2353 {
2354 p += GEN_MR (p, 5, 4);
2355 p += GEN_MR (p, 6, 3);
2356 }
2357 else
2358 {
2359 p += GEN_MR (p, 5, 3);
2360 p += GEN_MR (p, 6, 4);
2361 }
2362 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2363 p += gen_call (p, fn, 0, 0);
2364
2365 /* Restore TOP */
2366 p += GEN_LWZ (p, 3, 30, 0);
2367 p += GEN_LWZ (p, 4, 30, 4);
2368
2369 emit_insns (buf, p - buf);
2370 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2371 }
2372
2373 /* Note in the following goto ops:
2374
2375 When emitting goto, the target address is later relocated by
2376 write_goto_address. OFFSET_P is the offset of the branch instruction
2377 in the code sequence, and SIZE_P is how to relocate the instruction,
2378 recognized by ppc_write_goto_address. In the current implementation,
2379 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2380 */
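/* For example, ppc_emit_if_goto below emits "1: bne 0, 1b", a
   conditional branch that initially targets itself; *OFFSET_P = 12
   records that the branch is the fourth instruction (byte offset 12) of
   the emitted sequence, and *SIZE_P = 14 tells ppc_write_goto_address to
   patch the 14-bit BD displacement field (the 24-bit LI field is patched
   instead for unconditional branches) once the real target is known.  */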
2381
2382 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2383
2384 static void
2385 ppc_emit_if_goto (int *offset_p, int *size_p)
2386 {
2387 EMIT_ASM ("or. 3, 3, 4 \n"
2388 "lwzu " TOP_FIRST ", 8(30) \n"
2389 "lwz " TOP_SECOND ", 4(30) \n"
2390 "1:bne 0, 1b \n");
2391
2392 if (offset_p)
2393 *offset_p = 12;
2394 if (size_p)
2395 *size_p = 14;
2396 }
2397
2398 /* Unconditional goto. Also used for ppc64. */
2399
2400 static void
2401 ppc_emit_goto (int *offset_p, int *size_p)
2402 {
2403 EMIT_ASM ("1:b 1b");
2404
2405 if (offset_p)
2406 *offset_p = 0;
2407 if (size_p)
2408 *size_p = 24;
2409 }
2410
2411 /* Goto if stack[--sp] == TOP */
2412
2413 static void
2414 ppc_emit_eq_goto (int *offset_p, int *size_p)
2415 {
2416 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2417 "lwz " TMP_SECOND ", 4(30) \n"
2418 "xor 4, 6, 4 \n"
2419 "xor 3, 5, 3 \n"
2420 "or. 3, 3, 4 \n"
2421 "lwzu " TOP_FIRST ", 8(30) \n"
2422 "lwz " TOP_SECOND ", 4(30) \n"
2423 "1:beq 0, 1b \n");
2424
2425 if (offset_p)
2426 *offset_p = 28;
2427 if (size_p)
2428 *size_p = 14;
2429 }
2430
2431 /* Goto if stack[--sp] != TOP */
2432
2433 static void
2434 ppc_emit_ne_goto (int *offset_p, int *size_p)
2435 {
2436 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2437 "lwz " TMP_SECOND ", 4(30) \n"
2438 "xor 4, 6, 4 \n"
2439 "xor 3, 5, 3 \n"
2440 "or. 3, 3, 4 \n"
2441 "lwzu " TOP_FIRST ", 8(30) \n"
2442 "lwz " TOP_SECOND ", 4(30) \n"
2443 "1:bne 0, 1b \n");
2444
2445 if (offset_p)
2446 *offset_p = 28;
2447 if (size_p)
2448 *size_p = 14;
2449 }
2450
2451 /* Goto if stack[--sp] < TOP */
2452
2453 static void
2454 ppc_emit_lt_goto (int *offset_p, int *size_p)
2455 {
2456 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2457 "lwz " TMP_SECOND ", 4(30) \n"
2458 "cmplw 6, 6, 4 \n"
2459 "cmpw 7, 5, 3 \n"
2460 /* CR6 bit 0 = low less and high equal */
2461 "crand 6*4+0, 6*4+0, 7*4+2\n"
2462 /* CR7 bit 0 = (low less and high equal) or high less */
2463 "cror 7*4+0, 7*4+0, 6*4+0\n"
2464 "lwzu " TOP_FIRST ", 8(30) \n"
2465 "lwz " TOP_SECOND ", 4(30)\n"
2466 "1:blt 7, 1b \n");
2467
2468 if (offset_p)
2469 *offset_p = 32;
2470 if (size_p)
2471 *size_p = 14;
2472 }
2473
2474 /* Goto if stack[--sp] <= TOP */
2475
2476 static void
2477 ppc_emit_le_goto (int *offset_p, int *size_p)
2478 {
2479 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2480 "lwz " TMP_SECOND ", 4(30) \n"
2481 "cmplw 6, 6, 4 \n"
2482 "cmpw 7, 5, 3 \n"
2483 /* CR6 bit 0 = low less/equal and high equal */
2484 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2485 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2486 "cror 7*4+0, 7*4+0, 6*4+0\n"
2487 "lwzu " TOP_FIRST ", 8(30) \n"
2488 "lwz " TOP_SECOND ", 4(30)\n"
2489 "1:blt 7, 1b \n");
2490
2491 if (offset_p)
2492 *offset_p = 32;
2493 if (size_p)
2494 *size_p = 14;
2495 }
2496
2497 /* Goto if stack[--sp] > TOP */
2498
2499 static void
2500 ppc_emit_gt_goto (int *offset_p, int *size_p)
2501 {
2502 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2503 "lwz " TMP_SECOND ", 4(30) \n"
2504 "cmplw 6, 6, 4 \n"
2505 "cmpw 7, 5, 3 \n"
2506 /* CR6 bit 0 = low greater and high equal */
2507 "crand 6*4+0, 6*4+1, 7*4+2\n"
2508 /* CR7 bit 0 = (low greater and high equal) or high greater */
2509 "cror 7*4+0, 7*4+1, 6*4+0\n"
2510 "lwzu " TOP_FIRST ", 8(30) \n"
2511 "lwz " TOP_SECOND ", 4(30)\n"
2512 "1:blt 7, 1b \n");
2513
2514 if (offset_p)
2515 *offset_p = 32;
2516 if (size_p)
2517 *size_p = 14;
2518 }
2519
2520 /* Goto if stack[--sp] >= TOP */
2521
2522 static void
2523 ppc_emit_ge_goto (int *offset_p, int *size_p)
2524 {
2525 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2526 "lwz " TMP_SECOND ", 4(30) \n"
2527 "cmplw 6, 6, 4 \n"
2528 "cmpw 7, 5, 3 \n"
2529 /* CR6 bit 0 = low ge and high equal */
2530 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2531 /* CR7 bit 0 = (low ge and high equal) or high greater */
2532 "cror 7*4+0, 7*4+1, 6*4+0\n"
2533 "lwzu " TOP_FIRST ", 8(30)\n"
2534 "lwz " TOP_SECOND ", 4(30)\n"
2535 "1:blt 7, 1b \n");
2536
2537 if (offset_p)
2538 *offset_p = 32;
2539 if (size_p)
2540 *size_p = 14;
2541 }
2542
2543 /* Relocate a previously emitted branch instruction. FROM is the address
2544 of the branch instruction, TO is the goto target address, and SIZE
2545 is the value we set via *SIZE_P before. Currently, it is either
2546 24 or 14, for branch or conditional-branch instructions.
2547 Also used for ppc64. */
2548
2549 static void
2550 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2551 {
2552 long rel = to - from;
2553 uint32_t insn;
2554 int opcd;
2555
2556 read_inferior_memory (from, (unsigned char *) &insn, 4);
2557 opcd = (insn >> 26) & 0x3f;
2558
2559 switch (size)
2560 {
2561 case 14:
2562 if (opcd != 16
2563 || (rel >= (1 << 15) || rel < -(1 << 15)))
2564 emit_error = 1;
2565 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2566 break;
2567 case 24:
2568 if (opcd != 18
2569 || (rel >= (1 << 25) || rel < -(1 << 25)))
2570 emit_error = 1;
2571 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2572 break;
2573 default:
2574 emit_error = 1;
2575 }
2576
2577 if (!emit_error)
2578 target_write_memory (from, (unsigned char *) &insn, 4);
2579 }
2580
2581 /* Table of emit ops for 32-bit. */
2582
2583 static struct emit_ops ppc_emit_ops_impl =
2584 {
2585 ppc_emit_prologue,
2586 ppc_emit_epilogue,
2587 ppc_emit_add,
2588 ppc_emit_sub,
2589 ppc_emit_mul,
2590 ppc_emit_lsh,
2591 ppc_emit_rsh_signed,
2592 ppc_emit_rsh_unsigned,
2593 ppc_emit_ext,
2594 ppc_emit_log_not,
2595 ppc_emit_bit_and,
2596 ppc_emit_bit_or,
2597 ppc_emit_bit_xor,
2598 ppc_emit_bit_not,
2599 ppc_emit_equal,
2600 ppc_emit_less_signed,
2601 ppc_emit_less_unsigned,
2602 ppc_emit_ref,
2603 ppc_emit_if_goto,
2604 ppc_emit_goto,
2605 ppc_write_goto_address,
2606 ppc_emit_const,
2607 ppc_emit_call,
2608 ppc_emit_reg,
2609 ppc_emit_pop,
2610 ppc_emit_stack_flush,
2611 ppc_emit_zero_ext,
2612 ppc_emit_swap,
2613 ppc_emit_stack_adjust,
2614 ppc_emit_int_call_1,
2615 ppc_emit_void_call_2,
2616 ppc_emit_eq_goto,
2617 ppc_emit_ne_goto,
2618 ppc_emit_lt_goto,
2619 ppc_emit_le_goto,
2620 ppc_emit_gt_goto,
2621 ppc_emit_ge_goto
2622 };
2623
2624 #ifdef __powerpc64__
2625
2626 /*
2627
2628 Bytecode execution stack frame - 64-bit
2629
2630 | LR save area (SP + 16)
2631 | CR save area (SP + 8)
2632 SP' -> +- Back chain (SP + 0)
2633 | Save r31 for accessing saved arguments
2634 | Save r30 for bytecode stack pointer
2635 | Save r4 for incoming argument *value
2636 | Save r3 for incoming argument regs
2637 r30 -> +- Bytecode execution stack
2638 |
2639 | 64 bytes (8 doublewords) initially.
2640 | Expand stack as needed.
2641 |
2642 +-
2643 | Some padding for minimum stack frame.
2644 | 112 for ELFv1.
2645 SP +- Back-chain (SP')
2646
2647 initial frame size
2648 = 112 + (4 * 8) + 64
2649 = 208
2650
2651 r30 is the stack pointer for the bytecode machine.
2652 It should point to the next empty slot, so we can use LDU for pop.
2653 r3 caches the TOP value.
2654 It was the first argument, a pointer to regs.
2655 r4 is the second argument, a pointer to the result.
2656 We should set *result = TOP after leaving this function.
2657
2658 Note:
2659 * To restore stack at epilogue
2660 => sp = r31
2661 * To check stack is big enough for bytecode execution.
2662 => r30 - 8 > SP + 112
2663 * To return execution result.
2664 => 0(r4) = TOP
2665
2666 */
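/* The initial frame size of 208 is 112 bytes of minimum frame plus 32
   bytes for the four saved doublewords plus 64 bytes of bytecode stack.
   The prologues below save r31, r30, *value (r4) and regs (r3) at -8,
   -16, -24 and -32 relative to the caller's SP (kept in r31), and point
   r30 at 168 bytes above the new SP, the highest doubleword of the
   bytecode area.  */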
2667
2668 /* Emit prologue in inferior memory. See above comments. */
2669
2670 static void
2671 ppc64v1_emit_prologue (void)
2672 {
2673 /* On ELFv1, function pointers really point to a function descriptor,
2674 so emit one here. We don't care about the contents of words 1 and 2,
2675 so let them just overlap our code. */
2676 uint64_t opd = current_insn_ptr + 8;
2677 uint32_t buf[2];
2678
2679 /* Mind the strict aliasing rules. */
2680 memcpy (buf, &opd, sizeof buf);
2681 emit_insns (buf, 2);
2682 EMIT_ASM (/* Save return address. */
2683 "mflr 0 \n"
2684 "std 0, 16(1) \n"
2685 /* Save r31, r30 and the incoming arguments. */
2686 "std 31, -8(1) \n"
2687 "std 30, -16(1) \n"
2688 "std 4, -24(1) \n"
2689 "std 3, -32(1) \n"
2690 /* Point r31 to the current r1 for accessing arguments. */
2691 "mr 31, 1 \n"
2692 /* Adjust SP. 208 is the initial frame size. */
2693 "stdu 1, -208(1) \n"
2694 /* Set r30 to point to the stack top. */
2695 "addi 30, 1, 168 \n"
2696 /* Initialize r3/TOP to 0. */
2697 "li 3, 0 \n");
2698 }
2699
2700 /* Emit prologue in inferior memory. See above comments. */
2701
2702 static void
2703 ppc64v2_emit_prologue (void)
2704 {
2705 EMIT_ASM (/* Save return address. */
2706 "mflr 0 \n"
2707 "std 0, 16(1) \n"
2708 /* Save r31, r30 and the incoming arguments. */
2709 "std 31, -8(1) \n"
2710 "std 30, -16(1) \n"
2711 "std 4, -24(1) \n"
2712 "std 3, -32(1) \n"
2713 /* Point r31 to the current r1 for accessing arguments. */
2714 "mr 31, 1 \n"
2715 /* Adjust SP. 208 is the initial frame size. */
2716 "stdu 1, -208(1) \n"
2717 /* Set r30 to point to the stack top. */
2718 "addi 30, 1, 168 \n"
2719 /* Initialize r3/TOP to 0. */
2720 "li 3, 0 \n");
2721 }
2722
2723 /* Emit epilogue in inferior memory. See above comments. */
2724
2725 static void
2726 ppc64_emit_epilogue (void)
2727 {
2728 EMIT_ASM (/* Restore SP. */
2729 "ld 1, 0(1) \n"
2730 /* *result = TOP */
2731 "ld 4, -24(1) \n"
2732 "std 3, 0(4) \n"
2733 /* Restore registers. */
2734 "ld 31, -8(1) \n"
2735 "ld 30, -16(1) \n"
2736 /* Restore LR. */
2737 "ld 0, 16(1) \n"
2738 /* Return 0 for no-error. */
2739 "li 3, 0 \n"
2740 "mtlr 0 \n"
2741 "blr \n");
2742 }
2743
2744 /* TOP = stack[--sp] + TOP */
2745
2746 static void
2747 ppc64_emit_add (void)
2748 {
2749 EMIT_ASM ("ldu 4, 8(30) \n"
2750 "add 3, 4, 3 \n");
2751 }
2752
2753 /* TOP = stack[--sp] - TOP */
2754
2755 static void
2756 ppc64_emit_sub (void)
2757 {
2758 EMIT_ASM ("ldu 4, 8(30) \n"
2759 "sub 3, 4, 3 \n");
2760 }
2761
2762 /* TOP = stack[--sp] * TOP */
2763
2764 static void
2765 ppc64_emit_mul (void)
2766 {
2767 EMIT_ASM ("ldu 4, 8(30) \n"
2768 "mulld 3, 4, 3 \n");
2769 }
2770
2771 /* TOP = stack[--sp] << TOP */
2772
2773 static void
2774 ppc64_emit_lsh (void)
2775 {
2776 EMIT_ASM ("ldu 4, 8(30) \n"
2777 "sld 3, 4, 3 \n");
2778 }
2779
2780 /* Top = stack[--sp] >> TOP
2781 (Arithmetic shift right) */
2782
2783 static void
2784 ppc64_emit_rsh_signed (void)
2785 {
2786 EMIT_ASM ("ldu 4, 8(30) \n"
2787 "srad 3, 4, 3 \n");
2788 }
2789
2790 /* Top = stack[--sp] >> TOP
2791 (Logical shift right) */
2792
2793 static void
2794 ppc64_emit_rsh_unsigned (void)
2795 {
2796 EMIT_ASM ("ldu 4, 8(30) \n"
2797 "srd 3, 4, 3 \n");
2798 }
2799
2800 /* Emit code for the sign extension specified by ARG. */
2801
2802 static void
2803 ppc64_emit_ext (int arg)
2804 {
2805 switch (arg)
2806 {
2807 case 8:
2808 EMIT_ASM ("extsb 3, 3");
2809 break;
2810 case 16:
2811 EMIT_ASM ("extsh 3, 3");
2812 break;
2813 case 32:
2814 EMIT_ASM ("extsw 3, 3");
2815 break;
2816 default:
2817 emit_error = 1;
2818 }
2819 }
2820
2821 /* Emit code for zero-extension specified by ARG. */
2822
2823 static void
2824 ppc64_emit_zero_ext (int arg)
2825 {
2826 switch (arg)
2827 {
2828 case 8:
2829 EMIT_ASM ("rldicl 3,3,0,56");
2830 break;
2831 case 16:
2832 EMIT_ASM ("rldicl 3,3,0,48");
2833 break;
2834 case 32:
2835 EMIT_ASM ("rldicl 3,3,0,32");
2836 break;
2837 default:
2838 emit_error = 1;
2839 }
2840 }
2841
2842 /* TOP = !TOP
2843 i.e., TOP = (TOP == 0) ? 1 : 0; */
2844
2845 static void
2846 ppc64_emit_log_not (void)
2847 {
2848 EMIT_ASM ("cntlzd 3, 3 \n"
2849 "srdi 3, 3, 6 \n");
2850 }
2851
2852 /* TOP = stack[--sp] & TOP */
2853
2854 static void
2855 ppc64_emit_bit_and (void)
2856 {
2857 EMIT_ASM ("ldu 4, 8(30) \n"
2858 "and 3, 4, 3 \n");
2859 }
2860
2861 /* TOP = stack[--sp] | TOP */
2862
2863 static void
2864 ppc64_emit_bit_or (void)
2865 {
2866 EMIT_ASM ("ldu 4, 8(30) \n"
2867 "or 3, 4, 3 \n");
2868 }
2869
2870 /* TOP = stack[--sp] ^ TOP */
2871
2872 static void
2873 ppc64_emit_bit_xor (void)
2874 {
2875 EMIT_ASM ("ldu 4, 8(30) \n"
2876 "xor 3, 4, 3 \n");
2877 }
2878
2879 /* TOP = ~TOP
2880 i.e., TOP = ~(TOP | TOP) */
2881
2882 static void
2883 ppc64_emit_bit_not (void)
2884 {
2885 EMIT_ASM ("nor 3, 3, 3 \n");
2886 }
2887
2888 /* TOP = stack[--sp] == TOP */
2889
2890 static void
2891 ppc64_emit_equal (void)
2892 {
2893 EMIT_ASM ("ldu 4, 8(30) \n"
2894 "xor 3, 3, 4 \n"
2895 "cntlzd 3, 3 \n"
2896 "srdi 3, 3, 6 \n");
2897 }
2898
2899 /* TOP = stack[--sp] < TOP
2900 (Signed comparison) */
2901
2902 static void
2903 ppc64_emit_less_signed (void)
2904 {
2905 EMIT_ASM ("ldu 4, 8(30) \n"
2906 "cmpd 7, 4, 3 \n"
2907 "mfcr 3 \n"
2908 "rlwinm 3, 3, 29, 31, 31 \n");
2909 }
2910
2911 /* TOP = stack[--sp] < TOP
2912 (Unsigned comparison) */
2913
2914 static void
2915 ppc64_emit_less_unsigned (void)
2916 {
2917 EMIT_ASM ("ldu 4, 8(30) \n"
2918 "cmpld 7, 4, 3 \n"
2919 "mfcr 3 \n"
2920 "rlwinm 3, 3, 29, 31, 31 \n");
2921 }
2922
2923 /* Access the memory at the address in TOP, with access size SIZE.
2924 Zero-extend the read value. */
2925
2926 static void
2927 ppc64_emit_ref (int size)
2928 {
2929 switch (size)
2930 {
2931 case 1:
2932 EMIT_ASM ("lbz 3, 0(3)");
2933 break;
2934 case 2:
2935 EMIT_ASM ("lhz 3, 0(3)");
2936 break;
2937 case 4:
2938 EMIT_ASM ("lwz 3, 0(3)");
2939 break;
2940 case 8:
2941 EMIT_ASM ("ld 3, 0(3)");
2942 break;
2943 }
2944 }
2945
2946 /* TOP = NUM */
2947
2948 static void
2949 ppc64_emit_const (LONGEST num)
2950 {
2951 uint32_t buf[5];
2952 uint32_t *p = buf;
2953
2954 p += gen_limm (p, 3, num, 1);
2955
2956 emit_insns (buf, p - buf);
2957 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2958 }
2959
2960 /* Set TOP to the value of register REG by calling the get_raw_reg function
2961 with two arguments, the register collect buffer and the register number. */
2962
2963 static void
2964 ppc64v1_emit_reg (int reg)
2965 {
2966 uint32_t buf[15];
2967 uint32_t *p = buf;
2968
2969 /* fctx->regs is passed in r3 and then saved in 176(1). */
2970 p += GEN_LD (p, 3, 31, -32);
2971 p += GEN_LI (p, 4, reg);
2972 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2973 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2974 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2975
2976 emit_insns (buf, p - buf);
2977 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2978 }
2979
2980 /* Likewise, for ELFv2. */
2981
2982 static void
2983 ppc64v2_emit_reg (int reg)
2984 {
2985 uint32_t buf[12];
2986 uint32_t *p = buf;
2987
2988 /* fctx->regs is passed in r3 and then saved in 176(1). */
2989 p += GEN_LD (p, 3, 31, -32);
2990 p += GEN_LI (p, 4, reg);
2991 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2992 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2993 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2994
2995 emit_insns (buf, p - buf);
2996 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2997 }
2998
2999 /* TOP = stack[--sp] */
3000
3001 static void
3002 ppc64_emit_pop (void)
3003 {
3004 EMIT_ASM ("ldu 3, 8(30)");
3005 }
3006
3007 /* stack[sp++] = TOP
3008
3009 Because we may run out of bytecode stack, expand it by 8 more
3010 doublewords if needed. */
3011
3012 static void
3013 ppc64_emit_stack_flush (void)
3014 {
3015 /* Make sure the bytecode stack is big enough before the push.
3016 Otherwise, expand it by 64 more bytes. */
3017
3018 EMIT_ASM (" std 3, 0(30) \n"
3019 " addi 4, 30, -(112 + 8) \n"
3020 " cmpd 7, 4, 1 \n"
3021 " bgt 7, 1f \n"
3022 " stdu 31, -64(1) \n"
3023 "1:addi 30, 30, -8 \n");
3024 }
3025
3026 /* Swap TOP and stack[sp-1] */
3027
3028 static void
3029 ppc64_emit_swap (void)
3030 {
3031 EMIT_ASM ("ld 4, 8(30) \n"
3032 "std 3, 8(30) \n"
3033 "mr 3, 4 \n");
3034 }
3035
3036 /* Call function FN - ELFv1. */
3037
3038 static void
3039 ppc64v1_emit_call (CORE_ADDR fn)
3040 {
3041 uint32_t buf[13];
3042 uint32_t *p = buf;
3043
3044 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3045 p += gen_call (p, fn, 1, 1);
3046 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3047
3048 emit_insns (buf, p - buf);
3049 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3050 }
3051
3052 /* Call function FN - ELFv2. */
3053
3054 static void
3055 ppc64v2_emit_call (CORE_ADDR fn)
3056 {
3057 uint32_t buf[10];
3058 uint32_t *p = buf;
3059
3060 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3061 p += gen_call (p, fn, 1, 0);
3062 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3063
3064 emit_insns (buf, p - buf);
3065 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3066 }
3067
3068 /* FN's prototype is `LONGEST(*fn)(int)'.
3069 TOP = fn (arg1)
3070 */
3071
3072 static void
3073 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3074 {
3075 uint32_t buf[13];
3076 uint32_t *p = buf;
3077
3078 /* Setup argument. arg1 is a 16-bit value. */
3079 p += gen_limm (p, 3, arg1, 1);
3080 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3081 p += gen_call (p, fn, 1, 1);
3082 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3083
3084 emit_insns (buf, p - buf);
3085 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3086 }
3087
3088 /* Likewise for ELFv2. */
3089
3090 static void
3091 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3092 {
3093 uint32_t buf[10];
3094 uint32_t *p = buf;
3095
3096 /* Setup argument. arg1 is a 16-bit value. */
3097 p += gen_limm (p, 3, arg1, 1);
3098 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3099 p += gen_call (p, fn, 1, 0);
3100 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3101
3102 emit_insns (buf, p - buf);
3103 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3104 }
3105
3106 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3107 fn (arg1, TOP)
3108
3109 TOP should be preserved/restored before/after the call. */
3110
3111 static void
3112 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3113 {
3114 uint32_t buf[17];
3115 uint32_t *p = buf;
3116
3117 /* Save TOP. 0(30) is next-empty. */
3118 p += GEN_STD (p, 3, 30, 0);
3119
3120 /* Setup argument. arg1 is a 16-bit value. */
3121 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3122 p += gen_limm (p, 3, arg1, 1);
3123 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3124 p += gen_call (p, fn, 1, 1);
3125 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3126
3127 /* Restore TOP */
3128 p += GEN_LD (p, 3, 30, 0);
3129
3130 emit_insns (buf, p - buf);
3131 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3132 }
3133
3134 /* Likewise for ELFv2. */
3135
3136 static void
3137 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3138 {
3139 uint32_t buf[14];
3140 uint32_t *p = buf;
3141
3142 /* Save TOP. 0(30) is next-empty. */
3143 p += GEN_STD (p, 3, 30, 0);
3144
3145 /* Setup argument. arg1 is a 16-bit value. */
3146 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3147 p += gen_limm (p, 3, arg1, 1);
3148 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3149 p += gen_call (p, fn, 1, 0);
3150 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3151
3152 /* Restore TOP */
3153 p += GEN_LD (p, 3, 30, 0);
3154
3155 emit_insns (buf, p - buf);
3156 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3157 }
3158
3159 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3160
3161 static void
3162 ppc64_emit_if_goto (int *offset_p, int *size_p)
3163 {
3164 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3165 "ldu 3, 8(30) \n"
3166 "1:bne 7, 1b \n");
3167
3168 if (offset_p)
3169 *offset_p = 8;
3170 if (size_p)
3171 *size_p = 14;
3172 }
3173
3174 /* Goto if stack[--sp] == TOP */
3175
3176 static void
3177 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3178 {
3179 EMIT_ASM ("ldu 4, 8(30) \n"
3180 "cmpd 7, 4, 3 \n"
3181 "ldu 3, 8(30) \n"
3182 "1:beq 7, 1b \n");
3183
3184 if (offset_p)
3185 *offset_p = 12;
3186 if (size_p)
3187 *size_p = 14;
3188 }
3189
3190 /* Goto if stack[--sp] != TOP */
3191
3192 static void
3193 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3194 {
3195 EMIT_ASM ("ldu 4, 8(30) \n"
3196 "cmpd 7, 4, 3 \n"
3197 "ldu 3, 8(30) \n"
3198 "1:bne 7, 1b \n");
3199
3200 if (offset_p)
3201 *offset_p = 12;
3202 if (size_p)
3203 *size_p = 14;
3204 }
3205
3206 /* Goto if stack[--sp] < TOP */
3207
3208 static void
3209 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3210 {
3211 EMIT_ASM ("ldu 4, 8(30) \n"
3212 "cmpd 7, 4, 3 \n"
3213 "ldu 3, 8(30) \n"
3214 "1:blt 7, 1b \n");
3215
3216 if (offset_p)
3217 *offset_p = 12;
3218 if (size_p)
3219 *size_p = 14;
3220 }
3221
3222 /* Goto if stack[--sp] <= TOP */
3223
3224 static void
3225 ppc64_emit_le_goto (int *offset_p, int *size_p)
3226 {
3227 EMIT_ASM ("ldu 4, 8(30) \n"
3228 "cmpd 7, 4, 3 \n"
3229 "ldu 3, 8(30) \n"
3230 "1:ble 7, 1b \n");
3231
3232 if (offset_p)
3233 *offset_p = 12;
3234 if (size_p)
3235 *size_p = 14;
3236 }
3237
3238 /* Goto if stack[--sp] > TOP */
3239
3240 static void
3241 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3242 {
3243 EMIT_ASM ("ldu 4, 8(30) \n"
3244 "cmpd 7, 4, 3 \n"
3245 "ldu 3, 8(30) \n"
3246 "1:bgt 7, 1b \n");
3247
3248 if (offset_p)
3249 *offset_p = 12;
3250 if (size_p)
3251 *size_p = 14;
3252 }
3253
3254 /* Goto if stack[--sp] >= TOP */
3255
3256 static void
3257 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3258 {
3259 EMIT_ASM ("ldu 4, 8(30) \n"
3260 "cmpd 7, 4, 3 \n"
3261 "ldu 3, 8(30) \n"
3262 "1:bge 7, 1b \n");
3263
3264 if (offset_p)
3265 *offset_p = 12;
3266 if (size_p)
3267 *size_p = 14;
3268 }
3269
3270 /* Table of emit ops for 64-bit ELFv1. */
3271
3272 static struct emit_ops ppc64v1_emit_ops_impl =
3273 {
3274 ppc64v1_emit_prologue,
3275 ppc64_emit_epilogue,
3276 ppc64_emit_add,
3277 ppc64_emit_sub,
3278 ppc64_emit_mul,
3279 ppc64_emit_lsh,
3280 ppc64_emit_rsh_signed,
3281 ppc64_emit_rsh_unsigned,
3282 ppc64_emit_ext,
3283 ppc64_emit_log_not,
3284 ppc64_emit_bit_and,
3285 ppc64_emit_bit_or,
3286 ppc64_emit_bit_xor,
3287 ppc64_emit_bit_not,
3288 ppc64_emit_equal,
3289 ppc64_emit_less_signed,
3290 ppc64_emit_less_unsigned,
3291 ppc64_emit_ref,
3292 ppc64_emit_if_goto,
3293 ppc_emit_goto,
3294 ppc_write_goto_address,
3295 ppc64_emit_const,
3296 ppc64v1_emit_call,
3297 ppc64v1_emit_reg,
3298 ppc64_emit_pop,
3299 ppc64_emit_stack_flush,
3300 ppc64_emit_zero_ext,
3301 ppc64_emit_swap,
3302 ppc_emit_stack_adjust,
3303 ppc64v1_emit_int_call_1,
3304 ppc64v1_emit_void_call_2,
3305 ppc64_emit_eq_goto,
3306 ppc64_emit_ne_goto,
3307 ppc64_emit_lt_goto,
3308 ppc64_emit_le_goto,
3309 ppc64_emit_gt_goto,
3310 ppc64_emit_ge_goto
3311 };
3312
3313 /* Table of emit ops for 64-bit ELFv2. */
3314
3315 static struct emit_ops ppc64v2_emit_ops_impl =
3316 {
3317 ppc64v2_emit_prologue,
3318 ppc64_emit_epilogue,
3319 ppc64_emit_add,
3320 ppc64_emit_sub,
3321 ppc64_emit_mul,
3322 ppc64_emit_lsh,
3323 ppc64_emit_rsh_signed,
3324 ppc64_emit_rsh_unsigned,
3325 ppc64_emit_ext,
3326 ppc64_emit_log_not,
3327 ppc64_emit_bit_and,
3328 ppc64_emit_bit_or,
3329 ppc64_emit_bit_xor,
3330 ppc64_emit_bit_not,
3331 ppc64_emit_equal,
3332 ppc64_emit_less_signed,
3333 ppc64_emit_less_unsigned,
3334 ppc64_emit_ref,
3335 ppc64_emit_if_goto,
3336 ppc_emit_goto,
3337 ppc_write_goto_address,
3338 ppc64_emit_const,
3339 ppc64v2_emit_call,
3340 ppc64v2_emit_reg,
3341 ppc64_emit_pop,
3342 ppc64_emit_stack_flush,
3343 ppc64_emit_zero_ext,
3344 ppc64_emit_swap,
3345 ppc_emit_stack_adjust,
3346 ppc64v2_emit_int_call_1,
3347 ppc64v2_emit_void_call_2,
3348 ppc64_emit_eq_goto,
3349 ppc64_emit_ne_goto,
3350 ppc64_emit_lt_goto,
3351 ppc64_emit_le_goto,
3352 ppc64_emit_gt_goto,
3353 ppc64_emit_ge_goto
3354 };
3355
3356 #endif
3357
3358 /* Implementation of linux_target_ops method "emit_ops". */
3359
3360 static struct emit_ops *
3361 ppc_emit_ops (void)
3362 {
3363 #ifdef __powerpc64__
3364 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3365
3366 if (register_size (regcache->tdesc, 0) == 8)
3367 {
3368 if (is_elfv2_inferior ())
3369 return &ppc64v2_emit_ops_impl;
3370 else
3371 return &ppc64v1_emit_ops_impl;
3372 }
3373 #endif
3374 return &ppc_emit_ops_impl;
3375 }
3376
3377 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3378
3379 static int
3380 ppc_get_ipa_tdesc_idx (void)
3381 {
3382 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3383 const struct target_desc *tdesc = regcache->tdesc;
3384
3385 #ifdef __powerpc64__
3386 if (tdesc == tdesc_powerpc_64l)
3387 return PPC_TDESC_BASE;
3388 if (tdesc == tdesc_powerpc_altivec64l)
3389 return PPC_TDESC_ALTIVEC;
3390 if (tdesc == tdesc_powerpc_vsx64l)
3391 return PPC_TDESC_VSX;
3392 if (tdesc == tdesc_powerpc_isa205_64l)
3393 return PPC_TDESC_ISA205;
3394 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3395 return PPC_TDESC_ISA205_ALTIVEC;
3396 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3397 return PPC_TDESC_ISA205_VSX;
3398 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3399 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3400 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3401 return PPC_TDESC_ISA207_VSX;
3402 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3403 return PPC_TDESC_ISA207_HTM_VSX;
3404 #endif
3405
3406 if (tdesc == tdesc_powerpc_32l)
3407 return PPC_TDESC_BASE;
3408 if (tdesc == tdesc_powerpc_altivec32l)
3409 return PPC_TDESC_ALTIVEC;
3410 if (tdesc == tdesc_powerpc_vsx32l)
3411 return PPC_TDESC_VSX;
3412 if (tdesc == tdesc_powerpc_isa205_32l)
3413 return PPC_TDESC_ISA205;
3414 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3415 return PPC_TDESC_ISA205_ALTIVEC;
3416 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3417 return PPC_TDESC_ISA205_VSX;
3418 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3419 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3420 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3421 return PPC_TDESC_ISA207_VSX;
3422 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3423 return PPC_TDESC_ISA207_HTM_VSX;
3424 if (tdesc == tdesc_powerpc_e500l)
3425 return PPC_TDESC_E500;
3426
3427 return 0;
3428 }
3429
3430 struct linux_target_ops the_low_target = {
3431 NULL, /* new_process */
3432 NULL, /* delete_process */
3433 NULL, /* new_thread */
3434 NULL, /* delete_thread */
3435 NULL, /* new_fork */
3436 NULL, /* prepare_to_resume */
3437 NULL, /* process_qsupported */
3438 ppc_supports_tracepoints,
3439 ppc_get_thread_area,
3440 ppc_install_fast_tracepoint_jump_pad,
3441 ppc_emit_ops,
3442 ppc_get_min_fast_tracepoint_insn_len,
3443 NULL, /* supports_range_stepping */
3444 ppc_supports_hardware_single_step,
3445 NULL, /* get_syscall_trapinfo */
3446 ppc_get_ipa_tdesc_idx,
3447 };
3448
3449 /* The linux target ops object. */
3450
3451 linux_process_target *the_linux_target = &the_ppc_target;
3452
3453 void
3454 initialize_low_arch (void)
3455 {
3456 /* Initialize the Linux target descriptions. */
3457
3458 init_registers_powerpc_32l ();
3459 init_registers_powerpc_altivec32l ();
3460 init_registers_powerpc_vsx32l ();
3461 init_registers_powerpc_isa205_32l ();
3462 init_registers_powerpc_isa205_altivec32l ();
3463 init_registers_powerpc_isa205_vsx32l ();
3464 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3465 init_registers_powerpc_isa207_vsx32l ();
3466 init_registers_powerpc_isa207_htm_vsx32l ();
3467 init_registers_powerpc_e500l ();
3468 #if __powerpc64__
3469 init_registers_powerpc_64l ();
3470 init_registers_powerpc_altivec64l ();
3471 init_registers_powerpc_vsx64l ();
3472 init_registers_powerpc_isa205_64l ();
3473 init_registers_powerpc_isa205_altivec64l ();
3474 init_registers_powerpc_isa205_vsx64l ();
3475 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3476 init_registers_powerpc_isa207_vsx64l ();
3477 init_registers_powerpc_isa207_htm_vsx64l ();
3478 #endif
3479
3480 initialize_regsets_info (&ppc_regsets_info);
3481 }