gdbserver/linux-low: turn 'supports_z_point_type' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-ppc-low.cc
1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
/* Helpers for decoding 32-bit PowerPC instructions.  PPC_FIELD uses
   IBM bit numbering (bit 0 is the most significant bit of the 32-bit
   word): it extracts the LEN-bit field starting at bit FROM.  */
#define PPC_FIELD(value, from, len) \
	(((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
/* Sign-extend the BS-bit value V to CORE_ADDR width.  */
#define PPC_SEXT(v, bs) \
	((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
	  ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
	 - ((CORE_ADDR) 1 << ((bs) - 1)))
/* Primary (6-bit) opcode of an instruction.  */
#define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
/* BO field of a conditional branch instruction.  */
#define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
/* Sign-extended, word-aligned LI displacement of an I-form branch.  */
#define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
/* Sign-extended, word-aligned BD displacement of a B-form branch.  */
#define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
46
/* Linux target op definitions for the PowerPC architecture.  Overrides
   the generic linux_process_target hooks with PowerPC-specific
   register access, breakpoint and architecture-setup behavior.  */

class ppc_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;
};
75
/* The singleton target ops object.  */

static ppc_target the_ppc_target;

/* Holds the AT_HWCAP auxv entry, cached by low_arch_setup.  */

static unsigned long ppc_hwcap;

/* Holds the AT_HWCAP2 auxv entry, cached by low_arch_setup.  */

static unsigned long ppc_hwcap2;


/* Number of entries in the register maps below: 32 GPRs, 32 FPRs,
   PC/MSR/CR/LR/CTR/XER/FPSCR, plus orig_r3 and trap.  */
#define ppc_num_regs 73
90
#ifdef __powerpc64__
/* Map GDB register numbers to byte offsets in the kernel's user area,
   for PTRACE_PEEKUSER/POKEUSER access (64-bit layout: 8-byte slots).
   We use a constant for FPSCR instead of PT_FPSCR, because
   many shipped PPC64 kernels had the wrong value in ptrace.h.  */
static int ppc_regmap[] =
 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
  PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
  PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
  PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
  PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
  PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
  PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
  PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
  /* FPRs 0-31, packed after PT_FPR0.  */
  PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
  PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
  PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
  PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
  PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
  PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
  PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
  PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
  /* PC, MSR, CR, LR, CTR, XER, FPSCR (FPSCR offset hard-coded; see
     comment above), then orig_r3 and trap.  */
  PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
  PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
  PT_ORIG_R3 * 8, PT_TRAP * 8 };
#else
/* 32-bit layout: 4-byte slots.  Currently, don't check/send MQ.  */
static int ppc_regmap[] =
 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
  PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
  PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
  PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
  PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
  PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
  PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
  PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
  PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
  PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
  PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
  PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
  PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
  PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
  PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
  PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
  PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
  PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
  PT_ORIG_R3 * 4, PT_TRAP * 4
 };

/* Alternate 32-bit map for e500 (SPE) cores: these have no classic
   FPRs, so the 33 FP-related slots are marked -1 (inaccessible via
   PTRACE_PEEKUSER).  */
static int ppc_regmap_e500[] =
 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
  PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
  PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
  PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
  PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
  PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
  PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
  PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
  -1, -1, -1, -1,
  -1, -1, -1, -1,
  -1, -1, -1, -1,
  -1, -1, -1, -1,
  -1, -1, -1, -1,
  -1, -1, -1, -1,
  -1, -1, -1, -1,
  -1, -1, -1, -1,
  PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
  PT_CTR * 4, PT_XER * 4, -1,
  PT_ORIG_R3 * 4, PT_TRAP * 4
 };
#endif
160
/* Check whether the kernel provides a register set with number
   REGSET_ID of size REGSETSIZE for process/thread TID.  Returns 1 if
   available, 0 otherwise.  */

static int
ppc_check_regset (int tid, int regset_id, int regsetsize)
{
  struct iovec iov;

  iov.iov_base = alloca (regsetsize);
  iov.iov_len = regsetsize;

  /* ENODATA is also taken to mean the regset is supported (the kernel
     recognizes it even if it has no data to return right now).  */
  if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) < 0
      && errno != ENODATA)
    return 0;

  return 1;
}
178
/* Implementation of the low_cannot_store_register target op.  Returns
   true for registers the kernel does not let us write back.  */

bool
ppc_target::low_cannot_store_register (int regno)
{
  const struct target_desc *tdesc = current_process ()->tdesc;

#ifndef __powerpc64__
  /* Some kernels do not allow us to store fpscr.  */
  if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
      && regno == find_regno (tdesc, "fpscr"))
    return true;
#endif

  /* Some kernels do not allow us to store orig_r3 or trap.  */
  if (regno == find_regno (tdesc, "orig_r3")
      || regno == find_regno (tdesc, "trap"))
    return true;

  return false;
}
198
/* Implementation of the low_cannot_fetch_register target op.  Every
   PowerPC register can be fetched.  */

bool
ppc_target::low_cannot_fetch_register (int regno)
{
  return false;
}
204
/* Copy register REGNO out of REGCACHE into BUF, a single
   sizeof (long)-sized ptrace slot, honoring host byte order.  */

static void
ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
{
  /* Clear the slot first so padding bytes are deterministic.  */
  memset (buf, 0, sizeof (long));

  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      /* Little-endian values always sit at the left end of the buffer.  */
      collect_register (regcache, regno, buf);
    }
  else if (__BYTE_ORDER == __BIG_ENDIAN)
    {
      /* Big-endian values sit at the right end of the buffer.  In case of
	 registers whose sizes are smaller than sizeof (long), we must use a
	 padding to access them correctly.  */
      int size = register_size (regcache->tdesc, regno);

      if (size < sizeof (long))
	collect_register (regcache, regno, buf + sizeof (long) - size);
      else
	collect_register (regcache, regno, buf);
    }
  else
    perror_with_name ("Unexpected byte order");
}
230
/* Inverse of ppc_collect_ptrace_register: copy register REGNO from
   the ptrace slot BUF into REGCACHE, honoring host byte order.  */

static void
ppc_supply_ptrace_register (struct regcache *regcache,
			    int regno, const char *buf)
{
  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      /* Little-endian values always sit at the left end of the buffer.  */
      supply_register (regcache, regno, buf);
    }
  else if (__BYTE_ORDER == __BIG_ENDIAN)
    {
      /* Big-endian values sit at the right end of the buffer.  In case of
	 registers whose sizes are smaller than sizeof (long), we must use a
	 padding to access them correctly.  */
      int size = register_size (regcache->tdesc, regno);

      if (size < sizeof (long))
	supply_register (regcache, regno, buf + sizeof (long) - size);
      else
	supply_register (regcache, regno, buf);
    }
  else
    perror_with_name ("Unexpected byte order");
}
255
/* Implementation of the low_supports_breakpoints target op.  Software
   breakpoints are implemented via the trap instruction below.  */

bool
ppc_target::low_supports_breakpoints ()
{
  return true;
}
261
262 CORE_ADDR
263 ppc_target::low_get_pc (regcache *regcache)
264 {
265 if (register_size (regcache->tdesc, 0) == 4)
266 {
267 unsigned int pc;
268 collect_register_by_name (regcache, "pc", &pc);
269 return (CORE_ADDR) pc;
270 }
271 else
272 {
273 unsigned long pc;
274 collect_register_by_name (regcache, "pc", &pc);
275 return (CORE_ADDR) pc;
276 }
277 }
278
279 void
280 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
281 {
282 if (register_size (regcache->tdesc, 0) == 4)
283 {
284 unsigned int newpc = pc;
285 supply_register_by_name (regcache, "pc", &newpc);
286 }
287 else
288 {
289 unsigned long newpc = pc;
290 supply_register_by_name (regcache, "pc", &newpc);
291 }
292 }
293
#ifndef __powerpc64__
/* Nonzero once low_arch_setup has applied its one-time adjustments to
   ppc_regmap (SPE map swap, FPSCR slot widening); prevents redoing
   them on subsequent calls.  */
static int ppc_regmap_adjusted;
#endif


/* Correct in either endianness.
   This instruction is "twge r2, r2", which GDB uses as a software
   breakpoint.  */
static const unsigned int ppc_breakpoint = 0x7d821008;
#define ppc_breakpoint_len 4
304
/* Implementation of target ops method "sw_breakpoint_from_kind".
   Returns the 4-byte trap instruction regardless of KIND, and stores
   its length in *SIZE.  */

const gdb_byte *
ppc_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = ppc_breakpoint_len;
  return (const gdb_byte *) &ppc_breakpoint;
}
313
314 bool
315 ppc_target::low_breakpoint_at (CORE_ADDR where)
316 {
317 unsigned int insn;
318
319 read_memory (where, (unsigned char *) &insn, 4);
320 if (insn == ppc_breakpoint)
321 return true;
322 /* If necessary, recognize more trap instructions here. GDB only uses
323 the one. */
324
325 return false;
326 }
327
328 /* Implement supports_z_point_type target-ops.
329 Returns true if type Z_TYPE breakpoint is supported.
330
331 Handling software breakpoint at server side, so tracepoints
332 and breakpoints can be inserted at the same location. */
333
334 bool
335 ppc_target::supports_z_point_type (char z_type)
336 {
337 switch (z_type)
338 {
339 case Z_PACKET_SW_BP:
340 return true;
341 case Z_PACKET_HW_BP:
342 case Z_PACKET_WRITE_WP:
343 case Z_PACKET_ACCESS_WP:
344 default:
345 return false;
346 }
347 }
348
349 /* Implement insert_point target-ops.
350 Returns 0 on success, -1 on failure and 1 on unsupported. */
351
352 static int
353 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
354 int size, struct raw_breakpoint *bp)
355 {
356 switch (type)
357 {
358 case raw_bkpt_type_sw:
359 return insert_memory_breakpoint (bp);
360
361 case raw_bkpt_type_hw:
362 case raw_bkpt_type_write_wp:
363 case raw_bkpt_type_access_wp:
364 default:
365 /* Unsupported. */
366 return 1;
367 }
368 }
369
370 /* Implement remove_point target-ops.
371 Returns 0 on success, -1 on failure and 1 on unsupported. */
372
373 static int
374 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
375 int size, struct raw_breakpoint *bp)
376 {
377 switch (type)
378 {
379 case raw_bkpt_type_sw:
380 return remove_memory_breakpoint (bp);
381
382 case raw_bkpt_type_hw:
383 case raw_bkpt_type_write_wp:
384 case raw_bkpt_type_access_wp:
385 default:
386 /* Unsupported. */
387 return 1;
388 }
389 }
390
/* Provide only a fill function for the general register set.  ps_lgetregs
   will use this for NPTL support.  */

static void ppc_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

  /* GPRs 0-31.  */
  for (i = 0; i < 32; i++)
    ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);

  /* PC, MSR, CR, LR, CTR and XER (map indices 64-69).  */
  for (i = 64; i < 70; i++)
    ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);

  /* orig_r3 and trap (indices 71-72).  Index 70 (fpscr) is
     deliberately skipped here -- presumably it is not part of the
     general register set; TODO confirm against the FP regset.  */
  for (i = 71; i < 73; i++)
    ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
}
407
/* Program Priority Register regset fill function: copy PPR from
   REGCACHE into the kernel buffer BUF.  */

static void
ppc_fill_pprregset (struct regcache *regcache, void *buf)
{
  char *ppr = (char *) buf;

  collect_register_by_name (regcache, "ppr", ppr);
}

/* Program Priority Register regset store function: copy PPR from the
   kernel buffer BUF into REGCACHE.  */

static void
ppc_store_pprregset (struct regcache *regcache, const void *buf)
{
  const char *ppr = (const char *) buf;

  supply_register_by_name (regcache, "ppr", ppr);
}

/* Data Stream Control Register regset fill function.  */

static void
ppc_fill_dscrregset (struct regcache *regcache, void *buf)
{
  char *dscr = (char *) buf;

  collect_register_by_name (regcache, "dscr", dscr);
}

/* Data Stream Control Register regset store function.  */

static void
ppc_store_dscrregset (struct regcache *regcache, const void *buf)
{
  const char *dscr = (const char *) buf;

  supply_register_by_name (regcache, "dscr", dscr);
}

/* Target Address Register regset fill function.  */

static void
ppc_fill_tarregset (struct regcache *regcache, void *buf)
{
  char *tar = (char *) buf;

  collect_register_by_name (regcache, "tar", tar);
}

/* Target Address Register regset store function.  */

static void
ppc_store_tarregset (struct regcache *regcache, const void *buf)
{
  const char *tar = (const char *) buf;

  supply_register_by_name (regcache, "tar", tar);
}
467
/* Event-Based Branching regset store function.  Unless the inferior
   has a perf event open, ptrace can return in error when reading and
   writing to the regset, with ENODATA.  For reading, the registers
   will correctly show as unavailable.  For writing, gdbserver
   currently only caches any register writes from P and G packets and
   the stub always tries to write all the regsets when resuming the
   inferior, which would result in frequent warnings.  For this
   reason, we don't define a fill function.  This also means that the
   client-side regcache will be dirty if the user tries to write to
   the EBB registers.  G packets that the client sends to write to
   unrelated registers will also include data for EBB registers, even
   if they are unavailable.  */

static void
ppc_store_ebbregset (struct regcache *regcache, const void *buf)
{
  const char *regset = (const char *) buf;

  /* The order in the kernel regset is: EBBRR, EBBHR, BESCR.  In the
     .dat file is BESCR, EBBHR, EBBRR.  Each slot is 8 bytes wide in
     the kernel buffer.  */
  supply_register_by_name (regcache, "ebbrr", &regset[0]);
  supply_register_by_name (regcache, "ebbhr", &regset[8]);
  supply_register_by_name (regcache, "bescr", &regset[16]);
}
492
/* Performance Monitoring Unit regset fill function.  */

static void
ppc_fill_pmuregset (struct regcache *regcache, void *buf)
{
  char *regset = (char *) buf;

  /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
     In the .dat file is MMCR0, MMCR2, SIAR, SDAR, SIER.  Each slot is
     8 bytes wide.  */
  collect_register_by_name (regcache, "siar", &regset[0]);
  collect_register_by_name (regcache, "sdar", &regset[8]);
  collect_register_by_name (regcache, "sier", &regset[16]);
  collect_register_by_name (regcache, "mmcr2", &regset[24]);
  collect_register_by_name (regcache, "mmcr0", &regset[32]);
}

/* Performance Monitoring Unit regset store function.  Mirrors the
   layout used by the fill function above.  */

static void
ppc_store_pmuregset (struct regcache *regcache, const void *buf)
{
  const char *regset = (const char *) buf;

  supply_register_by_name (regcache, "siar", &regset[0]);
  supply_register_by_name (regcache, "sdar", &regset[8]);
  supply_register_by_name (regcache, "sier", &regset[16]);
  supply_register_by_name (regcache, "mmcr2", &regset[24]);
  supply_register_by_name (regcache, "mmcr0", &regset[32]);
}
522
/* Hardware Transactional Memory special-purpose register regset fill
   function.  Collects the three consecutive TM SPRs starting at
   TFHAR, one 8-byte slot each.  */

static void
ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
{
  int i, base;
  char *regset = (char *) buf;

  base = find_regno (regcache->tdesc, "tfhar");
  for (i = 0; i < 3; i++)
    collect_register (regcache, base + i, &regset[i * 8]);
}

/* Hardware Transactional Memory special-purpose register regset store
   function.  */

static void
ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
{
  int i, base;
  const char *regset = (const char *) buf;

  base = find_regno (regcache->tdesc, "tfhar");
  for (i = 0; i < 3; i++)
    supply_register (regcache, base + i, &regset[i * 8]);
}
550
/* For the same reasons as the EBB regset, none of the HTM
   checkpointed regsets have a fill function.  These registers are
   only available if the inferior is in a transaction.  */

/* Hardware Transactional Memory checkpointed general-purpose regset
   store function.  */

static void
ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
{
  int i, base, size, endian_offset;
  const char *regset = (const char *) buf;

  /* Checkpointed GPR slots are 4 or 8 bytes wide, depending on the
     target description.  */
  base = find_regno (regcache->tdesc, "cr0");
  size = register_size (regcache->tdesc, base);

  gdb_assert (size == 4 || size == 8);

  /* Checkpointed GPRs cr0..cr31 occupy consecutive slots.  */
  for (i = 0; i < 32; i++)
    supply_register (regcache, base + i, &regset[i * size]);

  /* CCR and XER use only the low word of their slot; on a big-endian
     host with 8-byte slots, that word is the second one -- hence the
     4-byte offset.  */
  endian_offset = 0;

  if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
    endian_offset = 4;

  supply_register_by_name (regcache, "ccr",
			   &regset[PT_CCR * size + endian_offset]);

  supply_register_by_name (regcache, "cxer",
			   &regset[PT_XER * size + endian_offset]);

  supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
  supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
}
586
/* Hardware Transactional Memory checkpointed floating-point regset
   store function.  Supplies checkpointed FPRs cf0..cf31 (8 bytes
   each) followed by the checkpointed FPSCR.  */

static void
ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
{
  int i, base;
  const char *regset = (const char *) buf;

  base = find_regno (regcache->tdesc, "cf0");

  for (i = 0; i < 32; i++)
    supply_register (regcache, base + i, &regset[i * 8]);

  supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
}
603
/* Hardware Transactional Memory checkpointed vector regset store
   function.  Supplies checkpointed VRs cvr0..cvr31 (16 bytes each),
   then CVSCR and CVRSAVE.  */

static void
ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
{
  int i, base;
  const char *regset = (const char *) buf;
  int vscr_offset = 0;

  base = find_regno (regcache->tdesc, "cvr0");

  for (i = 0; i < 32; i++)
    supply_register (regcache, base + i, &regset[i * 16]);

  /* On big-endian hosts CVSCR sits in the last 4 bytes of its 16-byte
     slot.  */
  if (__BYTE_ORDER == __BIG_ENDIAN)
    vscr_offset = 12;

  supply_register_by_name (regcache, "cvscr",
			   &regset[32 * 16 + vscr_offset]);

  supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
}
627
/* Hardware Transactional Memory checkpointed vector-scalar regset
   store function.  Supplies the 32 cvs*h registers, 8 bytes each.  */

static void
ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
{
  int i, base;
  const char *regset = (const char *) buf;

  base = find_regno (regcache->tdesc, "cvs0h");
  for (i = 0; i < 32; i++)
    supply_register (regcache, base + i, &regset[i * 8]);
}
641
/* Hardware Transactional Memory checkpointed Program Priority
   Register regset store function.  */

static void
ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
{
  const char *cppr = (const char *) buf;

  supply_register_by_name (regcache, "cppr", cppr);
}

/* Hardware Transactional Memory checkpointed Data Stream Control
   Register regset store function.  */

static void
ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
{
  const char *cdscr = (const char *) buf;

  supply_register_by_name (regcache, "cdscr", cdscr);
}

/* Hardware Transactional Memory checkpointed Target Address Register
   regset store function.  */

static void
ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
{
  const char *ctar = (const char *) buf;

  supply_register_by_name (regcache, "ctar", ctar);
}
674
/* Vector-scalar (VSX) regset fill function: collects the 32 vs*h
   registers, 8 bytes each.  */

static void
ppc_fill_vsxregset (struct regcache *regcache, void *buf)
{
  int i, base;
  char *regset = (char *) buf;

  base = find_regno (regcache->tdesc, "vs0h");
  for (i = 0; i < 32; i++)
    collect_register (regcache, base + i, &regset[i * 8]);
}

/* Vector-scalar (VSX) regset store function.  */

static void
ppc_store_vsxregset (struct regcache *regcache, const void *buf)
{
  int i, base;
  const char *regset = (const char *) buf;

  base = find_regno (regcache->tdesc, "vs0h");
  for (i = 0; i < 32; i++)
    supply_register (regcache, base + i, &regset[i * 8]);
}
696
/* Altivec (VMX) regset fill function: collects VRs vr0..vr31
   (16 bytes each), then VSCR and VRSAVE.  */

static void
ppc_fill_vrregset (struct regcache *regcache, void *buf)
{
  int i, base;
  char *regset = (char *) buf;
  int vscr_offset = 0;

  base = find_regno (regcache->tdesc, "vr0");
  for (i = 0; i < 32; i++)
    collect_register (regcache, base + i, &regset[i * 16]);

  /* On big-endian hosts VSCR occupies the last 4 bytes of its 16-byte
     slot.  */
  if (__BYTE_ORDER == __BIG_ENDIAN)
    vscr_offset = 12;

  collect_register_by_name (regcache, "vscr",
			    &regset[32 * 16 + vscr_offset]);

  collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
}

/* Altivec (VMX) regset store function; mirrors the fill layout.  */

static void
ppc_store_vrregset (struct regcache *regcache, const void *buf)
{
  int i, base;
  const char *regset = (const char *) buf;
  int vscr_offset = 0;

  base = find_regno (regcache->tdesc, "vr0");
  for (i = 0; i < 32; i++)
    supply_register (regcache, base + i, &regset[i * 16]);

  if (__BYTE_ORDER == __BIG_ENDIAN)
    vscr_offset = 12;

  supply_register_by_name (regcache, "vscr",
			   &regset[32 * 16 + vscr_offset]);
  supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
}
735
/* Layout of the buffer used by PTRACE_GETEVRREGS/PTRACE_SETEVRREGS
   for the SPE register state.  */

struct gdb_evrregset_t
{
  unsigned long evr[32];	/* ev0h..ev31h values.  */
  unsigned long long acc;	/* "acc" register.  */
  unsigned long spefscr;	/* "spefscr" register.  */
};
742
/* SPE (EVR) regset fill function: collects ev0h..ev31h, the
   accumulator and SPEFSCR into the gdb_evrregset_t layout.  */

static void
ppc_fill_evrregset (struct regcache *regcache, void *buf)
{
  int i, ev0;
  struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;

  ev0 = find_regno (regcache->tdesc, "ev0h");
  for (i = 0; i < 32; i++)
    collect_register (regcache, ev0 + i, &regset->evr[i]);

  collect_register_by_name (regcache, "acc", &regset->acc);
  collect_register_by_name (regcache, "spefscr", &regset->spefscr);
}

/* SPE (EVR) regset store function; mirrors the fill layout.  */

static void
ppc_store_evrregset (struct regcache *regcache, const void *buf)
{
  int i, ev0;
  const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;

  ev0 = find_regno (regcache->tdesc, "ev0h");
  for (i = 0; i < 32; i++)
    supply_register (regcache, ev0 + i, &regset->evr[i]);

  supply_register_by_name (regcache, "acc", &regset->acc);
  supply_register_by_name (regcache, "spefscr", &regset->spefscr);
}
770
/* Support for hardware single step.  Always available on this
   target.  */

static int
ppc_supports_hardware_single_step (void)
{
  return 1;
}
778
/* Table of ptrace regsets; sizes start at 0 and are filled in by
   low_arch_setup according to the detected features.  Store-only
   regsets (EBB, HTM checkpointed state) have NULL fill functions --
   see the comment above ppc_store_ebbregset.  */
static struct regset_info ppc_regsets[] = {
  /* List the extra register sets before GENERAL_REGS.  That way we will
     fetch them every time, but still fall back to PTRACE_PEEKUSER for the
     general registers.  Some kernels support these, but not the newer
     PPC_PTRACE_GETREGS.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
    NULL, ppc_store_tm_ctarregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
    NULL, ppc_store_tm_cdscrregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
    NULL, ppc_store_tm_cpprregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
    NULL, ppc_store_tm_cvsxregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
    NULL, ppc_store_tm_cvrregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
    NULL, ppc_store_tm_cfprregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
    NULL, ppc_store_tm_cgprregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
    ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
    NULL, ppc_store_ebbregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
    ppc_fill_pmuregset, ppc_store_pmuregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
    ppc_fill_tarregset, ppc_store_tarregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
    ppc_fill_pprregset, ppc_store_pprregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
    ppc_fill_dscrregset, ppc_store_dscrregset },
  { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
    ppc_fill_vsxregset, ppc_store_vsxregset },
  { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
    ppc_fill_vrregset, ppc_store_vrregset },
  { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
    ppc_fill_evrregset, ppc_store_evrregset },
  { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
  NULL_REGSET
};
819
/* PTRACE_PEEKUSER/POKEUSER register map description.  */

static struct usrregs_info ppc_usrregs_info =
  {
    ppc_num_regs,
    ppc_regmap,
  };

static struct regsets_info ppc_regsets_info =
  {
    ppc_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

/* Aggregate regs_info returned by get_regs_info below.  */

static struct regs_info myregs_info =
  {
    NULL, /* regset_bitmap */
    &ppc_usrregs_info,
    &ppc_regsets_info
  };
839
/* Implementation of the get_regs_info target op.  */

const regs_info *
ppc_target::get_regs_info ()
{
  return &myregs_info;
}
845
/* Implementation of the low_arch_setup target op.  Determines the
   inferior's word size and hardware capabilities, selects a matching
   target description, applies one-time regmap fixups (32-bit only),
   and sizes each entry of ppc_regsets according to the detected
   features.  */

void
ppc_target::low_arch_setup ()
{
  const struct target_desc *tdesc;
  struct regset_info *regset;
  struct ppc_linux_features features = ppc_linux_no_features;

  int tid = lwpid_of (current_thread);

  features.wordsize = ppc_linux_target_wordsize (tid);

  /* Install a provisional base description first, so the hwcap
     lookups below can work.  */
  if (features.wordsize == 4)
    tdesc = tdesc_powerpc_32l;
  else
    tdesc = tdesc_powerpc_64l;

  current_process ()->tdesc = tdesc;

  /* The value of current_process ()->tdesc needs to be set for this
     call.  */
  ppc_hwcap = linux_get_hwcap (features.wordsize);
  ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);

  features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);

  if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
    features.vsx = true;

  if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
    features.altivec = true;

  /* The feature tiers nest: PPR/DSCR, then ISA 2.07 (TAR/EBB/PMU),
     then HTM.  Each regset is probed with ppc_check_regset so that a
     kernel lacking the corresponding NT_PPC_* note disables the whole
     tier.  */
  if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
      && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
      && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
    {
      features.ppr_dscr = true;
      if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
	  && (ppc_hwcap2 & PPC_FEATURE2_TAR)
	  && (ppc_hwcap2 & PPC_FEATURE2_EBB)
	  && ppc_check_regset (tid, NT_PPC_TAR,
			       PPC_LINUX_SIZEOF_TARREGSET)
	  && ppc_check_regset (tid, NT_PPC_EBB,
			       PPC_LINUX_SIZEOF_EBBREGSET)
	  && ppc_check_regset (tid, NT_PPC_PMU,
			       PPC_LINUX_SIZEOF_PMUREGSET))
	{
	  features.isa207 = true;
	  if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
	      && ppc_check_regset (tid, NT_PPC_TM_SPR,
				   PPC_LINUX_SIZEOF_TM_SPRREGSET))
	    features.htm = true;
	}
    }

  tdesc = ppc_linux_match_description (features);

  /* On 32-bit machines, check for SPE registers.
     Set the low target's regmap field as appropriately.  */
#ifndef __powerpc64__
  if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
    tdesc = tdesc_powerpc_e500l;

  if (!ppc_regmap_adjusted)
    {
      if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
	ppc_usrregs_info.regmap = ppc_regmap_e500;

      /* If the FPSCR is 64-bit wide, we need to fetch the whole
	 64-bit slot and not just its second word.  The PT_FPSCR
	 supplied in a 32-bit GDB compilation doesn't reflect
	 this.  */
      if (register_size (tdesc, 70) == 8)
	ppc_regmap[70] = (48 + 2*32) * sizeof (long);

      ppc_regmap_adjusted = 1;
    }
#endif

  current_process ()->tdesc = tdesc;

  /* Size each regset according to the detected features; a size of
     zero presumably disables the regset -- TODO confirm against the
     generic regset-fetch code.  */
  for (regset = ppc_regsets; regset->size >= 0; regset++)
    switch (regset->get_request)
      {
      case PTRACE_GETVRREGS:
	regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
	break;
      case PTRACE_GETVSXREGS:
	regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
	break;
      case PTRACE_GETEVRREGS:
	/* 32 4-byte EVR high words, 8-byte ACC, 4-byte SPEFSCR; see
	   struct gdb_evrregset_t.  */
	if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
	  regset->size = 32 * 4 + 8 + 4;
	else
	  regset->size = 0;
	break;
      case PTRACE_GETREGSET:
	switch (regset->nt_type)
	  {
	  case NT_PPC_PPR:
	    regset->size = (features.ppr_dscr ?
			    PPC_LINUX_SIZEOF_PPRREGSET : 0);
	    break;
	  case NT_PPC_DSCR:
	    regset->size = (features.ppr_dscr ?
			    PPC_LINUX_SIZEOF_DSCRREGSET : 0);
	    break;
	  case NT_PPC_TAR:
	    regset->size = (features.isa207 ?
			    PPC_LINUX_SIZEOF_TARREGSET : 0);
	    break;
	  case NT_PPC_EBB:
	    regset->size = (features.isa207 ?
			    PPC_LINUX_SIZEOF_EBBREGSET : 0);
	    break;
	  case NT_PPC_PMU:
	    regset->size = (features.isa207 ?
			    PPC_LINUX_SIZEOF_PMUREGSET : 0);
	    break;
	  case NT_PPC_TM_SPR:
	    regset->size = (features.htm ?
			    PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
	    break;
	  case NT_PPC_TM_CGPR:
	    /* The checkpointed GPR regset size depends on the
	       inferior's word size.  */
	    if (features.wordsize == 4)
	      regset->size = (features.htm ?
			      PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
	    else
	      regset->size = (features.htm ?
			      PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
	    break;
	  case NT_PPC_TM_CFPR:
	    regset->size = (features.htm ?
			    PPC_LINUX_SIZEOF_CFPRREGSET : 0);
	    break;
	  case NT_PPC_TM_CVMX:
	    regset->size = (features.htm ?
			    PPC_LINUX_SIZEOF_CVMXREGSET : 0);
	    break;
	  case NT_PPC_TM_CVSX:
	    regset->size = (features.htm ?
			    PPC_LINUX_SIZEOF_CVSXREGSET : 0);
	    break;
	  case NT_PPC_TM_CPPR:
	    regset->size = (features.htm ?
			    PPC_LINUX_SIZEOF_CPPRREGSET : 0);
	    break;
	  case NT_PPC_TM_CDSCR:
	    regset->size = (features.htm ?
			    PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
	    break;
	  case NT_PPC_TM_CTAR:
	    regset->size = (features.htm ?
			    PPC_LINUX_SIZEOF_CTARREGSET : 0);
	    break;
	  default:
	    break;
	  }
	break;
      default:
	break;
      }
}
1008
/* Implementation of linux_target_ops method "supports_tracepoints".
   Always supported on this target.  */

static int
ppc_supports_tracepoints (void)
{
  return 1;
}
1016
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  Returns 0
   and stores the address in *ADDR.  */

static int
ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  struct thread_info *thr = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thr, 1);
  ULONGEST tp = 0;

  /* Use r13 on 64-bit inferiors, r2 on 32-bit ones -- presumably the
     respective TLS pointer registers; TODO confirm against the ppc
     ABIs.  */
#ifdef __powerpc64__
  if (register_size (regcache->tdesc, 0) == 8)
    collect_register_by_name (regcache, "r13", &tp);
  else
#endif
    collect_register_by_name (regcache, "r2", &tp);

  *addr = tp;

  return 0;
}
1041
#ifdef __powerpc64__

/* Older glibc doesn't provide this.  */

#ifndef EF_PPC64_ABI
#define EF_PPC64_ABI 3
#endif

/* Returns 1 if inferior is using ELFv2 ABI.  Undefined for 32-bit
   inferiors.  */

static int
is_elfv2_inferior (void)
{
  /* To be used as fallback if we're unable to determine the right result -
     assume inferior uses the same ABI as gdbserver.  */
#if _CALL_ELF == 2
  const int def_res = 1;
#else
  const int def_res = 0;
#endif
  CORE_ADDR phdr;
  Elf64_Ehdr ehdr;

  const struct target_desc *tdesc = current_process ()->tdesc;
  int wordsize = register_size (tdesc, 0);

  /* AT_PHDR gives the address of the program headers in the
     inferior.  */
  if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
    return def_res;

  /* Assume ELF header is at the beginning of the page where program headers
     are located.  If it doesn't look like one, bail.  */

  read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
  if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
    return def_res;

  /* EF_PPC64_ABI masks a two-bit field of e_flags; the value 2
     denotes ELFv2.  */
  return (ehdr.e_flags & EF_PPC64_ABI) == 2;
}

#endif
1083
/* Generate a ds-form instruction in BUF and return the number of 32-bit
   instruction words written (always 1; the callers advance their
   uint32_t pointer by the return value).

   0      6     11    16          30 32
   | OPCD | RST | RA  |     DS    |XO| */

__attribute__((unused)) /* Maybe unused due to conditional compilation. */
static int
gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
{
  uint32_t insn;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rst & ~0x1f) == 0);
  gdb_assert ((ra & ~0x1f) == 0);
  gdb_assert ((xo & ~0x3) == 0);

  /* DS-form offsets are multiples of 4: the low two bits of DS are
     dropped and XO occupies those bit positions instead.  */
  insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
  *buf = (opcd << 26) | insn;
  return 1;
}
1104
/* The following are frequently used ds-form instructions. */
1106
1107 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1108 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1109 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1110 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
1111
1112 /* Generate a d-form instruction in BUF.
1113
1114 0 6 11 16 32
1115 | OPCD | RST | RA | D | */
1116
1117 static int
1118 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1119 {
1120 uint32_t insn;
1121
1122 gdb_assert ((opcd & ~0x3f) == 0);
1123 gdb_assert ((rst & ~0x1f) == 0);
1124 gdb_assert ((ra & ~0x1f) == 0);
1125
1126 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1127 *buf = (opcd << 26) | insn;
1128 return 1;
1129 }
1130
/* The following are frequently used d-form instructions. */
1132
1133 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1134 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1135 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1136 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1137 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1138 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1139 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1140 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1141 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1142
/* Generate an xfx-form instruction in BUF and return the number of
   32-bit instruction words written (always 1).

   0      6     11         21        31 32
   | OPCD | RST |    RI    |    XO   |/| */

static int
gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
{
  uint32_t insn;
  /* The 10-bit RI field is encoded with its two 5-bit halves swapped,
     the same split-field layout used for the SPR number in
     mtspr/mfspr.  */
  unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rst & ~0x1f) == 0);
  gdb_assert ((xo & ~0x3ff) == 0);

  insn = (rst << 21) | (n << 11) | (xo << 1);
  *buf = (opcd << 26) | insn;
  return 1;
}
1163
/* The following are frequently used xfx-form instructions. */
1165
1166 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1167 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1168 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1169 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1170 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1171 E & 0xf, 598)
1172 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
1173
1174
/* Generate an x-form instruction in BUF and return the number of 32-bit
   instruction words written (always 1).

   0      6     11    16    21       31 32
   | OPCD | RST | RA  | RB  |   XO   |RC| */

static int
gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
{
  uint32_t insn;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rst & ~0x1f) == 0);
  gdb_assert ((ra & ~0x1f) == 0);
  gdb_assert ((rb & ~0x1f) == 0);
  gdb_assert ((xo & ~0x3ff) == 0);
  gdb_assert ((rc & ~1) == 0);

  insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
  *buf = (opcd << 26) | insn;
  return 1;
}
1196
/* The following are frequently used x-form instructions. */
1198
1199 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1200 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1201 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1202 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1203 /* Assume bf = cr7. */
1204 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
1205
1206
/* Generate an md-form instruction in BUF and return the number of 32-bit
   instruction words written (always 1).

   0      6    11    16    21     27   30 31 32
   | OPCD | RS | RA  | sh  |  mb  | XO |sh|Rc| */

static int
gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
	     int xo, int rc)
{
  uint32_t insn;
  /* The 6-bit MB field is stored rotated: mb[0:4] in the high five
     bits, mb[5] in the low bit.  */
  unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
  /* The 6-bit shift count is likewise split: sh[0:4] goes in the sh
     field at bits 16-20, sh[5] in bit 30.  */
  unsigned int sh0_4 = sh & 0x1f;
  unsigned int sh5 = (sh >> 5) & 1;

  gdb_assert ((opcd & ~0x3f) == 0);
  gdb_assert ((rs & ~0x1f) == 0);
  gdb_assert ((ra & ~0x1f) == 0);
  gdb_assert ((sh & ~0x3f) == 0);
  gdb_assert ((mb & ~0x3f) == 0);
  gdb_assert ((xo & ~0x7) == 0);
  gdb_assert ((rc & ~0x1) == 0);

  insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
	 | (sh5 << 1) | (xo << 2) | (rc & 1);
  *buf = (opcd << 26) | insn;
  return 1;
}
1234
1235 /* The following are frequently used md-form instructions. */
1236
1237 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1238 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1239 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1240 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1241
1242 /* Generate a i-form instruction in BUF and return the number of bytes written.
1243
1244 0 6 30 31 32
1245 | OPCD | LI |AA|LK| */
1246
1247 static int
1248 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1249 {
1250 uint32_t insn;
1251
1252 gdb_assert ((opcd & ~0x3f) == 0);
1253
1254 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1255 *buf = (opcd << 26) | insn;
1256 return 1;
1257 }
1258
1259 /* The following are frequently used i-form instructions. */
1260
1261 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1262 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1263
1264 /* Generate a b-form instruction in BUF and return the number of bytes written.
1265
1266 0 6 11 16 30 31 32
1267 | OPCD | BO | BI | BD |AA|LK| */
1268
1269 static int
1270 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1271 int aa, int lk)
1272 {
1273 uint32_t insn;
1274
1275 gdb_assert ((opcd & ~0x3f) == 0);
1276 gdb_assert ((bo & ~0x1f) == 0);
1277 gdb_assert ((bi & ~0x1f) == 0);
1278
1279 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1280 *buf = (opcd << 26) | insn;
1281 return 1;
1282 }
1283
1284 /* The following are frequently used b-form instructions. */
1285 /* Assume bi = cr7. */
1286 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
1287
1288 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1289 respectively. They are primary used for save/restore GPRs in jump-pad,
1290 not used for bytecode compiling. */
1291
1292 #ifdef __powerpc64__
1293 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1294 GEN_LD (buf, rt, ra, si) : \
1295 GEN_LWZ (buf, rt, ra, si))
1296 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1297 GEN_STD (buf, rt, ra, si) : \
1298 GEN_STW (buf, rt, ra, si))
1299 #else
1300 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1301 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1302 #endif
1303
/* Generate a sequence of instructions to load IMM in the register REG.
   Write the instructions in BUF and return the number of 32-bit
   instruction words written (between 1 and 5).  IS_64 selects 64-bit
   semantics; a full 64-bit IMM is only valid when IS_64 is set.  */

static int
gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
{
  uint32_t *p = buf;

  /* True iff IMM sign-extends from 16 bits: the unsigned addition
     wraps for small negative values, mapping [-32768, 32767] onto
     [0, 65535].  */
  if ((imm + 32768) < 65536)
    {
      /* li reg, imm[15:0] */
      p += GEN_LI (p, reg, imm);
    }
  else if ((imm >> 32) == 0)
    {
      /* IMM fits in the low 32 bits.
	 lis reg, imm[31:16]
	 ori reg, reg, imm[15:0]
	 rldicl reg, reg, 0, 32 */
      p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
      if ((imm & 0xffff) != 0)
	p += GEN_ORI (p, reg, reg, imm & 0xffff);
      /* Clear upper 32-bit if sign-bit is set.  */
      if (imm & (1u << 31) && is_64)
	p += GEN_RLDICL (p, reg, reg, 0, 32);
    }
  else
    {
      gdb_assert (is_64);
      /* Build the constant in two 32-bit halves, shifting the first
	 into place.
	 lis    reg, <imm[63:48]>
	 ori    reg, reg, <imm[48:32]>
	 rldicr reg, reg, 32, 31
	 oris   reg, reg, <imm[31:16]>
	 ori    reg, reg, <imm[15:0]> */
      p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
      if (((imm >> 32) & 0xffff) != 0)
	p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
      p += GEN_RLDICR (p, reg, reg, 32, 31);
      if (((imm >> 16) & 0xffff) != 0)
	p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
      if ((imm & 0xffff) != 0)
	p += GEN_ORI (p, reg, reg, (imm & 0xffff));
    }

  return p - buf;
}
1349
/* Generate a sequence for atomically exchange at location LOCK.
   This code sequence clobbers r6, r7, r8.  LOCK is the location for
   the atomic-xchg, OLD_VALUE is expected old value stored in the
   location, and R_NEW is a register for the new value.  R_NEW must
   therefore not be r6, r7 or r8.  Returns the number of 32-bit
   instruction words written.  */

static int
gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
		 int is_64)
{
  const int r_lock = 6;
  const int r_old = 7;
  const int r_tmp = 8;
  uint32_t *p = buf;

  /* Spin until the reservation succeeds while the location still holds
     OLD (compare is cmpw on cr7, matching GEN_CMPW/GEN_BNE):

     1: lwarx   TMP, 0, LOCK
	cmpw    TMP, OLD
	bne     1b
	stwcx.  NEW, 0, LOCK
	bne     1b */

  p += gen_limm (p, r_lock, lock, is_64);
  p += gen_limm (p, r_old, old_value, is_64);

  p += GEN_LWARX (p, r_tmp, 0, r_lock);
  p += GEN_CMPW (p, r_tmp, r_old);
  p += GEN_BNE (p, -8);
  p += GEN_STWCX (p, r_new, 0, r_lock);
  p += GEN_BNE (p, -16);

  return p - buf;
}
1382
/* Generate a sequence of instructions for calling a function
   at address of FN.  Return the number of 32-bit instruction words
   written in BUF.  If IS_OPD, FN is the address of an ELFv1 function
   descriptor rather than of the entry point itself.  */

static int
gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
{
  uint32_t *p = buf;

  /* Must be called by r12 for caller to calculate TOC address. */
  p += gen_limm (p, 12, fn, is_64);
  if (is_opd)
    {
      /* Function descriptor layout: entry point at offset 0, TOC
	 pointer at offset 8, environment pointer at offset 16.  */
      p += GEN_LOAD (p, 11, 12, 16, is_64);
      p += GEN_LOAD (p, 2, 12, 8, is_64);
      p += GEN_LOAD (p, 12, 12, 0, is_64);
    }
  p += GEN_MTSPR (p, 12, 9);	/* mtctr r12 */
  *p++ = 0x4e800421;		/* bctrl */

  return p - buf;
}
1404
/* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
   of instruction.  This function is used to adjust pc-relative instructions
   when copying.

   Unconditional branches (op6 == 18) only need their 26-bit displacement
   re-biased.  Conditional branches (op6 == 16) carry only a 16-bit
   displacement, so when the re-biased offset no longer fits they are
   rewritten as inverted short branches that skip over an unconditional
   long branch.  Instructions whose new displacement is out of range are
   silently dropped (nothing is written and *TO is left unchanged).  */

static void
ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
{
  uint32_t insn, op6;
  long rel, newrel;

  /* Fetch the original instruction from the inferior.  */
  read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
  op6 = PPC_OP6 (insn);

  if (op6 == 18 && (insn & 2) == 0)
    {
      /* Unconditional branch (b/bl) with AA = 0, i.e. pc-relative.  */
      rel = PPC_LI (insn);
      newrel = (oldloc - *to) + rel;

      /* Out of range.  Cannot relocate instruction.  */
      if (newrel >= (1 << 25) || newrel < -(1 << 25))
	return;

      insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
    }
  else if (op6 == 16 && (insn & 2) == 0)
    {
      /* Conditional branch with AA = 0.  */

      /* If the new relocation is too big for even a 26-bit unconditional
	 branch, there is nothing we can do.  Just abort.

	 Otherwise, if it can be fit in a 16-bit conditional branch, just
	 copy the instruction and relocate the address.

	 If it's too big for a conditional branch (16-bit), try to invert
	 the condition and jump with a 26-bit branch.  For example,

	 beq  .Lgoto
	 INSN1

	 =>

	 bne  1f (+8)
	 b    .Lgoto
	 1:INSN1

	 After this transform, we are actually jumping from *TO+4 instead
	 of *TO, so check the relocation again because it will be 1-insn
	 farther than before if *TO is after OLDLOC.


	 A BDNZT (or similar) is transformed from

	 bdnzt  eq, .Lgoto
	 INSN1

	 =>

	 bdz    1f (+12)
	 bf     eq, 1f (+8)
	 b      .Lgoto
	 1:INSN1

	 See also "BO field encodings".  */

      rel = PPC_BD (insn);
      newrel = (oldloc - *to) + rel;

      if (newrel < (1 << 15) && newrel >= -(1 << 15))
	insn = (insn & ~0xfffc) | (newrel & 0xfffc);
      else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
	{
	  /* Branch tests only the condition bit or only CTR, so a
	     single bit flip inverts it.  */
	  newrel -= 4;

	  /* Out of range.  Cannot relocate instruction.  */
	  if (newrel >= (1 << 25) || newrel < -(1 << 25))
	    return;

	  if ((PPC_BO (insn) & 0x14) == 0x4)
	    insn ^= (1 << 24);
	  else if ((PPC_BO (insn) & 0x14) == 0x10)
	    insn ^= (1 << 22);

	  /* Jump over the unconditional branch.  */
	  insn = (insn & ~0xfffc) | 0x8;
	  target_write_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  /* Build an unconditional branch and copy LK bit.  */
	  insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
	  target_write_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  return;
	}
      else if ((PPC_BO (insn) & 0x14) == 0)
	{
	  /* Branch tests both CTR and the condition bit, so two
	     inverted branches are needed.  */
	  uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
	  uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;

	  newrel -= 8;

	  /* Out of range.  Cannot relocate instruction.  */
	  if (newrel >= (1 << 25) || newrel < -(1 << 25))
	    return;

	  /* Copy BI field.  */
	  bf_insn |= (insn & 0x1f0000);

	  /* Invert condition.  */
	  bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
	  bf_insn |= (insn ^ (1 << 24)) & (1 << 24);

	  target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
	  *to += 4;
	  target_write_memory (*to, (unsigned char *) &bf_insn, 4);
	  *to += 4;

	  /* Build an unconditional branch and copy LK bit.  */
	  insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
	  target_write_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  return;
	}
      else /* (BO & 0x14) == 0x14, branch always.  */
	{
	  /* Out of range.  Cannot relocate instruction.  */
	  if (newrel >= (1 << 25) || newrel < -(1 << 25))
	    return;

	  /* Build an unconditional branch and copy LK bit.  */
	  insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
	  target_write_memory (*to, (unsigned char *) &insn, 4);
	  *to += 4;

	  return;
	}
    }

  target_write_memory (*to, (unsigned char *) &insn, 4);
  *to += 4;
}
1549
1550 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1551 See target.h for details. */
1552
1553 static int
1554 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1555 CORE_ADDR collector,
1556 CORE_ADDR lockaddr,
1557 ULONGEST orig_size,
1558 CORE_ADDR *jump_entry,
1559 CORE_ADDR *trampoline,
1560 ULONGEST *trampoline_size,
1561 unsigned char *jjump_pad_insn,
1562 ULONGEST *jjump_pad_insn_size,
1563 CORE_ADDR *adjusted_insn_addr,
1564 CORE_ADDR *adjusted_insn_addr_end,
1565 char *err)
1566 {
1567 uint32_t buf[256];
1568 uint32_t *p = buf;
1569 int j, offset;
1570 CORE_ADDR buildaddr = *jump_entry;
1571 const CORE_ADDR entryaddr = *jump_entry;
1572 int rsz, min_frame, frame_size, tp_reg;
1573 #ifdef __powerpc64__
1574 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1575 int is_64 = register_size (regcache->tdesc, 0) == 8;
1576 int is_opd = is_64 && !is_elfv2_inferior ();
1577 #else
1578 int is_64 = 0, is_opd = 0;
1579 #endif
1580
1581 #ifdef __powerpc64__
1582 if (is_64)
1583 {
1584 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1585 rsz = 8;
1586 min_frame = 112;
1587 frame_size = (40 * rsz) + min_frame;
1588 tp_reg = 13;
1589 }
1590 else
1591 {
1592 #endif
1593 rsz = 4;
1594 min_frame = 16;
1595 frame_size = (40 * rsz) + min_frame;
1596 tp_reg = 2;
1597 #ifdef __powerpc64__
1598 }
1599 #endif
1600
1601 /* Stack frame layout for this jump pad,
1602
1603 High thread_area (r13/r2) |
1604 tpoint - collecting_t obj
1605 PC/<tpaddr> | +36
1606 CTR | +35
1607 LR | +34
1608 XER | +33
1609 CR | +32
1610 R31 |
1611 R29 |
1612 ... |
1613 R1 | +1
1614 R0 - collected registers
1615 ... |
1616 ... |
1617 Low Back-chain -
1618
1619
1620 The code flow of this jump pad,
1621
1622 1. Adjust SP
1623 2. Save GPR and SPR
1624 3. Prepare argument
1625 4. Call gdb_collector
1626 5. Restore GPR and SPR
1627 6. Restore SP
1628 7. Build a jump for back to the program
1629 8. Copy/relocate original instruction
1630 9. Build a jump for replacing original instruction. */
1631
1632 /* Adjust stack pointer. */
1633 if (is_64)
1634 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1635 else
1636 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1637
1638 /* Store GPRs. Save R1 later, because it had just been modified, but
1639 we want the original value. */
1640 for (j = 2; j < 32; j++)
1641 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1642 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1643 /* Set r0 to the original value of r1 before adjusting stack frame,
1644 and then save it. */
1645 p += GEN_ADDI (p, 0, 1, frame_size);
1646 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1647
1648 /* Save CR, XER, LR, and CTR. */
1649 p += GEN_MFCR (p, 3); /* mfcr r3 */
1650 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1651 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1652 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1653 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1654 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1655 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1656 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1657
1658 /* Save PC<tpaddr> */
1659 p += gen_limm (p, 3, tpaddr, is_64);
1660 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1661
1662
1663 /* Setup arguments to collector. */
1664 /* Set r4 to collected registers. */
1665 p += GEN_ADDI (p, 4, 1, min_frame);
1666 /* Set r3 to TPOINT. */
1667 p += gen_limm (p, 3, tpoint, is_64);
1668
1669 /* Prepare collecting_t object for lock. */
1670 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1671 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1672 /* Set R5 to collecting object. */
1673 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1674
1675 p += GEN_LWSYNC (p);
1676 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1677 p += GEN_LWSYNC (p);
1678
1679 /* Call to collector. */
1680 p += gen_call (p, collector, is_64, is_opd);
1681
1682 /* Simply write 0 to release the lock. */
1683 p += gen_limm (p, 3, lockaddr, is_64);
1684 p += gen_limm (p, 4, 0, is_64);
1685 p += GEN_LWSYNC (p);
1686 p += GEN_STORE (p, 4, 3, 0, is_64);
1687
1688 /* Restore stack and registers. */
1689 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1690 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1691 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1692 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1693 p += GEN_MTCR (p, 3); /* mtcr r3 */
1694 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1695 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1696 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1697
1698 /* Restore GPRs. */
1699 for (j = 2; j < 32; j++)
1700 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1701 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1702 /* Restore SP. */
1703 p += GEN_ADDI (p, 1, 1, frame_size);
1704
1705 /* Flush instructions to inferior memory. */
1706 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1707
1708 /* Now, insert the original instruction to execute in the jump pad. */
1709 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1710 *adjusted_insn_addr_end = *adjusted_insn_addr;
1711 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1712
1713 /* Verify the relocation size. If should be 4 for normal copy,
1714 8 or 12 for some conditional branch. */
1715 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1716 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1717 {
1718 sprintf (err, "E.Unexpected instruction length = %d"
1719 "when relocate instruction.",
1720 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1721 return 1;
1722 }
1723
1724 buildaddr = *adjusted_insn_addr_end;
1725 p = buf;
1726 /* Finally, write a jump back to the program. */
1727 offset = (tpaddr + 4) - buildaddr;
1728 if (offset >= (1 << 25) || offset < -(1 << 25))
1729 {
1730 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1731 "(offset 0x%x > 26-bit).", offset);
1732 return 1;
1733 }
1734 /* b <tpaddr+4> */
1735 p += GEN_B (p, offset);
1736 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1737 *jump_entry = buildaddr + (p - buf) * 4;
1738
1739 /* The jump pad is now built. Wire in a jump to our jump pad. This
1740 is always done last (by our caller actually), so that we can
1741 install fast tracepoints with threads running. This relies on
1742 the agent's atomic write support. */
1743 offset = entryaddr - tpaddr;
1744 if (offset >= (1 << 25) || offset < -(1 << 25))
1745 {
1746 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1747 "(offset 0x%x > 26-bit).", offset);
1748 return 1;
1749 }
1750 /* b <jentry> */
1751 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1752 *jjump_pad_insn_size = 4;
1753
1754 return 0;
1755 }
1756
/* Returns the minimum instruction length for installing a tracepoint.
   On PowerPC every instruction is 4 bytes, and the jump to the pad is
   a single branch.  */

static int
ppc_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
1764
1765 /* Emits a given buffer into the target at current_insn_ptr. Length
1766 is in units of 32-bit words. */
1767
1768 static void
1769 emit_insns (uint32_t *buf, int n)
1770 {
1771 n = n * sizeof (uint32_t);
1772 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1773 current_insn_ptr += n;
1774 }
1775
1776 #define __EMIT_ASM(NAME, INSNS) \
1777 do \
1778 { \
1779 extern uint32_t start_bcax_ ## NAME []; \
1780 extern uint32_t end_bcax_ ## NAME []; \
1781 emit_insns (start_bcax_ ## NAME, \
1782 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1783 __asm__ (".section .text.__ppcbcax\n\t" \
1784 "start_bcax_" #NAME ":\n\t" \
1785 INSNS "\n\t" \
1786 "end_bcax_" #NAME ":\n\t" \
1787 ".previous\n\t"); \
1788 } while (0)
1789
1790 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1791 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1792
1793 /*
1794
1795 Bytecode execution stack frame - 32-bit
1796
1797 | LR save area (SP + 4)
1798 SP' -> +- Back chain (SP + 0)
1799 | Save r31 for access saved arguments
1800 | Save r30 for bytecode stack pointer
1801 | Save r4 for incoming argument *value
1802 | Save r3 for incoming argument regs
1803 r30 -> +- Bytecode execution stack
1804 |
   | 64 bytes (8 doublewords) initially.
1806 | Expand stack as needed.
1807 |
1808 +-
1809 | Some padding for minimum stack frame and 16-byte alignment.
1810 | 16 bytes.
1811 SP +- Back-chain (SP')
1812
1813 initial frame size
1814 = 16 + (4 * 4) + 64
1815 = 96
1816
1817 r30 is the stack-pointer for bytecode machine.
1818 It should point to next-empty, so we can use LDU for pop.
1819 r3 is used for cache of the high part of TOP value.
1820 It was the first argument, pointer to regs.
1821 r4 is used for cache of the low part of TOP value.
1822 It was the second argument, pointer to the result.
1823 We should set *result = TOP after leaving this function.
1824
1825 Note:
1826 * To restore stack at epilogue
1827 => sp = r31
1828 * To check stack is big enough for bytecode execution.
1829 => r30 - 8 > SP + 8
1830 * To return execution result.
1831 => 0(r4) = TOP
1832
1833 */
1834
1835 /* Regardless of endian, register 3 is always high part, 4 is low part.
1836 These defines are used when the register pair is stored/loaded.
   Likewise, to simplify code, have a similar define for 5:6. */
1838
1839 #if __BYTE_ORDER == __LITTLE_ENDIAN
1840 #define TOP_FIRST "4"
1841 #define TOP_SECOND "3"
1842 #define TMP_FIRST "6"
1843 #define TMP_SECOND "5"
1844 #else
1845 #define TOP_FIRST "3"
1846 #define TOP_SECOND "4"
1847 #define TMP_FIRST "5"
1848 #define TMP_SECOND "6"
1849 #endif
1850
/* Emit prologue in inferior memory.  See the stack-frame layout comment
   above: r31 becomes the frame pointer (original SP), r30 the bytecode
   stack pointer, and TOP (r3:r4, high:low) starts at zero.  */

static void
ppc_emit_prologue (void)
{
  EMIT_ASM (/* Save return address. */
	    "mflr 0 \n"
	    "stw 0, 4(1) \n"
	    /* Adjust SP.  96 is the initial frame size. */
	    "stwu 1, -96(1) \n"
	    /* Save r31/r30 and the incoming arguments. */
	    "stw 31, 96-4(1) \n"
	    "stw 30, 96-8(1) \n"
	    "stw 4, 96-12(1) \n"
	    "stw 3, 96-16(1) \n"
	    /* Point r31 to the original r1 for accessing arguments. */
	    "addi 31, 1, 96 \n"
	    /* Point r30 at the (empty) bytecode stack top. */
	    "addi 30, 1, 64 \n"
	    /* Initialize TOP (r3:r4) to 0. */
	    "li 3, 0 \n"
	    "li 4, 0 \n");
}
1874
/* Emit epilogue in inferior memory.  Stores TOP through the result
   pointer saved at -12(r31), restores the saved registers, and returns
   0 (no error) in r3.  */

static void
ppc_emit_epilogue (void)
{
  EMIT_ASM (/* *result = TOP */
	    "lwz 5, -12(31) \n"
	    "stw " TOP_FIRST ", 0(5) \n"
	    "stw " TOP_SECOND ", 4(5) \n"
	    /* Restore registers.  NOTE(review): r30 is reloaded through
	       r31 *after* r31 itself has been restored; if the caller's
	       saved r31 value differs from this frame's r31, -8(31) no
	       longer addresses the save slot — verify the intended
	       restore order.  */
	    "lwz 31, -4(31) \n"
	    "lwz 30, -8(31) \n"
	    /* Restore SP via the back-chain. */
	    "lwz 1, 0(1) \n"
	    /* Restore LR from the caller's save area. */
	    "lwz 0, 4(1) \n"
	    /* Return 0 for no-error. */
	    "li 3, 0 \n"
	    "mtlr 0 \n"
	    "blr \n");
}
1896
/* TOP = stack[--sp] + TOP
   Pops the 64-bit entry into r5:r6 (high:low) and adds it to TOP
   (r3:r4) with carry propagation.  */

static void
ppc_emit_add (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30)\n"
	    "addc 4, 6, 4 \n"
	    "adde 3, 5, 3 \n");
}
1907
/* TOP = stack[--sp] - TOP
   Pops into r5:r6 and subtracts TOP (r3:r4) from it with borrow
   propagation.  */

static void
ppc_emit_sub (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "subfc 4, 4, 6 \n"
	    "subfe 3, 3, 5 \n");
}
1918
/* TOP = stack[--sp] * TOP
   64-bit product composed from 32-bit multiplies:
   low  = lo(l1 * l2)
   high = hi(l1 * l2) + lo(l1 * h2) + lo(l2 * h1).  */

static void
ppc_emit_mul (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "mulhwu 7, 6, 4 \n"
	    "mullw 3, 6, 3 \n"
	    "mullw 5, 4, 5 \n"
	    "mullw 4, 6, 4 \n"
	    "add 3, 5, 3 \n"
	    "add 3, 7, 3 \n");
}
1933
/* TOP = stack[--sp] << TOP
   64-bit left shift built from 32-bit shifts; the popped value is in
   r5 (high) / r6 (low) and the shift count in r4.  */

static void
ppc_emit_lsh (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "subfic 3, 4, 32\n"		/* r3 = 32 - TOP */
	    "addi 7, 4, -32\n"		/* r7 = TOP - 32 */
	    "slw 5, 5, 4\n"		/* Shift high part left */
	    "slw 4, 6, 4\n"		/* Shift low part left */
	    "srw 3, 6, 3\n"		/* Shift low to high if shift < 32 */
	    "slw 7, 6, 7\n"		/* Shift low to high if shift >= 32 */
	    "or 3, 5, 3\n"
	    "or 3, 7, 3\n");		/* Assemble high part */
}
1950
/* TOP = stack[--sp] >> TOP
   (Arithmetic shift right.)  Shifts >= 32 take the first path (high
   word shifted into the low word); smaller shifts combine both
   halves.  */

static void
ppc_emit_rsh_signed (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "addi 7, 4, -32\n"		/* r7 = TOP - 32 */
	    "sraw 3, 5, 4\n"		/* Shift high part right */
	    "cmpwi 7, 1\n"
	    "blt 0, 1f\n"		/* If shift <= 32, goto 1: */
	    "sraw 4, 5, 7\n"		/* Shift high to low */
	    "b 2f\n"
	    "1:\n"
	    "subfic 7, 4, 32\n"		/* r7 = 32 - TOP */
	    "srw 4, 6, 4\n"		/* Shift low part right */
	    "slw 5, 5, 7\n"		/* Shift high to low */
	    "or 4, 4, 5\n"		/* Assemble low part */
	    "2:\n");
}
1972
/* TOP = stack[--sp] >> TOP
   (Logical shift right.)  Branch-free: one of the two cross-word
   shifts yields zero depending on whether the count is below or at
   least 32.  */

static void
ppc_emit_rsh_unsigned (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "subfic 3, 4, 32\n"		/* r3 = 32 - TOP */
	    "addi 7, 4, -32\n"		/* r7 = TOP - 32 */
	    "srw 6, 6, 4\n"		/* Shift low part right */
	    "slw 3, 5, 3\n"		/* Shift high to low if shift < 32 */
	    "srw 7, 5, 7\n"		/* Shift high to low if shift >= 32 */
	    "or 6, 6, 3\n"
	    "srw 3, 5, 4\n"		/* Shift high part right */
	    "or 4, 6, 7\n");		/* Assemble low part */
}
1990
/* Emit code for sign-extension specified by ARG.  ARG is the source
   bit width (8, 16 or 32); the low word (r4) is sign-extended and the
   high word (r3) is filled with copies of the sign bit.  Any other
   width sets emit_error.  */

static void
ppc_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM ("extsb 4, 4\n"
		"srawi 3, 4, 31");
      break;
    case 16:
      EMIT_ASM ("extsh 4, 4\n"
		"srawi 3, 4, 31");
      break;
    case 32:
      EMIT_ASM ("srawi 3, 4, 31");
      break;
    default:
      emit_error = 1;
    }
}
2013
/* Emit code for zero-extension specified by ARG.  ARG is the source
   bit width (8, 16 or 32); the high bits of the low word (r4) are
   cleared and the high word (r3) is zeroed.  Any other width sets
   emit_error.  */

static void
ppc_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM ("clrlwi 4,4,24\n"
		"li 3, 0\n");
      break;
    case 16:
      EMIT_ASM ("clrlwi 4,4,16\n"
		"li 3, 0\n");
      break;
    case 32:
      EMIT_ASM ("li 3, 0");
      break;
    default:
      emit_error = 1;
    }
}
2036
/* TOP = !TOP
   i.e., TOP = (TOP == 0) ? 1 : 0;
   cntlzw yields 32 only when its operand is zero; shifting right by 5
   turns that into 1, and any nonzero operand into 0.  */

static void
ppc_emit_log_not (void)
{
  EMIT_ASM ("or 4, 3, 4 \n"
	    "cntlzw 4, 4 \n"
	    "srwi 4, 4, 5 \n"
	    "li 3, 0 \n");
}
2048
/* TOP = stack[--sp] & TOP
   Pops into r5:r6 and ANDs word-wise with TOP (r3:r4).  */

static void
ppc_emit_bit_and (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "and 4, 6, 4 \n"
	    "and 3, 5, 3 \n");
}
2059
/* TOP = stack[--sp] | TOP
   Pops into r5:r6 and ORs word-wise with TOP (r3:r4).  */

static void
ppc_emit_bit_or (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "or 4, 6, 4 \n"
	    "or 3, 5, 3 \n");
}
2070
/* TOP = stack[--sp] ^ TOP
   Pops into r5:r6 and XORs word-wise with TOP (r3:r4).  */

static void
ppc_emit_bit_xor (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "xor 4, 6, 4 \n"
	    "xor 3, 5, 3 \n");
}
2081
/* TOP = ~TOP
   i.e., TOP = ~(TOP | TOP); nor with identical operands is a bitwise
   NOT of each word.  */

static void
ppc_emit_bit_not (void)
{
  EMIT_ASM ("nor 3, 3, 3 \n"
	    "nor 4, 4, 4 \n");
}
2091
/* TOP = stack[--sp] == TOP
   XOR of the two values is zero iff they are equal; the cntlzw/srwi
   pair converts zero to 1 and nonzero to 0 as in ppc_emit_log_not.  */

static void
ppc_emit_equal (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "xor 4, 6, 4 \n"
	    "xor 3, 5, 3 \n"
	    "or 4, 3, 4 \n"
	    "cntlzw 4, 4 \n"
	    "srwi 4, 4, 5 \n"
	    "li 3, 0 \n");
}
2106
/* TOP = stack[--sp] < TOP
   (Signed comparison.)  High words are compared signed (cmpw), low
   words unsigned (cmplw); the CR bits are combined into cr7 bit 0 and
   extracted with rlwinm.  */

static void
ppc_emit_less_signed (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "cmplw 6, 6, 4 \n"
	    "cmpw 7, 5, 3 \n"
	    /* CR6 bit 0 = low less and high equal */
	    "crand 6*4+0, 6*4+0, 7*4+2\n"
	    /* CR7 bit 0 = (low less and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "mfcr 4 \n"
	    "rlwinm 4, 4, 29, 31, 31 \n"
	    "li 3, 0 \n");
}
2125
/* TOP = stack[--sp] < TOP
   (Unsigned comparison.)  Same scheme as ppc_emit_less_signed, but the
   high words are compared unsigned (cmplw) as well.  */

static void
ppc_emit_less_unsigned (void)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "cmplw 6, 6, 4 \n"
	    "cmplw 7, 5, 3 \n"
	    /* CR6 bit 0 = low less and high equal */
	    "crand 6*4+0, 6*4+0, 7*4+2\n"
	    /* CR7 bit 0 = (low less and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "mfcr 4 \n"
	    "rlwinm 4, 4, 29, 31, 31 \n"
	    "li 3, 0 \n");
}
2144
/* Access the memory address in TOP in size of SIZE.
   Zero-extend the read value.

   The address is in r4 (low half of TOP); the loaded value replaces TOP
   in r3/r4.  For sub-doubleword loads the high half (r3) is zeroed.
   For an 8-byte load the two words are fetched in the order that puts
   the numerically-high word in r3 for the current byte order.  */

static void
ppc_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM ("lbz 4, 0(4)\n"
		"li 3, 0");
      break;
    case 2:
      EMIT_ASM ("lhz 4, 0(4)\n"
		"li 3, 0");
      break;
    case 4:
      EMIT_ASM ("lwz 4, 0(4)\n"
		"li 3, 0");
      break;
    case 8:
      /* Load the second word first so the address in r4 is not
	 clobbered before the last use.  */
      if (__BYTE_ORDER == __LITTLE_ENDIAN)
	EMIT_ASM ("lwz 3, 4(4)\n"
		  "lwz 4, 0(4)");
      else
	EMIT_ASM ("lwz 3, 0(4)\n"
		  "lwz 4, 4(4)");
      break;
    }
}
2175
/* TOP = NUM

   Materialize the 64-bit constant NUM into the r3/r4 TOP pair, high
   word in r3, low word in r4 (gen_limm's last argument 0 selects
   32-bit immediates).  */

static void
ppc_emit_const (LONGEST num)
{
  uint32_t buf[10];
  uint32_t *p = buf;

  p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
  p += gen_limm (p, 4, num & 0xffffffff, 0);

  emit_insns (buf, p - buf);
  /* Paranoia: the scratch buffer must have been large enough.  */
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2190
/* Set TOP to the value of register REG by calling get_raw_reg function
   with two argument, collected buffer and register number.

   get_raw_reg returns the value in r3/r4; on little-endian the halves
   come back swapped relative to our TOP convention, so they are
   exchanged through r5 afterwards.  */

static void
ppc_emit_reg (int reg)
{
  uint32_t buf[13];
  uint32_t *p = buf;

  /* fctx->regs is passed in r3 and then saved in -16(31). */
  p += GEN_LWZ (p, 3, 31, -16);
  p += GEN_LI (p, 4, reg);	/* li r4, reg */
  p += gen_call (p, get_raw_reg_func_addr (), 0, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));

  /* Swap the halves via r5 to match TOP's layout.  */
  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      EMIT_ASM ("mr 5, 4\n"
		"mr 4, 3\n"
		"mr 3, 5\n");
    }
}
2215
/* TOP = stack[--sp]

   Pop the bytecode stack into the r3/r4 TOP pair; lwzu's update form
   advances r30 by 8 as a side effect.  */

static void
ppc_emit_pop (void)
{
  EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
	    "lwz " TOP_SECOND ", 4(30) \n");
}
2224
/* stack[sp++] = TOP

   Because we may use up bytecode stack, expand 8 doublewords more
   if needed.  */

static void
ppc_emit_stack_flush (void)
{
  /* Make sure bytecode stack is big enough before push.
     Otherwise, expand 64-byte more.  The stwu stores r31 (the saved
     entry SP) as the back-chain word of the newly extended frame.  */

  EMIT_ASM ("  stw " TOP_FIRST ", 0(30) \n"
	    "  stw " TOP_SECOND ", 4(30)\n"
	    "  addi 5, 30, -(8 + 8) \n"
	    "  cmpw 7, 5, 1 \n"
	    "  bgt 7, 1f \n"
	    "  stwu 31, -64(1) \n"
	    "1:addi 30, 30, -8 \n");
}
2244
/* Swap TOP and stack[sp-1]

   Loads the topmost stored entry into r5/r6, stores the cached TOP
   (r3/r4) in its place, then moves r5/r6 into r3/r4.  Plain lwz (not
   lwzu): r30 is unchanged.  */

static void
ppc_emit_swap (void)
{
  EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 12(30) \n"
	    "stw " TOP_FIRST ", 8(30) \n"
	    "stw " TOP_SECOND ", 12(30) \n"
	    "mr 3, 5 \n"
	    "mr 4, 6 \n");
}
2257
2258 /* Discard N elements in the stack. Also used for ppc64. */
2259
2260 static void
2261 ppc_emit_stack_adjust (int n)
2262 {
2263 uint32_t buf[6];
2264 uint32_t *p = buf;
2265
2266 n = n << 3;
2267 if ((n >> 15) != 0)
2268 {
2269 emit_error = 1;
2270 return;
2271 }
2272
2273 p += GEN_ADDI (p, 30, 30, n);
2274
2275 emit_insns (buf, p - buf);
2276 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2277 }
2278
/* Call function FN.

   Emits an absolute call sequence generated by gen_call; the final two
   arguments (0, 0) select the 32-bit, no-TOC variant.  */

static void
ppc_emit_call (CORE_ADDR fn)
{
  uint32_t buf[11];
  uint32_t *p = buf;

  p += gen_call (p, fn, 0, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2292
/* FN's prototype is `LONGEST(*fn)(int)'.
   TOP = fn (arg1)

   The 64-bit return value arrives in r3/r4; on little-endian the
   halves are swapped afterwards to match TOP's layout.  */

static void
ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[15];
  uint32_t *p = buf;

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += gen_limm (p, 3, (uint32_t) arg1, 0);
  p += gen_call (p, fn, 0, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));

  /* Swap halves via r5 so TOP's convention is respected.  */
  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      EMIT_ASM ("mr 5, 4\n"
		"mr 4, 3\n"
		"mr 3, 5\n");
    }
}
2317
/* FN's prototype is `void(*fn)(int,LONGEST)'.
   fn (arg1, TOP)

   TOP should be preserved/restored before/after the call.  TOP is
   spilled to the next-empty stack slot at 0(r30), moved into the
   r5/r6 argument pair in the byte-order-appropriate half order, and
   reloaded after the call returns.  */

static void
ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[21];
  uint32_t *p = buf;

  /* Save TOP.  0(30) is next-empty.  */
  p += GEN_STW (p, 3, 30, 0);
  p += GEN_STW (p, 4, 30, 4);

  /* Setup argument.  arg1 is a 16-bit value.  */
  if (__BYTE_ORDER == __LITTLE_ENDIAN)
    {
      p += GEN_MR (p, 5, 4);
      p += GEN_MR (p, 6, 3);
    }
  else
    {
      p += GEN_MR (p, 5, 3);
      p += GEN_MR (p, 6, 4);
    }
  p += gen_limm (p, 3, (uint32_t) arg1, 0);
  p += gen_call (p, fn, 0, 0);

  /* Restore TOP */
  p += GEN_LWZ (p, 3, 30, 0);
  p += GEN_LWZ (p, 4, 30, 4);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2354
2355 /* Note in the following goto ops:
2356
2357 When emitting goto, the target address is later relocated by
2358 write_goto_address. OFFSET_P is the offset of the branch instruction
2359 in the code sequence, and SIZE_P is how to relocate the instruction,
2360 recognized by ppc_write_goto_address. In current implementation,
   SIZE can be either 24 or 14, for a branch or conditional-branch instruction.
2362 */
2363
/* If TOP is true, goto somewhere.  Otherwise, just fall-through.

   The "1:bne 0, 1b" is a self-branch placeholder; its displacement is
   patched later by ppc_write_goto_address.  *OFFSET_P is the byte
   offset of that branch within the emitted sequence (4th insn = 12)
   and *SIZE_P = 14 marks it as a conditional branch (14-bit BD).  */

static void
ppc_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("or. 3, 3, 4 \n"
	    "lwzu " TOP_FIRST ", 8(30) \n"
	    "lwz " TOP_SECOND ", 4(30) \n"
	    "1:bne 0, 1b \n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
2379
/* Unconditional goto.  Also used for ppc64.

   Emits a self-branch placeholder that ppc_write_goto_address later
   retargets; *SIZE_P = 24 marks it as an unconditional branch (24-bit
   LI field).  */

static void
ppc_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("1:b 1b");

  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 24;
}
2392
/* Goto if stack[--sp] == TOP

   Pops the compare operand into r5/r6, XORs it against TOP half-by-half
   (zero iff equal, setting CR0 via "or."), pops the new TOP, then emits
   a conditional self-branch placeholder (patched later; offset 28 =
   8th instruction, 14-bit displacement).  */

static void
ppc_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "xor 4, 6, 4 \n"
	    "xor 3, 5, 3 \n"
	    "or. 3, 3, 4 \n"
	    "lwzu " TOP_FIRST ", 8(30) \n"
	    "lwz " TOP_SECOND ", 4(30) \n"
	    "1:beq 0, 1b \n");

  if (offset_p)
    *offset_p = 28;
  if (size_p)
    *size_p = 14;
}
2412
/* Goto if stack[--sp] != TOP

   Identical to ppc_emit_eq_goto except the placeholder branch tests
   "not equal" (bne).  */

static void
ppc_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "xor 4, 6, 4 \n"
	    "xor 3, 5, 3 \n"
	    "or. 3, 3, 4 \n"
	    "lwzu " TOP_FIRST ", 8(30) \n"
	    "lwz " TOP_SECOND ", 4(30) \n"
	    "1:bne 0, 1b \n");

  if (offset_p)
    *offset_p = 28;
  if (size_p)
    *size_p = 14;
}
2432
/* Goto if stack[--sp] < TOP

   Signed 64-bit compare built from two 32-bit compares (CR6 = unsigned
   low halves, CR7 = signed high halves), merged into CR7 bit 0, then a
   conditional self-branch placeholder (offset 32 = 9th insn, 14-bit
   displacement) patched later by ppc_write_goto_address.  */

static void
ppc_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "cmplw 6, 6, 4 \n"
	    "cmpw 7, 5, 3 \n"
	    /* CR6 bit 0 = low less and high equal */
	    "crand 6*4+0, 6*4+0, 7*4+2\n"
	    /* CR7 bit 0 = (low less and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30) \n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b \n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}
2455
/* Goto if stack[--sp] <= TOP

   Like ppc_emit_lt_goto but merges the "low less-or-equal" condition
   (not greater-than, via crandc with CR6's GT bit) so equality falls
   through to the taken branch.  */

static void
ppc_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "cmplw 6, 6, 4 \n"
	    "cmpw 7, 5, 3 \n"
	    /* CR6 bit 0 = low less/equal and high equal */
	    "crandc 6*4+0, 7*4+2, 6*4+1\n"
	    /* CR7 bit 0 = (low less/eq and high equal) or high less */
	    "cror 7*4+0, 7*4+0, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30) \n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b \n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}
2478
/* Goto if stack[--sp] > TOP

   Mirror of ppc_emit_lt_goto using the GT bits of CR6/CR7; the merged
   greater-than condition lands in CR7 bit 0, tested by the placeholder
   branch (patched later).  */

static void
ppc_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "cmplw 6, 6, 4 \n"
	    "cmpw 7, 5, 3 \n"
	    /* CR6 bit 0 = low greater and high equal */
	    "crand 6*4+0, 6*4+1, 7*4+2\n"
	    /* CR7 bit 0 = (low greater and high equal) or high greater */
	    "cror 7*4+0, 7*4+1, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30) \n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b \n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}
2501
/* Goto if stack[--sp] >= TOP

   Like ppc_emit_gt_goto but the low-half condition is "not less"
   (crandc with CR6's LT bit), so equality also takes the branch.  */

static void
ppc_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
	    "lwz " TMP_SECOND ", 4(30) \n"
	    "cmplw 6, 6, 4 \n"
	    "cmpw 7, 5, 3 \n"
	    /* CR6 bit 0 = low ge and high equal */
	    "crandc 6*4+0, 7*4+2, 6*4+0\n"
	    /* CR7 bit 0 = (low ge and high equal) or high greater */
	    "cror 7*4+0, 7*4+1, 6*4+0\n"
	    "lwzu " TOP_FIRST ", 8(30)\n"
	    "lwz " TOP_SECOND ", 4(30)\n"
	    "1:blt 7, 1b \n");

  if (offset_p)
    *offset_p = 32;
  if (size_p)
    *size_p = 14;
}
2524
2525 /* Relocate previous emitted branch instruction. FROM is the address
2526 of the branch instruction, TO is the goto target address, and SIZE
2527 if the value we set by *SIZE_P before. Currently, it is either
2528 24 or 14 of branch and conditional-branch instruction.
2529 Also used for ppc64. */
2530
2531 static void
2532 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2533 {
2534 long rel = to - from;
2535 uint32_t insn;
2536 int opcd;
2537
2538 read_inferior_memory (from, (unsigned char *) &insn, 4);
2539 opcd = (insn >> 26) & 0x3f;
2540
2541 switch (size)
2542 {
2543 case 14:
2544 if (opcd != 16
2545 || (rel >= (1 << 15) || rel < -(1 << 15)))
2546 emit_error = 1;
2547 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2548 break;
2549 case 24:
2550 if (opcd != 18
2551 || (rel >= (1 << 25) || rel < -(1 << 25)))
2552 emit_error = 1;
2553 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2554 break;
2555 default:
2556 emit_error = 1;
2557 }
2558
2559 if (!emit_error)
2560 target_write_memory (from, (unsigned char *) &insn, 4);
2561 }
2562
/* Table of emit ops for 32-bit.

   Positional initializer of struct emit_ops — the entry order must
   match the member order of that struct; do not reorder.  */

static struct emit_ops ppc_emit_ops_impl =
{
  ppc_emit_prologue,
  ppc_emit_epilogue,
  ppc_emit_add,
  ppc_emit_sub,
  ppc_emit_mul,
  ppc_emit_lsh,
  ppc_emit_rsh_signed,
  ppc_emit_rsh_unsigned,
  ppc_emit_ext,
  ppc_emit_log_not,
  ppc_emit_bit_and,
  ppc_emit_bit_or,
  ppc_emit_bit_xor,
  ppc_emit_bit_not,
  ppc_emit_equal,
  ppc_emit_less_signed,
  ppc_emit_less_unsigned,
  ppc_emit_ref,
  ppc_emit_if_goto,
  ppc_emit_goto,
  ppc_write_goto_address,
  ppc_emit_const,
  ppc_emit_call,
  ppc_emit_reg,
  ppc_emit_pop,
  ppc_emit_stack_flush,
  ppc_emit_zero_ext,
  ppc_emit_swap,
  ppc_emit_stack_adjust,
  ppc_emit_int_call_1,
  ppc_emit_void_call_2,
  ppc_emit_eq_goto,
  ppc_emit_ne_goto,
  ppc_emit_lt_goto,
  ppc_emit_le_goto,
  ppc_emit_gt_goto,
  ppc_emit_ge_goto
};
2605
2606 #ifdef __powerpc64__
2607
2608 /*
2609
2610 Bytecode execution stack frame - 64-bit
2611
2612 | LR save area (SP + 16)
2613 | CR save area (SP + 8)
2614 SP' -> +- Back chain (SP + 0)
2615 | Save r31 for access saved arguments
2616 | Save r30 for bytecode stack pointer
2617 | Save r4 for incoming argument *value
2618 | Save r3 for incoming argument regs
2619 r30 -> +- Bytecode execution stack
2620 |
2621 | 64-byte (8 doublewords) at initial.
2622 | Expand stack as needed.
2623 |
2624 +-
2625 | Some padding for minimum stack frame.
2626 | 112 for ELFv1.
2627 SP +- Back-chain (SP')
2628
2629 initial frame size
2630 = 112 + (4 * 8) + 64
2631 = 208
2632
2633 r30 is the stack-pointer for bytecode machine.
2634 It should point to next-empty, so we can use LDU for pop.
2635 r3 is used for cache of TOP value.
2636 It was the first argument, pointer to regs.
2637 r4 is the second argument, pointer to the result.
2638 We should set *result = TOP after leaving this function.
2639
2640 Note:
2641 * To restore stack at epilogue
2642 => sp = r31
2643 * To check stack is big enough for bytecode execution.
2644 => r30 - 8 > SP + 112
2645 * To return execution result.
2646 => 0(r4) = TOP
2647
2648 */
2649
/* Emit prologue in inferior memory.  See above comments.  */

static void
ppc64v1_emit_prologue (void)
{
  /* On ELFv1, function pointers really point to function descriptor,
     so emit one here.  We don't care about contents of words 1 and 2,
     so let them just overlap our code.  The entry word must point just
     past the descriptor itself, hence current_insn_ptr + 8.  */
  uint64_t opd = current_insn_ptr + 8;
  uint32_t buf[2];

  /* Mind the strict aliasing rules.  */
  memcpy (buf, &opd, sizeof buf);
  emit_insns(buf, 2);
  EMIT_ASM (/* Save return address.  */
	    "mflr 0		\n"
	    "std 0, 16(1)	\n"
	    /* Save r30 and incoming arguments.  */
	    "std 31, -8(1)	\n"
	    "std 30, -16(1)	\n"
	    "std 4, -24(1)	\n"
	    "std 3, -32(1)	\n"
	    /* Point r31 to current r1 for access arguments.  */
	    "mr 31, 1		\n"
	    /* Adjust SP.  208 is the initial frame size.  */
	    "stdu 1, -208(1)	\n"
	    /* Set r30 to pointing stack-top.  */
	    "addi 30, 1, 168	\n"
	    /* Initial r3/TOP to 0.  */
	    "li 3, 0		\n");
}
2681
/* Emit prologue in inferior memory.  See above comments.
   Same as the ELFv1 variant but without the function descriptor,
   which ELFv2 does not use.  */

static void
ppc64v2_emit_prologue (void)
{
  EMIT_ASM (/* Save return address.  */
	    "mflr 0		\n"
	    "std 0, 16(1)	\n"
	    /* Save r30 and incoming arguments.  */
	    "std 31, -8(1)	\n"
	    "std 30, -16(1)	\n"
	    "std 4, -24(1)	\n"
	    "std 3, -32(1)	\n"
	    /* Point r31 to current r1 for access arguments.  */
	    "mr 31, 1		\n"
	    /* Adjust SP.  208 is the initial frame size.  */
	    "stdu 1, -208(1)	\n"
	    /* Set r30 to pointing stack-top.  */
	    "addi 30, 1, 168	\n"
	    /* Initial r3/TOP to 0.  */
	    "li 3, 0		\n");
}
2704
/* Emit epilogue in inferior memory.  See above comments.
   Unwinds via the back-chain word at 0(r1); stores TOP through the
   saved result pointer; returns 0 (no error) in r3.  */

static void
ppc64_emit_epilogue (void)
{
  EMIT_ASM (/* Restore SP.  */
	    "ld 1, 0(1)		\n"
	    /* *result = TOP */
	    "ld 4, -24(1)	\n"
	    "std 3, 0(4)	\n"
	    /* Restore registers.  */
	    "ld 31, -8(1)	\n"
	    "ld 30, -16(1)	\n"
	    /* Restore LR.  */
	    "ld 0, 16(1)	\n"
	    /* Return 0 for no-error.  */
	    "li 3, 0		\n"
	    "mtlr 0		\n"
	    "blr		\n");
}
2725
/* TOP = stack[--sp] + TOP

   64-bit: TOP lives entirely in r3; "ldu 4, 8(30)" pops one stack
   entry into r4 and advances r30.  */

static void
ppc64_emit_add (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "add 3, 4, 3	\n");
}
2734
/* TOP = stack[--sp] - TOP

   Operand order matters: the popped value (r4) is the minuend.  */

static void
ppc64_emit_sub (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "sub 3, 4, 3	\n");
}
2743
/* TOP = stack[--sp] * TOP  (low 64 bits of the product)  */

static void
ppc64_emit_mul (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "mulld 3, 4, 3	\n");
}
2752
/* TOP = stack[--sp] << TOP  */

static void
ppc64_emit_lsh (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "sld 3, 4, 3	\n");
}
2761
/* Top = stack[--sp] >> TOP
   (Arithmetic shift right: srad sign-extends)  */

static void
ppc64_emit_rsh_signed (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "srad 3, 4, 3	\n");
}
2771
/* Top = stack[--sp] >> TOP
   (Logical shift right: srd zero-fills)  */

static void
ppc64_emit_rsh_unsigned (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "srd 3, 4, 3	\n");
}
2781
/* Emit code for signed-extension specified by ARG.

   ARG is the source width in bits (8, 16 or 32); TOP (r3) is
   sign-extended in place.  Any other width sets emit_error.  */

static void
ppc64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM ("extsb 3, 3");
      break;
    case 16:
      EMIT_ASM ("extsh 3, 3");
      break;
    case 32:
      EMIT_ASM ("extsw 3, 3");
      break;
    default:
      emit_error = 1;
    }
}
2802
/* Emit code for zero-extension specified by ARG.

   ARG is the source width in bits; rldicl with shift 0 and mask-begin
   (64 - ARG) clears the upper bits of TOP (r3).  Any other width sets
   emit_error.  */

static void
ppc64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM ("rldicl 3,3,0,56");
      break;
    case 16:
      EMIT_ASM ("rldicl 3,3,0,48");
      break;
    case 32:
      EMIT_ASM ("rldicl 3,3,0,32");
      break;
    default:
      emit_error = 1;
    }
}
2823
/* TOP = !TOP
   i.e., TOP = (TOP == 0) ? 1 : 0;

   cntlzd of 0 is 64; shifting right by 6 maps 64 -> 1 and anything
   smaller -> 0.  */

static void
ppc64_emit_log_not (void)
{
  EMIT_ASM ("cntlzd 3, 3	\n"
	    "srdi 3, 3, 6	\n");
}
2833
/* TOP = stack[--sp] & TOP  */

static void
ppc64_emit_bit_and (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "and 3, 4, 3	\n");
}
2842
/* TOP = stack[--sp] | TOP  */

static void
ppc64_emit_bit_or (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "or 3, 4, 3	\n");
}
2851
/* TOP = stack[--sp] ^ TOP  */

static void
ppc64_emit_bit_xor (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "xor 3, 4, 3	\n");
}
2860
/* TOP = ~TOP
   i.e., TOP = ~(TOP | TOP)  — NOR of r3 with itself.  */

static void
ppc64_emit_bit_not (void)
{
  EMIT_ASM ("nor 3, 3, 3	\n");
}
2869
/* TOP = stack[--sp] == TOP

   XOR is zero iff equal; cntlzd/srdi turns that into a 1/0 boolean
   (same trick as ppc64_emit_log_not).  */

static void
ppc64_emit_equal (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "xor 3, 3, 4	\n"
	    "cntlzd 3, 3	\n"
	    "srdi 3, 3, 6	\n");
}
2880
/* TOP = stack[--sp] < TOP
   (Signed comparison)

   cmpd sets CR7; rlwinm extracts CR7's LT bit from the CR image into
   bit 31 of r3, yielding a 1/0 boolean.  */

static void
ppc64_emit_less_signed (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpd 7, 4, 3	\n"
	    "mfcr 3	\n"
	    "rlwinm 3, 3, 29, 31, 31	\n");
}
2892
/* TOP = stack[--sp] < TOP
   (Unsigned comparison — cmpld instead of cmpd)  */

static void
ppc64_emit_less_unsigned (void)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpld 7, 4, 3	\n"
	    "mfcr 3	\n"
	    "rlwinm 3, 3, 29, 31, 31	\n");
}
2904
/* Access the memory address in TOP in size of SIZE.
   Zero-extend the read value.

   The address is in r3 and the loaded (zero-extended) value replaces
   it.  Unsupported sizes fall through and emit nothing.  */

static void
ppc64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM ("lbz 3, 0(3)");
      break;
    case 2:
      EMIT_ASM ("lhz 3, 0(3)");
      break;
    case 4:
      EMIT_ASM ("lwz 3, 0(3)");
      break;
    case 8:
      EMIT_ASM ("ld 3, 0(3)");
      break;
    }
}
2927
/* TOP = NUM

   Materialize the 64-bit constant into r3 (gen_limm's last argument 1
   selects the 64-bit sequence).  */

static void
ppc64_emit_const (LONGEST num)
{
  uint32_t buf[5];
  uint32_t *p = buf;

  p += gen_limm (p, 3, num, 1);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2941
/* Set TOP to the value of register REG by calling get_raw_reg function
   with two argument, collected buffer and register number.

   ELFv1: the TOC (r2) is saved/restored around the cross-module call;
   gen_call's (1, 1) arguments select the 64-bit, via-descriptor call
   sequence.  */

static void
ppc64v1_emit_reg (int reg)
{
  uint32_t buf[15];
  uint32_t *p = buf;

  /* fctx->regs is passed in r3 and then saved in 176(1).
     (-32(r31) is the same slot: r31 holds the entry SP and the frame
     is 208 bytes, so entry_SP - 32 == new_SP + 176.)  */
  p += GEN_LD (p, 3, 31, -32);
  p += GEN_LI (p, 4, reg);
  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2961
/* Likewise, for ELFv2.  The TOC save slot is at 24(r1) (ELFv2 stack
   layout) and gen_call's (1, 0) selects the 64-bit, no-descriptor call
   sequence.  */

static void
ppc64v2_emit_reg (int reg)
{
  uint32_t buf[12];
  uint32_t *p = buf;

  /* fctx->regs is passed in r3 and then saved in 176(1).  */
  p += GEN_LD (p, 3, 31, -32);
  p += GEN_LI (p, 4, reg);
  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
2980
/* TOP = stack[--sp]
   ldu pops one entry into r3 and advances r30 by 8.  */

static void
ppc64_emit_pop (void)
{
  EMIT_ASM ("ldu 3, 8(30)");
}
2988
/* stack[sp++] = TOP

   Because we may use up bytecode stack, expand 8 doublewords more
   if needed.  */

static void
ppc64_emit_stack_flush (void)
{
  /* Make sure bytecode stack is big enough before push.
     Otherwise, expand 64-byte more.  112 is the minimum frame size
     kept below the bytecode stack (see frame-layout comment above);
     the stdu stores r31 as the back-chain of the extension.  */

  EMIT_ASM ("  std 3, 0(30)	\n"
	    "  addi 4, 30, -(112 + 8)	\n"
	    "  cmpd 7, 4, 1	\n"
	    "  bgt 7, 1f	\n"
	    "  stdu 31, -64(1)	\n"
	    "1:addi 30, 30, -8	\n");
}
3007
/* Swap TOP and stack[sp-1]

   Plain ld/std (no update form): r30 is unchanged.  */

static void
ppc64_emit_swap (void)
{
  EMIT_ASM ("ld 4, 8(30)	\n"
	    "std 3, 8(30)	\n"
	    "mr 3, 4	\n");
}
3017
/* Call function FN - ELFv1.

   Saves/restores the TOC (r2) at its ELFv1 slot 40(r1) around the
   via-descriptor call.  */

static void
ppc64v1_emit_call (CORE_ADDR fn)
{
  uint32_t buf[13];
  uint32_t *p = buf;

  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
3033
/* Call function FN - ELFv2.

   Same as the ELFv1 variant but the TOC slot is 24(r1) and no
   function descriptor is involved.  */

static void
ppc64v2_emit_call (CORE_ADDR fn)
{
  uint32_t buf[10];
  uint32_t *p = buf;

  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
3049
/* FN's prototype is `LONGEST(*fn)(int)'.
   TOP = fn (arg1)

   The return value arrives directly in r3 = TOP; TOC saved/restored
   around the ELFv1 call.  */

static void
ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[13];
  uint32_t *p = buf;

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
3069
/* Likewise for ELFv2 (TOC slot 24(r1), no descriptor).  */

static void
ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[10];
  uint32_t *p = buf;

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
3087
/* FN's prototype is `void(*fn)(int,LONGEST)'.
   fn (arg1, TOP)

   TOP should be preserved/restored before/after the call: it is
   spilled to the next-empty slot at 0(r30), moved to the second
   argument register r4, and reloaded after the call.  */

static void
ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[17];
  uint32_t *p = buf;

  /* Save TOP.  0(30) is next-empty.  */
  p += GEN_STD (p, 3, 30, 0);

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += GEN_MR (p, 4, 3);	/* mr r4, r3 */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 40);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 1);
  p += GEN_LD (p, 2, 1, 40);	/* Restore TOC.  */

  /* Restore TOP */
  p += GEN_LD (p, 3, 30, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
3115
/* Likewise for ELFv2 (TOC slot 24(r1), no descriptor).  */

static void
ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[14];
  uint32_t *p = buf;

  /* Save TOP.  0(30) is next-empty.  */
  p += GEN_STD (p, 3, 30, 0);

  /* Setup argument.  arg1 is a 16-bit value.  */
  p += GEN_MR (p, 4, 3);	/* mr r4, r3 */
  p += gen_limm (p, 3, arg1, 1);
  p += GEN_STD (p, 2, 1, 24);	/* Save TOC.  */
  p += gen_call (p, fn, 1, 0);
  p += GEN_LD (p, 2, 1, 24);	/* Restore TOC.  */

  /* Restore TOP */
  p += GEN_LD (p, 3, 30, 0);

  emit_insns (buf, p - buf);
  gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
}
3140
/* If TOP is true, goto somewhere.  Otherwise, just fall-through.

   The "1:bne 7, 1b" is a self-branch placeholder patched later by
   ppc_write_goto_address; *OFFSET_P = 8 is its byte offset in the
   sequence, *SIZE_P = 14 marks a conditional branch.  */

static void
ppc64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("cmpdi 7, 3, 0	\n"
	    "ldu 3, 8(30)	\n"
	    "1:bne 7, 1b	\n");

  if (offset_p)
    *offset_p = 8;
  if (size_p)
    *size_p = 14;
}
3155
/* Goto if stack[--sp] == TOP

   Pops the operand, compares in CR7, pops the new TOP, then emits the
   conditional self-branch placeholder (offset 12, patched later).  */

static void
ppc64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpd 7, 4, 3	\n"
	    "ldu 3, 8(30)	\n"
	    "1:beq 7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
3171
/* Goto if stack[--sp] != TOP  (same shape as eq_goto, bne test)  */

static void
ppc64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpd 7, 4, 3	\n"
	    "ldu 3, 8(30)	\n"
	    "1:bne 7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
3187
/* Goto if stack[--sp] < TOP  (signed; blt on CR7)  */

static void
ppc64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpd 7, 4, 3	\n"
	    "ldu 3, 8(30)	\n"
	    "1:blt 7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
3203
/* Goto if stack[--sp] <= TOP  (signed; ble on CR7)  */

static void
ppc64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpd 7, 4, 3	\n"
	    "ldu 3, 8(30)	\n"
	    "1:ble 7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
3219
/* Goto if stack[--sp] > TOP  (signed; bgt on CR7)  */

static void
ppc64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpd 7, 4, 3	\n"
	    "ldu 3, 8(30)	\n"
	    "1:bgt 7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
3235
/* Goto if stack[--sp] >= TOP  (signed; bge on CR7)  */

static void
ppc64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM ("ldu 4, 8(30)	\n"
	    "cmpd 7, 4, 3	\n"
	    "ldu 3, 8(30)	\n"
	    "1:bge 7, 1b	\n");

  if (offset_p)
    *offset_p = 12;
  if (size_p)
    *size_p = 14;
}
3251
/* Table of emit ops for 64-bit ELFv1.

   Positional initializer of struct emit_ops — entry order must match
   the member order of that struct.  Goto/address-patch entries are
   shared with the 32-bit table.  */

static struct emit_ops ppc64v1_emit_ops_impl =
{
  ppc64v1_emit_prologue,
  ppc64_emit_epilogue,
  ppc64_emit_add,
  ppc64_emit_sub,
  ppc64_emit_mul,
  ppc64_emit_lsh,
  ppc64_emit_rsh_signed,
  ppc64_emit_rsh_unsigned,
  ppc64_emit_ext,
  ppc64_emit_log_not,
  ppc64_emit_bit_and,
  ppc64_emit_bit_or,
  ppc64_emit_bit_xor,
  ppc64_emit_bit_not,
  ppc64_emit_equal,
  ppc64_emit_less_signed,
  ppc64_emit_less_unsigned,
  ppc64_emit_ref,
  ppc64_emit_if_goto,
  ppc_emit_goto,
  ppc_write_goto_address,
  ppc64_emit_const,
  ppc64v1_emit_call,
  ppc64v1_emit_reg,
  ppc64_emit_pop,
  ppc64_emit_stack_flush,
  ppc64_emit_zero_ext,
  ppc64_emit_swap,
  ppc_emit_stack_adjust,
  ppc64v1_emit_int_call_1,
  ppc64v1_emit_void_call_2,
  ppc64_emit_eq_goto,
  ppc64_emit_ne_goto,
  ppc64_emit_lt_goto,
  ppc64_emit_le_goto,
  ppc64_emit_gt_goto,
  ppc64_emit_ge_goto
};
3294
/* Table of emit ops for 64-bit ELFv2.

   Same as the ELFv1 table except for the prologue/call/reg entries,
   which differ in TOC handling and function-descriptor usage.  */

static struct emit_ops ppc64v2_emit_ops_impl =
{
  ppc64v2_emit_prologue,
  ppc64_emit_epilogue,
  ppc64_emit_add,
  ppc64_emit_sub,
  ppc64_emit_mul,
  ppc64_emit_lsh,
  ppc64_emit_rsh_signed,
  ppc64_emit_rsh_unsigned,
  ppc64_emit_ext,
  ppc64_emit_log_not,
  ppc64_emit_bit_and,
  ppc64_emit_bit_or,
  ppc64_emit_bit_xor,
  ppc64_emit_bit_not,
  ppc64_emit_equal,
  ppc64_emit_less_signed,
  ppc64_emit_less_unsigned,
  ppc64_emit_ref,
  ppc64_emit_if_goto,
  ppc_emit_goto,
  ppc_write_goto_address,
  ppc64_emit_const,
  ppc64v2_emit_call,
  ppc64v2_emit_reg,
  ppc64_emit_pop,
  ppc64_emit_stack_flush,
  ppc64_emit_zero_ext,
  ppc64_emit_swap,
  ppc_emit_stack_adjust,
  ppc64v2_emit_int_call_1,
  ppc64v2_emit_void_call_2,
  ppc64_emit_eq_goto,
  ppc64_emit_ne_goto,
  ppc64_emit_lt_goto,
  ppc64_emit_le_goto,
  ppc64_emit_gt_goto,
  ppc64_emit_ge_goto
};
3337
3338 #endif
3339
3340 /* Implementation of linux_target_ops method "emit_ops". */
3341
3342 static struct emit_ops *
3343 ppc_emit_ops (void)
3344 {
3345 #ifdef __powerpc64__
3346 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3347
3348 if (register_size (regcache->tdesc, 0) == 8)
3349 {
3350 if (is_elfv2_inferior ())
3351 return &ppc64v2_emit_ops_impl;
3352 else
3353 return &ppc64v1_emit_ops_impl;
3354 }
3355 #endif
3356 return &ppc_emit_ops_impl;
3357 }
3358
3359 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3360
3361 static int
3362 ppc_get_ipa_tdesc_idx (void)
3363 {
3364 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3365 const struct target_desc *tdesc = regcache->tdesc;
3366
3367 #ifdef __powerpc64__
3368 if (tdesc == tdesc_powerpc_64l)
3369 return PPC_TDESC_BASE;
3370 if (tdesc == tdesc_powerpc_altivec64l)
3371 return PPC_TDESC_ALTIVEC;
3372 if (tdesc == tdesc_powerpc_vsx64l)
3373 return PPC_TDESC_VSX;
3374 if (tdesc == tdesc_powerpc_isa205_64l)
3375 return PPC_TDESC_ISA205;
3376 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3377 return PPC_TDESC_ISA205_ALTIVEC;
3378 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3379 return PPC_TDESC_ISA205_VSX;
3380 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3381 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3382 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3383 return PPC_TDESC_ISA207_VSX;
3384 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3385 return PPC_TDESC_ISA207_HTM_VSX;
3386 #endif
3387
3388 if (tdesc == tdesc_powerpc_32l)
3389 return PPC_TDESC_BASE;
3390 if (tdesc == tdesc_powerpc_altivec32l)
3391 return PPC_TDESC_ALTIVEC;
3392 if (tdesc == tdesc_powerpc_vsx32l)
3393 return PPC_TDESC_VSX;
3394 if (tdesc == tdesc_powerpc_isa205_32l)
3395 return PPC_TDESC_ISA205;
3396 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3397 return PPC_TDESC_ISA205_ALTIVEC;
3398 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3399 return PPC_TDESC_ISA205_VSX;
3400 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3401 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3402 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3403 return PPC_TDESC_ISA207_VSX;
3404 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3405 return PPC_TDESC_ISA207_HTM_VSX;
3406 if (tdesc == tdesc_powerpc_e500l)
3407 return PPC_TDESC_E500;
3408
3409 return 0;
3410 }
3411
/* PowerPC implementations of the remaining linux_target_ops hooks.
   Positional initializer: entries must stay in struct member order.
   NULL entries use the generic default behavior.  */

struct linux_target_ops the_low_target = {
  ppc_insert_point,
  ppc_remove_point,
  NULL,
  NULL,
  ppc_collect_ptrace_register,
  ppc_supply_ptrace_register,
  NULL, /* siginfo_fixup */
  NULL, /* new_process */
  NULL, /* delete_process */
  NULL, /* new_thread */
  NULL, /* delete_thread */
  NULL, /* new_fork */
  NULL, /* prepare_to_resume */
  NULL, /* process_qsupported */
  ppc_supports_tracepoints,
  ppc_get_thread_area,
  ppc_install_fast_tracepoint_jump_pad,
  ppc_emit_ops,
  ppc_get_min_fast_tracepoint_insn_len,
  NULL, /* supports_range_stepping */
  ppc_supports_hardware_single_step,
  NULL, /* get_syscall_trapinfo */
  ppc_get_ipa_tdesc_idx,
};
3437
/* The linux target ops object.  Points at the file's singleton
   ppc_target instance declared earlier.  */

linux_process_target *the_linux_target = &the_ppc_target;
3441
3442 void
3443 initialize_low_arch (void)
3444 {
3445 /* Initialize the Linux target descriptions. */
3446
3447 init_registers_powerpc_32l ();
3448 init_registers_powerpc_altivec32l ();
3449 init_registers_powerpc_vsx32l ();
3450 init_registers_powerpc_isa205_32l ();
3451 init_registers_powerpc_isa205_altivec32l ();
3452 init_registers_powerpc_isa205_vsx32l ();
3453 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3454 init_registers_powerpc_isa207_vsx32l ();
3455 init_registers_powerpc_isa207_htm_vsx32l ();
3456 init_registers_powerpc_e500l ();
3457 #if __powerpc64__
3458 init_registers_powerpc_64l ();
3459 init_registers_powerpc_altivec64l ();
3460 init_registers_powerpc_vsx64l ();
3461 init_registers_powerpc_isa205_64l ();
3462 init_registers_powerpc_isa205_altivec64l ();
3463 init_registers_powerpc_isa205_vsx64l ();
3464 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3465 init_registers_powerpc_isa207_vsx64l ();
3466 init_registers_powerpc_isa207_htm_vsx64l ();
3467 #endif
3468
3469 initialize_regsets_info (&ppc_regsets_info);
3470 }
This page took 0.111918 seconds and 4 git commands to generate.