1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
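/* As an illustration of the field macros above: for the unconditional
   branch instruction 0x48000008 ("b +8"),
   PPC_OP6 (0x48000008) = (0x48000008 >> 26) & 0x3f = 18, and
   PPC_LI (0x48000008) = PPC_SEXT ((0x48000008 >> 2) & 0xffffff, 24) << 2 = 8,
   i.e. a forward displacement of 8 bytes.  */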
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 protected:
56
57 void low_arch_setup () override;
58
59 bool low_cannot_fetch_register (int regno) override;
60
61 bool low_cannot_store_register (int regno) override;
62
63 bool low_supports_breakpoints () override;
64
65 CORE_ADDR low_get_pc (regcache *regcache) override;
66
67 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
68 };
69
70 /* The singleton target ops object. */
71
72 static ppc_target the_ppc_target;
73
74 /* Holds the AT_HWCAP auxv entry. */
75
76 static unsigned long ppc_hwcap;
77
78 /* Holds the AT_HWCAP2 auxv entry. */
79
80 static unsigned long ppc_hwcap2;
81
82
83 #define ppc_num_regs 73
84
85 #ifdef __powerpc64__
86 /* We use a constant for FPSCR instead of PT_FPSCR, because
87 many shipped PPC64 kernels had the wrong value in ptrace.h. */
88 static int ppc_regmap[] =
89 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
90 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
91 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
92 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
93 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
94 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
95 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
96 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
97 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
98 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
99 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
100 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
101 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
102 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
103 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
104 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
105 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
106 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
107 PT_ORIG_R3 * 8, PT_TRAP * 8 };
108 #else
109 /* Currently, don't check/send MQ. */
110 static int ppc_regmap[] =
111 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
112 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
113 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
114 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
115 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
116 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
117 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
118 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
119 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
120 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
121 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
122 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
123 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
124 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
125 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
126 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
127 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
128 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
129 PT_ORIG_R3 * 4, PT_TRAP * 4
130 };
131
132 static int ppc_regmap_e500[] =
133 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
134 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
135 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
136 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
137 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
138 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
139 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
140 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
141 -1, -1, -1, -1,
142 -1, -1, -1, -1,
143 -1, -1, -1, -1,
144 -1, -1, -1, -1,
145 -1, -1, -1, -1,
146 -1, -1, -1, -1,
147 -1, -1, -1, -1,
148 -1, -1, -1, -1,
149 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
150 PT_CTR * 4, PT_XER * 4, -1,
151 PT_ORIG_R3 * 4, PT_TRAP * 4
152 };
153 #endif
154
155 /* Check whether the kernel provides a register set with number
156 REGSET_ID of size REGSETSIZE for process/thread TID. */
157
158 static int
159 ppc_check_regset (int tid, int regset_id, int regsetsize)
160 {
161 void *buf = alloca (regsetsize);
162 struct iovec iov;
163
164 iov.iov_base = buf;
165 iov.iov_len = regsetsize;
166
167 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
168 || errno == ENODATA)
169 return 1;
170 return 0;
171 }
172
173 bool
174 ppc_target::low_cannot_store_register (int regno)
175 {
176 const struct target_desc *tdesc = current_process ()->tdesc;
177
178 #ifndef __powerpc64__
179 /* Some kernels do not allow us to store fpscr. */
180 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
181 && regno == find_regno (tdesc, "fpscr"))
182 return true;
183 #endif
184
185 /* Some kernels do not allow us to store orig_r3 or trap. */
186 if (regno == find_regno (tdesc, "orig_r3")
187 || regno == find_regno (tdesc, "trap"))
188 return true;
189
190 return false;
191 }
192
193 bool
194 ppc_target::low_cannot_fetch_register (int regno)
195 {
196 return false;
197 }
198
199 static void
200 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
201 {
202 memset (buf, 0, sizeof (long));
203
204 if (__BYTE_ORDER == __LITTLE_ENDIAN)
205 {
206 /* Little-endian values always sit at the left end of the buffer. */
207 collect_register (regcache, regno, buf);
208 }
209 else if (__BYTE_ORDER == __BIG_ENDIAN)
210 {
211 /* Big-endian values sit at the right end of the buffer. In case of
212 	 registers whose sizes are smaller than sizeof (long), we must apply
213 	 padding to access them correctly.  */
214 int size = register_size (regcache->tdesc, regno);
215
216 if (size < sizeof (long))
217 collect_register (regcache, regno, buf + sizeof (long) - size);
218 else
219 collect_register (regcache, regno, buf);
220 }
221 else
222 perror_with_name ("Unexpected byte order");
223 }
224
225 static void
226 ppc_supply_ptrace_register (struct regcache *regcache,
227 int regno, const char *buf)
228 {
229 if (__BYTE_ORDER == __LITTLE_ENDIAN)
230 {
231 /* Little-endian values always sit at the left end of the buffer. */
232 supply_register (regcache, regno, buf);
233 }
234 else if (__BYTE_ORDER == __BIG_ENDIAN)
235 {
236 /* Big-endian values sit at the right end of the buffer. In case of
237 	 registers whose sizes are smaller than sizeof (long), we must apply
238 	 padding to access them correctly.  */
239 int size = register_size (regcache->tdesc, regno);
240
241 if (size < sizeof (long))
242 supply_register (regcache, regno, buf + sizeof (long) - size);
243 else
244 supply_register (regcache, regno, buf);
245 }
246 else
247 perror_with_name ("Unexpected byte order");
248 }
249
250 bool
251 ppc_target::low_supports_breakpoints ()
252 {
253 return true;
254 }
255
256 CORE_ADDR
257 ppc_target::low_get_pc (regcache *regcache)
258 {
259 if (register_size (regcache->tdesc, 0) == 4)
260 {
261 unsigned int pc;
262 collect_register_by_name (regcache, "pc", &pc);
263 return (CORE_ADDR) pc;
264 }
265 else
266 {
267 unsigned long pc;
268 collect_register_by_name (regcache, "pc", &pc);
269 return (CORE_ADDR) pc;
270 }
271 }
272
273 void
274 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
275 {
276 if (register_size (regcache->tdesc, 0) == 4)
277 {
278 unsigned int newpc = pc;
279 supply_register_by_name (regcache, "pc", &newpc);
280 }
281 else
282 {
283 unsigned long newpc = pc;
284 supply_register_by_name (regcache, "pc", &newpc);
285 }
286 }
287
288 #ifndef __powerpc64__
289 static int ppc_regmap_adjusted;
290 #endif
291
292
293 /* Correct in either endianness.
294 This instruction is "twge r2, r2", which GDB uses as a software
295 breakpoint. */
296 static const unsigned int ppc_breakpoint = 0x7d821008;
297 #define ppc_breakpoint_len 4
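/* Decoded, 0x7d821008 is primary opcode 31, extended opcode 4 (tw), with
   TO = 0b01100 (trap if greater than or equal), RA = RB = r2 -- hence
   "twge r2, r2".  Since r2 >= r2 is always true, executing the instruction
   always traps, which is what makes it usable as a software breakpoint.  */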
298
299 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
300
301 static const gdb_byte *
302 ppc_sw_breakpoint_from_kind (int kind, int *size)
303 {
304 *size = ppc_breakpoint_len;
305 return (const gdb_byte *) &ppc_breakpoint;
306 }
307
308 static int
309 ppc_breakpoint_at (CORE_ADDR where)
310 {
311 unsigned int insn;
312
313 the_target->read_memory (where, (unsigned char *) &insn, 4);
314 if (insn == ppc_breakpoint)
315 return 1;
316 /* If necessary, recognize more trap instructions here. GDB only uses
317 the one. */
318
319 return 0;
320 }
321
322 /* Implement supports_z_point_type target-ops.
323 Returns true if type Z_TYPE breakpoint is supported.
324
325    Software breakpoints are handled on the server side, so tracepoints
326    and breakpoints can be inserted at the same location.  */
327
328 static int
329 ppc_supports_z_point_type (char z_type)
330 {
331 switch (z_type)
332 {
333 case Z_PACKET_SW_BP:
334 return 1;
335 case Z_PACKET_HW_BP:
336 case Z_PACKET_WRITE_WP:
337 case Z_PACKET_ACCESS_WP:
338 default:
339 return 0;
340 }
341 }
342
343 /* Implement insert_point target-ops.
344 Returns 0 on success, -1 on failure and 1 on unsupported. */
345
346 static int
347 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
348 int size, struct raw_breakpoint *bp)
349 {
350 switch (type)
351 {
352 case raw_bkpt_type_sw:
353 return insert_memory_breakpoint (bp);
354
355 case raw_bkpt_type_hw:
356 case raw_bkpt_type_write_wp:
357 case raw_bkpt_type_access_wp:
358 default:
359 /* Unsupported. */
360 return 1;
361 }
362 }
363
364 /* Implement remove_point target-ops.
365 Returns 0 on success, -1 on failure and 1 on unsupported. */
366
367 static int
368 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
369 int size, struct raw_breakpoint *bp)
370 {
371 switch (type)
372 {
373 case raw_bkpt_type_sw:
374 return remove_memory_breakpoint (bp);
375
376 case raw_bkpt_type_hw:
377 case raw_bkpt_type_write_wp:
378 case raw_bkpt_type_access_wp:
379 default:
380 /* Unsupported. */
381 return 1;
382 }
383 }
384
385 /* Provide only a fill function for the general register set. ps_lgetregs
386 will use this for NPTL support. */
387
388 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
389 {
390 int i;
391
392 for (i = 0; i < 32; i++)
393 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
394
395 for (i = 64; i < 70; i++)
396 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
397
398 for (i = 71; i < 73; i++)
399 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
400 }
401
402 /* Program Priority Register regset fill function. */
403
404 static void
405 ppc_fill_pprregset (struct regcache *regcache, void *buf)
406 {
407 char *ppr = (char *) buf;
408
409 collect_register_by_name (regcache, "ppr", ppr);
410 }
411
412 /* Program Priority Register regset store function. */
413
414 static void
415 ppc_store_pprregset (struct regcache *regcache, const void *buf)
416 {
417 const char *ppr = (const char *) buf;
418
419 supply_register_by_name (regcache, "ppr", ppr);
420 }
421
422 /* Data Stream Control Register regset fill function. */
423
424 static void
425 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
426 {
427 char *dscr = (char *) buf;
428
429 collect_register_by_name (regcache, "dscr", dscr);
430 }
431
432 /* Data Stream Control Register regset store function. */
433
434 static void
435 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
436 {
437 const char *dscr = (const char *) buf;
438
439 supply_register_by_name (regcache, "dscr", dscr);
440 }
441
442 /* Target Address Register regset fill function. */
443
444 static void
445 ppc_fill_tarregset (struct regcache *regcache, void *buf)
446 {
447 char *tar = (char *) buf;
448
449 collect_register_by_name (regcache, "tar", tar);
450 }
451
452 /* Target Address Register regset store function. */
453
454 static void
455 ppc_store_tarregset (struct regcache *regcache, const void *buf)
456 {
457 const char *tar = (const char *) buf;
458
459 supply_register_by_name (regcache, "tar", tar);
460 }
461
462 /* Event-Based Branching regset store function. Unless the inferior
463    has a perf event open, ptrace can fail with ENODATA when reading
464    or writing the regset.  For reading, the registers
465 will correctly show as unavailable. For writing, gdbserver
466 currently only caches any register writes from P and G packets and
467 the stub always tries to write all the regsets when resuming the
468 inferior, which would result in frequent warnings. For this
469 reason, we don't define a fill function. This also means that the
470 client-side regcache will be dirty if the user tries to write to
471 the EBB registers. G packets that the client sends to write to
472 unrelated registers will also include data for EBB registers, even
473 if they are unavailable. */
474
475 static void
476 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
477 {
478 const char *regset = (const char *) buf;
479
480 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
481      .dat file it is BESCR, EBBHR, EBBRR.  */
482 supply_register_by_name (regcache, "ebbrr", &regset[0]);
483 supply_register_by_name (regcache, "ebbhr", &regset[8]);
484 supply_register_by_name (regcache, "bescr", &regset[16]);
485 }
486
487 /* Performance Monitoring Unit regset fill function. */
488
489 static void
490 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
491 {
492 char *regset = (char *) buf;
493
494 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
495      In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER.  */
496 collect_register_by_name (regcache, "siar", &regset[0]);
497 collect_register_by_name (regcache, "sdar", &regset[8]);
498 collect_register_by_name (regcache, "sier", &regset[16]);
499 collect_register_by_name (regcache, "mmcr2", &regset[24]);
500 collect_register_by_name (regcache, "mmcr0", &regset[32]);
501 }
502
503 /* Performance Monitoring Unit regset store function. */
504
505 static void
506 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
507 {
508 const char *regset = (const char *) buf;
509
510 supply_register_by_name (regcache, "siar", &regset[0]);
511 supply_register_by_name (regcache, "sdar", &regset[8]);
512 supply_register_by_name (regcache, "sier", &regset[16]);
513 supply_register_by_name (regcache, "mmcr2", &regset[24]);
514 supply_register_by_name (regcache, "mmcr0", &regset[32]);
515 }
516
517 /* Hardware Transactional Memory special-purpose register regset fill
518 function. */
519
520 static void
521 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
522 {
523 int i, base;
524 char *regset = (char *) buf;
525
526 base = find_regno (regcache->tdesc, "tfhar");
527 for (i = 0; i < 3; i++)
528 collect_register (regcache, base + i, &regset[i * 8]);
529 }
530
531 /* Hardware Transactional Memory special-purpose register regset store
532 function. */
533
534 static void
535 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
536 {
537 int i, base;
538 const char *regset = (const char *) buf;
539
540 base = find_regno (regcache->tdesc, "tfhar");
541 for (i = 0; i < 3; i++)
542 supply_register (regcache, base + i, &regset[i * 8]);
543 }
544
545 /* For the same reasons as the EBB regset, none of the HTM
546 checkpointed regsets have a fill function. These registers are
547 only available if the inferior is in a transaction. */
548
549 /* Hardware Transactional Memory checkpointed general-purpose regset
550 store function. */
551
552 static void
553 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
554 {
555 int i, base, size, endian_offset;
556 const char *regset = (const char *) buf;
557
558 base = find_regno (regcache->tdesc, "cr0");
559 size = register_size (regcache->tdesc, base);
560
561 gdb_assert (size == 4 || size == 8);
562
563 for (i = 0; i < 32; i++)
564 supply_register (regcache, base + i, &regset[i * size]);
565
566 endian_offset = 0;
567
568 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
569 endian_offset = 4;
570
571 supply_register_by_name (regcache, "ccr",
572 &regset[PT_CCR * size + endian_offset]);
573
574 supply_register_by_name (regcache, "cxer",
575 &regset[PT_XER * size + endian_offset]);
576
577 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
578 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
579 }
580
581 /* Hardware Transactional Memory checkpointed floating-point regset
582 store function. */
583
584 static void
585 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
586 {
587 int i, base;
588 const char *regset = (const char *) buf;
589
590 base = find_regno (regcache->tdesc, "cf0");
591
592 for (i = 0; i < 32; i++)
593 supply_register (regcache, base + i, &regset[i * 8]);
594
595 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
596 }
597
598 /* Hardware Transactional Memory checkpointed vector regset store
599 function. */
600
601 static void
602 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
603 {
604 int i, base;
605 const char *regset = (const char *) buf;
606 int vscr_offset = 0;
607
608 base = find_regno (regcache->tdesc, "cvr0");
609
610 for (i = 0; i < 32; i++)
611 supply_register (regcache, base + i, &regset[i * 16]);
612
613 if (__BYTE_ORDER == __BIG_ENDIAN)
614 vscr_offset = 12;
615
616 supply_register_by_name (regcache, "cvscr",
617 &regset[32 * 16 + vscr_offset]);
618
619 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
620 }
621
622 /* Hardware Transactional Memory checkpointed vector-scalar regset
623 store function. */
624
625 static void
626 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
627 {
628 int i, base;
629 const char *regset = (const char *) buf;
630
631 base = find_regno (regcache->tdesc, "cvs0h");
632 for (i = 0; i < 32; i++)
633 supply_register (regcache, base + i, &regset[i * 8]);
634 }
635
636 /* Hardware Transactional Memory checkpointed Program Priority
637 Register regset store function. */
638
639 static void
640 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
641 {
642 const char *cppr = (const char *) buf;
643
644 supply_register_by_name (regcache, "cppr", cppr);
645 }
646
647 /* Hardware Transactional Memory checkpointed Data Stream Control
648 Register regset store function. */
649
650 static void
651 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
652 {
653 const char *cdscr = (const char *) buf;
654
655 supply_register_by_name (regcache, "cdscr", cdscr);
656 }
657
658 /* Hardware Transactional Memory checkpointed Target Address Register
659 regset store function. */
660
661 static void
662 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
663 {
664 const char *ctar = (const char *) buf;
665
666 supply_register_by_name (regcache, "ctar", ctar);
667 }
668
669 static void
670 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
671 {
672 int i, base;
673 char *regset = (char *) buf;
674
675 base = find_regno (regcache->tdesc, "vs0h");
676 for (i = 0; i < 32; i++)
677 collect_register (regcache, base + i, &regset[i * 8]);
678 }
679
680 static void
681 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
682 {
683 int i, base;
684 const char *regset = (const char *) buf;
685
686 base = find_regno (regcache->tdesc, "vs0h");
687 for (i = 0; i < 32; i++)
688 supply_register (regcache, base + i, &regset[i * 8]);
689 }
690
691 static void
692 ppc_fill_vrregset (struct regcache *regcache, void *buf)
693 {
694 int i, base;
695 char *regset = (char *) buf;
696 int vscr_offset = 0;
697
698 base = find_regno (regcache->tdesc, "vr0");
699 for (i = 0; i < 32; i++)
700 collect_register (regcache, base + i, &regset[i * 16]);
701
702 if (__BYTE_ORDER == __BIG_ENDIAN)
703 vscr_offset = 12;
704
705 collect_register_by_name (regcache, "vscr",
706 &regset[32 * 16 + vscr_offset]);
707
708 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
709 }
710
711 static void
712 ppc_store_vrregset (struct regcache *regcache, const void *buf)
713 {
714 int i, base;
715 const char *regset = (const char *) buf;
716 int vscr_offset = 0;
717
718 base = find_regno (regcache->tdesc, "vr0");
719 for (i = 0; i < 32; i++)
720 supply_register (regcache, base + i, &regset[i * 16]);
721
722 if (__BYTE_ORDER == __BIG_ENDIAN)
723 vscr_offset = 12;
724
725 supply_register_by_name (regcache, "vscr",
726 &regset[32 * 16 + vscr_offset]);
727 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
728 }
729
730 struct gdb_evrregset_t
731 {
732 unsigned long evr[32];
733 unsigned long long acc;
734 unsigned long spefscr;
735 };
736
737 static void
738 ppc_fill_evrregset (struct regcache *regcache, void *buf)
739 {
740 int i, ev0;
741 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
742
743 ev0 = find_regno (regcache->tdesc, "ev0h");
744 for (i = 0; i < 32; i++)
745 collect_register (regcache, ev0 + i, &regset->evr[i]);
746
747 collect_register_by_name (regcache, "acc", &regset->acc);
748 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
749 }
750
751 static void
752 ppc_store_evrregset (struct regcache *regcache, const void *buf)
753 {
754 int i, ev0;
755 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
756
757 ev0 = find_regno (regcache->tdesc, "ev0h");
758 for (i = 0; i < 32; i++)
759 supply_register (regcache, ev0 + i, &regset->evr[i]);
760
761 supply_register_by_name (regcache, "acc", &regset->acc);
762 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
763 }
764
765 /* Support for hardware single step. */
766
767 static int
768 ppc_supports_hardware_single_step (void)
769 {
770 return 1;
771 }
772
773 static struct regset_info ppc_regsets[] = {
774 /* List the extra register sets before GENERAL_REGS. That way we will
775 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
776 general registers. Some kernels support these, but not the newer
777 PPC_PTRACE_GETREGS. */
778 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
779 NULL, ppc_store_tm_ctarregset },
780 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
781 NULL, ppc_store_tm_cdscrregset },
782 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
783 NULL, ppc_store_tm_cpprregset },
784 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
785 NULL, ppc_store_tm_cvsxregset },
786 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
787 NULL, ppc_store_tm_cvrregset },
788 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
789 NULL, ppc_store_tm_cfprregset },
790 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
791 NULL, ppc_store_tm_cgprregset },
792 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
793 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
794 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
795 NULL, ppc_store_ebbregset },
796 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
797 ppc_fill_pmuregset, ppc_store_pmuregset },
798 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
799 ppc_fill_tarregset, ppc_store_tarregset },
800 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
801 ppc_fill_pprregset, ppc_store_pprregset },
802 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
803 ppc_fill_dscrregset, ppc_store_dscrregset },
804 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
805 ppc_fill_vsxregset, ppc_store_vsxregset },
806 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
807 ppc_fill_vrregset, ppc_store_vrregset },
808 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
809 ppc_fill_evrregset, ppc_store_evrregset },
810 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
811 NULL_REGSET
812 };
813
814 static struct usrregs_info ppc_usrregs_info =
815 {
816 ppc_num_regs,
817 ppc_regmap,
818 };
819
820 static struct regsets_info ppc_regsets_info =
821 {
822 ppc_regsets, /* regsets */
823 0, /* num_regsets */
824 NULL, /* disabled_regsets */
825 };
826
827 static struct regs_info myregs_info =
828 {
829 NULL, /* regset_bitmap */
830 &ppc_usrregs_info,
831 &ppc_regsets_info
832 };
833
834 const regs_info *
835 ppc_target::get_regs_info ()
836 {
837 return &myregs_info;
838 }
839
840 void
841 ppc_target::low_arch_setup ()
842 {
843 const struct target_desc *tdesc;
844 struct regset_info *regset;
845 struct ppc_linux_features features = ppc_linux_no_features;
846
847 int tid = lwpid_of (current_thread);
848
849 features.wordsize = ppc_linux_target_wordsize (tid);
850
851 if (features.wordsize == 4)
852 tdesc = tdesc_powerpc_32l;
853 else
854 tdesc = tdesc_powerpc_64l;
855
856 current_process ()->tdesc = tdesc;
857
858 /* The value of current_process ()->tdesc needs to be set for this
859 call. */
860 ppc_hwcap = linux_get_hwcap (features.wordsize);
861 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
862
863 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
864
865 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
866 features.vsx = true;
867
868 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
869 features.altivec = true;
870
871 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
872 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
873 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
874 {
875 features.ppr_dscr = true;
876 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
877 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
878 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
879 && ppc_check_regset (tid, NT_PPC_TAR,
880 PPC_LINUX_SIZEOF_TARREGSET)
881 && ppc_check_regset (tid, NT_PPC_EBB,
882 PPC_LINUX_SIZEOF_EBBREGSET)
883 && ppc_check_regset (tid, NT_PPC_PMU,
884 PPC_LINUX_SIZEOF_PMUREGSET))
885 {
886 features.isa207 = true;
887 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
888 && ppc_check_regset (tid, NT_PPC_TM_SPR,
889 PPC_LINUX_SIZEOF_TM_SPRREGSET))
890 features.htm = true;
891 }
892 }
893
894 tdesc = ppc_linux_match_description (features);
895
896 /* On 32-bit machines, check for SPE registers.
897      Set the low target's regmap field as appropriate.  */
898 #ifndef __powerpc64__
899 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
900 tdesc = tdesc_powerpc_e500l;
901
902 if (!ppc_regmap_adjusted)
903 {
904 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
905 ppc_usrregs_info.regmap = ppc_regmap_e500;
906
907 /* If the FPSCR is 64-bit wide, we need to fetch the whole
908 64-bit slot and not just its second word. The PT_FPSCR
909 supplied in a 32-bit GDB compilation doesn't reflect
910 this. */
911 if (register_size (tdesc, 70) == 8)
912 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
913
914 ppc_regmap_adjusted = 1;
915 }
916 #endif
917
918 current_process ()->tdesc = tdesc;
919
920 for (regset = ppc_regsets; regset->size >= 0; regset++)
921 switch (regset->get_request)
922 {
923 case PTRACE_GETVRREGS:
924 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
925 break;
926 case PTRACE_GETVSXREGS:
927 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
928 break;
929 case PTRACE_GETEVRREGS:
930 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
931 regset->size = 32 * 4 + 8 + 4;
932 else
933 regset->size = 0;
934 break;
935 case PTRACE_GETREGSET:
936 switch (regset->nt_type)
937 {
938 case NT_PPC_PPR:
939 regset->size = (features.ppr_dscr ?
940 PPC_LINUX_SIZEOF_PPRREGSET : 0);
941 break;
942 case NT_PPC_DSCR:
943 regset->size = (features.ppr_dscr ?
944 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
945 break;
946 case NT_PPC_TAR:
947 regset->size = (features.isa207 ?
948 PPC_LINUX_SIZEOF_TARREGSET : 0);
949 break;
950 case NT_PPC_EBB:
951 regset->size = (features.isa207 ?
952 PPC_LINUX_SIZEOF_EBBREGSET : 0);
953 break;
954 case NT_PPC_PMU:
955 regset->size = (features.isa207 ?
956 PPC_LINUX_SIZEOF_PMUREGSET : 0);
957 break;
958 case NT_PPC_TM_SPR:
959 regset->size = (features.htm ?
960 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
961 break;
962 case NT_PPC_TM_CGPR:
963 if (features.wordsize == 4)
964 regset->size = (features.htm ?
965 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
966 else
967 regset->size = (features.htm ?
968 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
969 break;
970 case NT_PPC_TM_CFPR:
971 regset->size = (features.htm ?
972 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
973 break;
974 case NT_PPC_TM_CVMX:
975 regset->size = (features.htm ?
976 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
977 break;
978 case NT_PPC_TM_CVSX:
979 regset->size = (features.htm ?
980 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
981 break;
982 case NT_PPC_TM_CPPR:
983 regset->size = (features.htm ?
984 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
985 break;
986 case NT_PPC_TM_CDSCR:
987 regset->size = (features.htm ?
988 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
989 break;
990 case NT_PPC_TM_CTAR:
991 regset->size = (features.htm ?
992 PPC_LINUX_SIZEOF_CTARREGSET : 0);
993 break;
994 default:
995 break;
996 }
997 break;
998 default:
999 break;
1000 }
1001 }
1002
1003 /* Implementation of linux_target_ops method "supports_tracepoints". */
1004
1005 static int
1006 ppc_supports_tracepoints (void)
1007 {
1008 return 1;
1009 }
1010
1011 /* Get the thread area address. This is used to recognize which
1012 thread is which when tracing with the in-process agent library. We
1013 don't read anything from the address, and treat it as opaque; it's
1014 the address itself that we assume is unique per-thread. */
1015
1016 static int
1017 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
1018 {
1019 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1020 struct thread_info *thr = get_lwp_thread (lwp);
1021 struct regcache *regcache = get_thread_regcache (thr, 1);
1022 ULONGEST tp = 0;
1023
1024 #ifdef __powerpc64__
1025 if (register_size (regcache->tdesc, 0) == 8)
1026 collect_register_by_name (regcache, "r13", &tp);
1027 else
1028 #endif
1029 collect_register_by_name (regcache, "r2", &tp);
1030
1031 *addr = tp;
1032
1033 return 0;
1034 }
1035
1036 #ifdef __powerpc64__
1037
1038 /* Older glibc doesn't provide this. */
1039
1040 #ifndef EF_PPC64_ABI
1041 #define EF_PPC64_ABI 3
1042 #endif
1043
1044 /* Returns 1 if the inferior is using the ELFv2 ABI.  Undefined for 32-bit
1045 inferiors. */
1046
1047 static int
1048 is_elfv2_inferior (void)
1049 {
1050 /* To be used as fallback if we're unable to determine the right result -
1051 assume inferior uses the same ABI as gdbserver. */
1052 #if _CALL_ELF == 2
1053 const int def_res = 1;
1054 #else
1055 const int def_res = 0;
1056 #endif
1057 CORE_ADDR phdr;
1058 Elf64_Ehdr ehdr;
1059
1060 const struct target_desc *tdesc = current_process ()->tdesc;
1061 int wordsize = register_size (tdesc, 0);
1062
1063 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1064 return def_res;
1065
1066 /* Assume ELF header is at the beginning of the page where program headers
1067 are located. If it doesn't look like one, bail. */
1068
1069 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1070 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1071 return def_res;
1072
1073 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1074 }
1075
1076 #endif
1077
1078 /* Generate a ds-form instruction in BUF and return the number of 32-bit instructions written.
1079
1080 0 6 11 16 30 32
1081 | OPCD | RST | RA | DS |XO| */
1082
1083 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1084 static int
1085 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1086 {
1087 uint32_t insn;
1088
1089 gdb_assert ((opcd & ~0x3f) == 0);
1090 gdb_assert ((rst & ~0x1f) == 0);
1091 gdb_assert ((ra & ~0x1f) == 0);
1092 gdb_assert ((xo & ~0x3) == 0);
1093
1094 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1095 *buf = (opcd << 26) | insn;
1096 return 1;
1097 }
1098
1099 /* The following are frequently used ds-form instructions.  */
1100
1101 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1102 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1103 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1104 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
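/* For example, GEN_STD (buf, 31, 1, 16) encodes "std r31, 16(r1)":
   gen_ds_form (buf, 62, 31, 1, 16, 0) stores
   (62 << 26) | (31 << 21) | (1 << 16) | (16 & 0xfffc) | 0 = 0xfbe10010.  */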
1105
1106 /* Generate a d-form instruction in BUF.
1107
1108 0 6 11 16 32
1109 | OPCD | RST | RA | D | */
1110
1111 static int
1112 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1113 {
1114 uint32_t insn;
1115
1116 gdb_assert ((opcd & ~0x3f) == 0);
1117 gdb_assert ((rst & ~0x1f) == 0);
1118 gdb_assert ((ra & ~0x1f) == 0);
1119
1120 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1121 *buf = (opcd << 26) | insn;
1122 return 1;
1123 }
1124
1125 /* The following are frequently used d-form instructions.  */
1126
1127 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1128 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1129 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1130 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1131 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1132 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1133 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1134 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1135 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1136
1137 /* Generate an xfx-form instruction in BUF and return the number of 32-bit
1138    instructions written.
1139
1140 0 6 11 21 31 32
1141 | OPCD | RST | RI | XO |/| */
1142
1143 static int
1144 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1145 {
1146 uint32_t insn;
1147 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1148
1149 gdb_assert ((opcd & ~0x3f) == 0);
1150 gdb_assert ((rst & ~0x1f) == 0);
1151 gdb_assert ((xo & ~0x3ff) == 0);
1152
1153 insn = (rst << 21) | (n << 11) | (xo << 1);
1154 *buf = (opcd << 26) | insn;
1155 return 1;
1156 }
1157
1158 /* The following are frequently used xfx-form instructions.  */
1159
1160 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1161 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1162 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1163 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1164 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1165 E & 0xf, 598)
1166 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
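/* Note that the RI (SPR) argument is encoded with its two 5-bit halves
   swapped, as mfspr/mtspr require.  For example, GEN_MFSPR (buf, 5, 8)
   encodes "mfspr r5, LR" (mflr r5): n = (8 & 0x1f) << 5 = 0x100, so the
   stored word is (31 << 26) | (5 << 21) | (0x100 << 11) | (339 << 1)
   = 0x7ca802a6.  */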
1167
1168
1169 /* Generate an x-form instruction in BUF and return the number of 32-bit instructions written.
1170
1171 0 6 11 16 21 31 32
1172 | OPCD | RST | RA | RB | XO |RC| */
1173
1174 static int
1175 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1176 {
1177 uint32_t insn;
1178
1179 gdb_assert ((opcd & ~0x3f) == 0);
1180 gdb_assert ((rst & ~0x1f) == 0);
1181 gdb_assert ((ra & ~0x1f) == 0);
1182 gdb_assert ((rb & ~0x1f) == 0);
1183 gdb_assert ((xo & ~0x3ff) == 0);
1184 gdb_assert ((rc & ~1) == 0);
1185
1186 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1187 *buf = (opcd << 26) | insn;
1188 return 1;
1189 }
1190
1191 /* The following are frequently used x-form instructions.  */
1192
1193 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1194 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1195 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1196 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1197 /* Assume bf = cr7. */
1198 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
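/* As an example of the x-form encoding, GEN_MR (buf, 30, 1) expands to
   GEN_OR (buf, 30, 1, 1), i.e. gen_x_form (buf, 31, 1, 30, 1, 444, 0),
   which stores (31 << 26) | (1 << 21) | (30 << 16) | (1 << 11) | (444 << 1)
   = 0x7c3e0b78, the standard encoding of "mr r30, r1".  */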
1199
1200
1201 /* Generate an md-form instruction in BUF and return the number of 32-bit instructions written.
1202
1203 0 6 11 16 21 27 30 31 32
1204 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1205
1206 static int
1207 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1208 int xo, int rc)
1209 {
1210 uint32_t insn;
1211 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1212 unsigned int sh0_4 = sh & 0x1f;
1213 unsigned int sh5 = (sh >> 5) & 1;
1214
1215 gdb_assert ((opcd & ~0x3f) == 0);
1216 gdb_assert ((rs & ~0x1f) == 0);
1217 gdb_assert ((ra & ~0x1f) == 0);
1218 gdb_assert ((sh & ~0x3f) == 0);
1219 gdb_assert ((mb & ~0x3f) == 0);
1220 gdb_assert ((xo & ~0x7) == 0);
1221 gdb_assert ((rc & ~0x1) == 0);
1222
1223 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1224 | (sh5 << 1) | (xo << 2) | (rc & 1);
1225 *buf = (opcd << 26) | insn;
1226 return 1;
1227 }
1228
1229 /* The following are frequently used md-form instructions. */
1230
1231 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1232 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1233 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1234 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1235
1236 /* Generate an i-form instruction in BUF and return the number of 32-bit instructions written.
1237
1238 0 6 30 31 32
1239 | OPCD | LI |AA|LK| */
1240
1241 static int
1242 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1243 {
1244 uint32_t insn;
1245
1246 gdb_assert ((opcd & ~0x3f) == 0);
1247
1248 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1249 *buf = (opcd << 26) | insn;
1250 return 1;
1251 }
1252
1253 /* The following are frequently used i-form instructions. */
1254
1255 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1256 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1257
1258 /* Generate a b-form instruction in BUF and return the number of 32-bit instructions written.
1259
1260 0 6 11 16 30 31 32
1261 | OPCD | BO | BI | BD |AA|LK| */
1262
1263 static int
1264 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1265 int aa, int lk)
1266 {
1267 uint32_t insn;
1268
1269 gdb_assert ((opcd & ~0x3f) == 0);
1270 gdb_assert ((bo & ~0x1f) == 0);
1271 gdb_assert ((bi & ~0x1f) == 0);
1272
1273 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1274 *buf = (opcd << 26) | insn;
1275 return 1;
1276 }
1277
1278 /* The following are frequently used b-form instructions. */
1279 /* Assume bi = cr7. */
1280 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
1281
1282 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1283    respectively.  They are primarily used for saving/restoring GPRs in the
1284    jump pad, not for bytecode compilation.  */
1285
1286 #ifdef __powerpc64__
1287 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1288 GEN_LD (buf, rt, ra, si) : \
1289 GEN_LWZ (buf, rt, ra, si))
1290 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1291 GEN_STD (buf, rt, ra, si) : \
1292 GEN_STW (buf, rt, ra, si))
1293 #else
1294 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1295 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1296 #endif
1297
1298 /* Generate a sequence of instructions to load IMM into register REG.
1299    Write the instructions in BUF and return the number of 32-bit instructions written.  */
1300
1301 static int
1302 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1303 {
1304 uint32_t *p = buf;
1305
1306 if ((imm + 32768) < 65536)
1307 {
1308 /* li reg, imm[15:0] */
1309 p += GEN_LI (p, reg, imm);
1310 }
1311 else if ((imm >> 32) == 0)
1312 {
1313 /* lis reg, imm[31:16]
1314 ori reg, reg, imm[15:0]
1315 rldicl reg, reg, 0, 32 */
1316 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1317 if ((imm & 0xffff) != 0)
1318 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1319 /* Clear upper 32-bit if sign-bit is set. */
1320 if (imm & (1u << 31) && is_64)
1321 p += GEN_RLDICL (p, reg, reg, 0, 32);
1322 }
1323 else
1324 {
1325 gdb_assert (is_64);
1326 /* lis reg, <imm[63:48]>
1327 ori reg, reg, <imm[48:32]>
1328 rldicr reg, reg, 32, 31
1329 oris reg, reg, <imm[31:16]>
1330 ori reg, reg, <imm[15:0]> */
1331 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1332 if (((imm >> 32) & 0xffff) != 0)
1333 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1334 p += GEN_RLDICR (p, reg, reg, 32, 31);
1335 if (((imm >> 16) & 0xffff) != 0)
1336 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1337 if ((imm & 0xffff) != 0)
1338 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1339 }
1340
1341 return p - buf;
1342 }
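/* As a concrete trace of the code above: with is_64 set, gen_limm for
   IMM = 0x1234567800000000 takes the third branch and emits
     lis    reg, 0x1234
     ori    reg, reg, 0x5678
     rldicr reg, reg, 32, 31
   (the final oris/ori are skipped because imm[31:0] is zero), and
   returns 3.  */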
1343
1344 /* Generate a sequence for an atomic exchange at location LOCK.
1345    This code sequence clobbers r6, r7, r8.  LOCK is the location for
1346    the atomic exchange, OLD_VALUE is the expected old value stored at the
1347    location, and R_NEW is the register holding the new value.  */
1348
1349 static int
1350 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1351 int is_64)
1352 {
1353 const int r_lock = 6;
1354 const int r_old = 7;
1355 const int r_tmp = 8;
1356 uint32_t *p = buf;
1357
1358 /*
1359 1: lwarx TMP, 0, LOCK
1360 cmpwi TMP, OLD
1361 bne 1b
1362 stwcx. NEW, 0, LOCK
1363 bne 1b */
1364
1365 p += gen_limm (p, r_lock, lock, is_64);
1366 p += gen_limm (p, r_old, old_value, is_64);
1367
1368 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1369 p += GEN_CMPW (p, r_tmp, r_old);
1370 p += GEN_BNE (p, -8);
1371 p += GEN_STWCX (p, r_new, 0, r_lock);
1372 p += GEN_BNE (p, -16);
1373
1374 return p - buf;
1375 }
1376
1377 /* Generate a sequence of instructions for calling a function
1378    at address FN.  Return the number of 32-bit instructions written in BUF.  */
1379
1380 static int
1381 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1382 {
1383 uint32_t *p = buf;
1384
1385   /* Call through r12 so that the callee can calculate its TOC address.  */
1386 p += gen_limm (p, 12, fn, is_64);
1387 if (is_opd)
1388 {
1389 p += GEN_LOAD (p, 11, 12, 16, is_64);
1390 p += GEN_LOAD (p, 2, 12, 8, is_64);
1391 p += GEN_LOAD (p, 12, 12, 0, is_64);
1392 }
1393 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1394 *p++ = 0x4e800421; /* bctrl */
1395
1396 return p - buf;
1397 }
1398
1399 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1400    of the instruction.  This function is used to adjust PC-relative instructions
1401 when copying. */
1402
1403 static void
1404 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1405 {
1406 uint32_t insn, op6;
1407 long rel, newrel;
1408
1409 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1410 op6 = PPC_OP6 (insn);
1411
1412 if (op6 == 18 && (insn & 2) == 0)
1413 {
1414 /* branch && AA = 0 */
1415 rel = PPC_LI (insn);
1416 newrel = (oldloc - *to) + rel;
1417
1418 /* Out of range. Cannot relocate instruction. */
1419 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1420 return;
1421
1422 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1423 }
1424 else if (op6 == 16 && (insn & 2) == 0)
1425 {
1426 /* conditional branch && AA = 0 */
1427
1428 /* If the new relocation is too big for even a 26-bit unconditional
1429 branch, there is nothing we can do. Just abort.
1430
1431 	 Otherwise, if it fits in a 16-bit conditional branch, just
1432 	 copy the instruction and relocate the address.
1433
1434 	 If it is too big for a 16-bit conditional branch, try to invert the
1435 	 condition and jump with a 26-bit branch.  For example,
1436
1437 beq .Lgoto
1438 INSN1
1439
1440 =>
1441
1442 bne 1f (+8)
1443 b .Lgoto
1444 1:INSN1
1445
1446 	 After this transform, we actually jump from *TO+4 instead of *TO,
1447 	 so check the relocation again because it will be one insn farther than
1448 	 before if *TO is after OLDLOC.
1449
1450
1451 	 BDNZT (and the like) is transformed from
1452
1453 bdnzt eq, .Lgoto
1454 INSN1
1455
1456 =>
1457
1458 bdz 1f (+12)
1459 bf eq, 1f (+8)
1460 b .Lgoto
1461 1:INSN1
1462
1463 See also "BO field encodings". */
1464
1465 rel = PPC_BD (insn);
1466 newrel = (oldloc - *to) + rel;
1467
1468 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1469 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1470 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1471 {
1472 newrel -= 4;
1473
1474 /* Out of range. Cannot relocate instruction. */
1475 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1476 return;
1477
1478 if ((PPC_BO (insn) & 0x14) == 0x4)
1479 insn ^= (1 << 24);
1480 else if ((PPC_BO (insn) & 0x14) == 0x10)
1481 insn ^= (1 << 22);
1482
1483 /* Jump over the unconditional branch. */
1484 insn = (insn & ~0xfffc) | 0x8;
1485 target_write_memory (*to, (unsigned char *) &insn, 4);
1486 *to += 4;
1487
1488 	  /* Build an unconditional branch and copy the LK bit.  */
1489 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1490 target_write_memory (*to, (unsigned char *) &insn, 4);
1491 *to += 4;
1492
1493 return;
1494 }
1495 else if ((PPC_BO (insn) & 0x14) == 0)
1496 {
1497 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1498 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1499
1500 newrel -= 8;
1501
1502 /* Out of range. Cannot relocate instruction. */
1503 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1504 return;
1505
1506 /* Copy BI field. */
1507 bf_insn |= (insn & 0x1f0000);
1508
1509 /* Invert condition. */
1510 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1511 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1512
1513 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1514 *to += 4;
1515 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1516 *to += 4;
1517
1518 	  /* Build an unconditional branch and copy the LK bit.  */
1519 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1520 target_write_memory (*to, (unsigned char *) &insn, 4);
1521 *to += 4;
1522
1523 return;
1524 }
1525 else /* (BO & 0x14) == 0x14, branch always. */
1526 {
1527 /* Out of range. Cannot relocate instruction. */
1528 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1529 return;
1530
1531 	  /* Build an unconditional branch and copy the LK bit.  */
1532 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1533 target_write_memory (*to, (unsigned char *) &insn, 4);
1534 *to += 4;
1535
1536 return;
1537 }
1538 }
1539
1540 target_write_memory (*to, (unsigned char *) &insn, 4);
1541 *to += 4;
1542 }
1543
1544 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1545 See target.h for details. */
1546
1547 static int
1548 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1549 CORE_ADDR collector,
1550 CORE_ADDR lockaddr,
1551 ULONGEST orig_size,
1552 CORE_ADDR *jump_entry,
1553 CORE_ADDR *trampoline,
1554 ULONGEST *trampoline_size,
1555 unsigned char *jjump_pad_insn,
1556 ULONGEST *jjump_pad_insn_size,
1557 CORE_ADDR *adjusted_insn_addr,
1558 CORE_ADDR *adjusted_insn_addr_end,
1559 char *err)
1560 {
1561 uint32_t buf[256];
1562 uint32_t *p = buf;
1563 int j, offset;
1564 CORE_ADDR buildaddr = *jump_entry;
1565 const CORE_ADDR entryaddr = *jump_entry;
1566 int rsz, min_frame, frame_size, tp_reg;
1567 #ifdef __powerpc64__
1568 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1569 int is_64 = register_size (regcache->tdesc, 0) == 8;
1570 int is_opd = is_64 && !is_elfv2_inferior ();
1571 #else
1572 int is_64 = 0, is_opd = 0;
1573 #endif
1574
1575 #ifdef __powerpc64__
1576 if (is_64)
1577 {
1578 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1579 rsz = 8;
1580 min_frame = 112;
1581 frame_size = (40 * rsz) + min_frame;
1582 tp_reg = 13;
1583 }
1584 else
1585 {
1586 #endif
1587 rsz = 4;
1588 min_frame = 16;
1589 frame_size = (40 * rsz) + min_frame;
1590 tp_reg = 2;
1591 #ifdef __powerpc64__
1592 }
1593 #endif
1594
1595 /* Stack frame layout for this jump pad,
1596
1597 High thread_area (r13/r2) |
1598 tpoint - collecting_t obj
1599 PC/<tpaddr> | +36
1600 CTR | +35
1601 LR | +34
1602 XER | +33
1603 CR | +32
1604 R31 |
1605 R29 |
1606 ... |
1607 R1 | +1
1608 R0 - collected registers
1609 ... |
1610 ... |
1611 Low Back-chain -
1612
1613
1614 The code flow of this jump pad,
1615
1616 1. Adjust SP
1617 2. Save GPR and SPR
1618 3. Prepare argument
1619 4. Call gdb_collector
1620 5. Restore GPR and SPR
1621 6. Restore SP
1622      7. Build a jump back to the program
1623      8. Copy/relocate the original instruction
1624      9. Build a jump that replaces the original instruction.  */
1625
1626 /* Adjust stack pointer. */
1627 if (is_64)
1628 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1629 else
1630 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1631
1632 /* Store GPRs. Save R1 later, because it had just been modified, but
1633 we want the original value. */
1634 for (j = 2; j < 32; j++)
1635 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1636 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1637 /* Set r0 to the original value of r1 before adjusting stack frame,
1638 and then save it. */
1639 p += GEN_ADDI (p, 0, 1, frame_size);
1640 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1641
1642 /* Save CR, XER, LR, and CTR. */
1643 p += GEN_MFCR (p, 3); /* mfcr r3 */
1644 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1645 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1646 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1647 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1648 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1649 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1650 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1651
1652 /* Save PC<tpaddr> */
1653 p += gen_limm (p, 3, tpaddr, is_64);
1654 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1655
1656
1657 /* Setup arguments to collector. */
1658 /* Set r4 to collected registers. */
1659 p += GEN_ADDI (p, 4, 1, min_frame);
1660 /* Set r3 to TPOINT. */
1661 p += gen_limm (p, 3, tpoint, is_64);
1662
1663 /* Prepare collecting_t object for lock. */
1664 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1665 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1666 /* Set R5 to collecting object. */
1667 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1668
1669 p += GEN_LWSYNC (p);
1670 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1671 p += GEN_LWSYNC (p);
1672
1673 /* Call to collector. */
1674 p += gen_call (p, collector, is_64, is_opd);
1675
1676 /* Simply write 0 to release the lock. */
1677 p += gen_limm (p, 3, lockaddr, is_64);
1678 p += gen_limm (p, 4, 0, is_64);
1679 p += GEN_LWSYNC (p);
1680 p += GEN_STORE (p, 4, 3, 0, is_64);
1681
1682 /* Restore stack and registers. */
1683 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1684 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1685 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1686 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1687 p += GEN_MTCR (p, 3); /* mtcr r3 */
1688 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1689 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1690 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1691
1692 /* Restore GPRs. */
1693 for (j = 2; j < 32; j++)
1694 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1695 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1696 /* Restore SP. */
1697 p += GEN_ADDI (p, 1, 1, frame_size);
1698
1699 /* Flush instructions to inferior memory. */
1700 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1701
1702 /* Now, insert the original instruction to execute in the jump pad. */
1703 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1704 *adjusted_insn_addr_end = *adjusted_insn_addr;
1705 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1706
1707   /* Verify the relocation size.  It should be 4 for a normal copy,
1708      or 8 or 12 for some conditional branches.  */
1709 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1710 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1711 {
1712       sprintf (err, "E.Unexpected instruction length = %d "
1713 		    "when relocating instruction.",
1714 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1715 return 1;
1716 }
1717
1718 buildaddr = *adjusted_insn_addr_end;
1719 p = buf;
1720 /* Finally, write a jump back to the program. */
1721 offset = (tpaddr + 4) - buildaddr;
1722 if (offset >= (1 << 25) || offset < -(1 << 25))
1723 {
1724 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1725 "(offset 0x%x > 26-bit).", offset);
1726 return 1;
1727 }
1728 /* b <tpaddr+4> */
1729 p += GEN_B (p, offset);
1730 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1731 *jump_entry = buildaddr + (p - buf) * 4;
1732
1733 /* The jump pad is now built. Wire in a jump to our jump pad. This
1734 is always done last (by our caller actually), so that we can
1735 install fast tracepoints with threads running. This relies on
1736 the agent's atomic write support. */
1737 offset = entryaddr - tpaddr;
1738 if (offset >= (1 << 25) || offset < -(1 << 25))
1739 {
1740       sprintf (err, "E.Jump to jump pad too far from tracepoint "
1741 "(offset 0x%x > 26-bit).", offset);
1742 return 1;
1743 }
1744 /* b <jentry> */
1745 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1746 *jjump_pad_insn_size = 4;
1747
1748 return 0;
1749 }
1750
1751 /* Returns the minimum instruction length for installing a tracepoint. */
1752
1753 static int
1754 ppc_get_min_fast_tracepoint_insn_len (void)
1755 {
1756 return 4;
1757 }
1758
1759 /* Emits a given buffer into the target at current_insn_ptr. Length
1760 is in units of 32-bit words. */
1761
1762 static void
1763 emit_insns (uint32_t *buf, int n)
1764 {
1765 n = n * sizeof (uint32_t);
1766 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1767 current_insn_ptr += n;
1768 }
1769
1770 #define __EMIT_ASM(NAME, INSNS) \
1771 do \
1772 { \
1773 extern uint32_t start_bcax_ ## NAME []; \
1774 extern uint32_t end_bcax_ ## NAME []; \
1775 emit_insns (start_bcax_ ## NAME, \
1776 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1777 __asm__ (".section .text.__ppcbcax\n\t" \
1778 "start_bcax_" #NAME ":\n\t" \
1779 INSNS "\n\t" \
1780 "end_bcax_" #NAME ":\n\t" \
1781 ".previous\n\t"); \
1782 } while (0)
1783
1784 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1785 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1786
1787 /*
1788
1789 Bytecode execution stack frame - 32-bit
1790
1791 | LR save area (SP + 4)
1792 SP' -> +- Back chain (SP + 0)
1793 	| Save r31 for accessing saved arguments
1794 | Save r30 for bytecode stack pointer
1795 | Save r4 for incoming argument *value
1796 | Save r3 for incoming argument regs
1797 r30 -> +- Bytecode execution stack
1798 |
1799 	| 64 bytes (8 doublewords) initially.
1800 | Expand stack as needed.
1801 |
1802 +-
1803 | Some padding for minimum stack frame and 16-byte alignment.
1804 | 16 bytes.
1805 SP +- Back-chain (SP')
1806
1807 initial frame size
1808 = 16 + (4 * 4) + 64
1809 = 96
1810
1811    r30 is the stack pointer for the bytecode machine.
1812    It should point to the next empty slot, so we can use LDU for pop.
1813    r3 is used to cache the high part of the TOP value.
1814    It was the first argument, a pointer to regs.
1815    r4 is used to cache the low part of the TOP value.
1816    It was the second argument, a pointer to the result.
1817 We should set *result = TOP after leaving this function.
1818
1819 Note:
1820 * To restore stack at epilogue
1821 => sp = r31
1822 * To check stack is big enough for bytecode execution.
1823 => r30 - 8 > SP + 8
1824 * To return execution result.
1825 => 0(r4) = TOP
1826
1827 */
1828
1829 /* Regardless of endianness, register 3 always holds the high part and
1830    register 4 the low part.  These defines are used when the register pair
1831    is stored/loaded.  Likewise, to simplify the code, there is a similar define for 5:6.  */
1832
1833 #if __BYTE_ORDER == __LITTLE_ENDIAN
1834 #define TOP_FIRST "4"
1835 #define TOP_SECOND "3"
1836 #define TMP_FIRST "6"
1837 #define TMP_SECOND "5"
1838 #else
1839 #define TOP_FIRST "3"
1840 #define TOP_SECOND "4"
1841 #define TMP_FIRST "5"
1842 #define TMP_SECOND "6"
1843 #endif
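/* For example, pushing the 64-bit TOP with
   "stw " TOP_FIRST ", 0(30)" and "stw " TOP_SECOND ", 4(30)"
   stores the low word (r4) at the lower address on little-endian and the
   high word (r3) there on big-endian, so the doubleword in inferior
   memory is always in the inferior's native byte order.  */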
1844
1845 /* Emit prologue in inferior memory. See above comments. */
1846
1847 static void
1848 ppc_emit_prologue (void)
1849 {
1850 EMIT_ASM (/* Save return address. */
1851 "mflr 0 \n"
1852 "stw 0, 4(1) \n"
1853 /* Adjust SP. 96 is the initial frame size. */
1854 "stwu 1, -96(1) \n"
1855 /* Save r31, r30 and incoming arguments. */
1856 "stw 31, 96-4(1) \n"
1857 "stw 30, 96-8(1) \n"
1858 "stw 4, 96-12(1) \n"
1859 "stw 3, 96-16(1) \n"
1860 /* Point r31 to the original r1 for accessing arguments. */
1861 "addi 31, 1, 96 \n"
1862 /* Point r30 to the bytecode stack top. */
1863 "addi 30, 1, 64 \n"
1864 /* Initialize r3/TOP to 0. */
1865 "li 3, 0 \n"
1866 "li 4, 0 \n");
1867 }
1868
1869 /* Emit epilogue in inferior memory. See above comments. */
1870
1871 static void
1872 ppc_emit_epilogue (void)
1873 {
1874 EMIT_ASM (/* *result = TOP */
1875 "lwz 5, -12(31) \n"
1876 "stw " TOP_FIRST ", 0(5) \n"
1877 "stw " TOP_SECOND ", 4(5) \n"
1878 /* Restore registers. */
1879 "lwz 31, -4(31) \n"
1880 "lwz 30, -8(31) \n"
1881 /* Restore SP. */
1882 "lwz 1, 0(1) \n"
1883 /* Restore LR. */
1884 "lwz 0, 4(1) \n"
1885 /* Return 0 for no-error. */
1886 "li 3, 0 \n"
1887 "mtlr 0 \n"
1888 "blr \n");
1889 }
1890
1891 /* TOP = stack[--sp] + TOP */
1892
1893 static void
1894 ppc_emit_add (void)
1895 {
1896 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1897 "lwz " TMP_SECOND ", 4(30)\n"
1898 "addc 4, 6, 4 \n"
1899 "adde 3, 5, 3 \n");
1900 }
1901
1902 /* TOP = stack[--sp] - TOP */
1903
1904 static void
1905 ppc_emit_sub (void)
1906 {
1907 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1908 "lwz " TMP_SECOND ", 4(30) \n"
1909 "subfc 4, 4, 6 \n"
1910 "subfe 3, 3, 5 \n");
1911 }
1912
1913 /* TOP = stack[--sp] * TOP */
1914
1915 static void
1916 ppc_emit_mul (void)
1917 {
1918 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1919 "lwz " TMP_SECOND ", 4(30) \n"
1920 "mulhwu 7, 6, 4 \n"
1921 "mullw 3, 6, 3 \n"
1922 "mullw 5, 4, 5 \n"
1923 "mullw 4, 6, 4 \n"
1924 "add 3, 5, 3 \n"
1925 "add 3, 7, 3 \n");
1926 }
1927
1928 /* TOP = stack[--sp] << TOP */
1929
1930 static void
1931 ppc_emit_lsh (void)
1932 {
1933 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1934 "lwz " TMP_SECOND ", 4(30) \n"
1935 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1936 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1937 "slw 5, 5, 4\n" /* Shift high part left */
1938 "slw 4, 6, 4\n" /* Shift low part left */
1939 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1940 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1941 "or 3, 5, 3\n"
1942 "or 3, 7, 3\n"); /* Assemble high part */
1943 }
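/* Worked example for the sequence above, with a shift amount of 40:
   "slw 5, 5, 4" and "slw 4, 6, 4" give 0 (slw/srw yield 0 whenever the
   low six bits of the shift register are 32..63), "srw 3, 6, 3" also
   gives 0 because r3 = 32 - 40 wraps to a shift >= 32, and
   "slw 7, 6, 7" shifts the low word left by 40 - 32 = 8, which becomes
   the new high word.  */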
1944
1945 /* TOP = stack[--sp] >> TOP
1946 (Arithmetic shift right) */
1947
1948 static void
1949 ppc_emit_rsh_signed (void)
1950 {
1951 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1952 "lwz " TMP_SECOND ", 4(30) \n"
1953 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1954 "sraw 3, 5, 4\n" /* Shift high part right */
1955 "cmpwi 7, 1\n"
1956 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1957 "sraw 4, 5, 7\n" /* Shift high to low */
1958 "b 2f\n"
1959 "1:\n"
1960 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1961 "srw 4, 6, 4\n" /* Shift low part right */
1962 "slw 5, 5, 7\n" /* Shift high to low */
1963 "or 4, 4, 5\n" /* Assemble low part */
1964 "2:\n");
1965 }
1966
1967 /* TOP = stack[--sp] >> TOP
1968 (Logical shift right) */
1969
1970 static void
1971 ppc_emit_rsh_unsigned (void)
1972 {
1973 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1974 "lwz " TMP_SECOND ", 4(30) \n"
1975 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1976 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1977 "srw 6, 6, 4\n" /* Shift low part right */
1978 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1979 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1980 "or 6, 6, 3\n"
1981 "srw 3, 5, 4\n" /* Shift high part right */
1982 "or 4, 6, 7\n"); /* Assemble low part */
1983 }
1984
1985 /* Emit code for signed-extension specified by ARG. */
1986
1987 static void
1988 ppc_emit_ext (int arg)
1989 {
1990 switch (arg)
1991 {
1992 case 8:
1993 EMIT_ASM ("extsb 4, 4\n"
1994 "srawi 3, 4, 31");
1995 break;
1996 case 16:
1997 EMIT_ASM ("extsh 4, 4\n"
1998 "srawi 3, 4, 31");
1999 break;
2000 case 32:
2001 EMIT_ASM ("srawi 3, 4, 31");
2002 break;
2003 default:
2004 emit_error = 1;
2005 }
2006 }
2007
2008 /* Emit code for zero-extension specified by ARG. */
2009
2010 static void
2011 ppc_emit_zero_ext (int arg)
2012 {
2013 switch (arg)
2014 {
2015 case 8:
2016 EMIT_ASM ("clrlwi 4,4,24\n"
2017 "li 3, 0\n");
2018 break;
2019 case 16:
2020 EMIT_ASM ("clrlwi 4,4,16\n"
2021 "li 3, 0\n");
2022 break;
2023 case 32:
2024 EMIT_ASM ("li 3, 0");
2025 break;
2026 default:
2027 emit_error = 1;
2028 }
2029 }
2030
2031 /* TOP = !TOP
2032 i.e., TOP = (TOP == 0) ? 1 : 0; */
2033
2034 static void
2035 ppc_emit_log_not (void)
2036 {
2037 EMIT_ASM ("or 4, 3, 4 \n"
2038 "cntlzw 4, 4 \n"
2039 "srwi 4, 4, 5 \n"
2040 "li 3, 0 \n");
2041 }
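/* The sequence above relies on cntlzw: if both words of TOP are zero,
   "or 4, 3, 4" gives 0, cntlzw returns 32, and 32 >> 5 = 1; any nonzero
   value has fewer than 32 leading zeros, so the shift yields 0.  */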
2042
2043 /* TOP = stack[--sp] & TOP */
2044
2045 static void
2046 ppc_emit_bit_and (void)
2047 {
2048 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2049 "lwz " TMP_SECOND ", 4(30) \n"
2050 "and 4, 6, 4 \n"
2051 "and 3, 5, 3 \n");
2052 }
2053
2054 /* TOP = stack[--sp] | TOP */
2055
2056 static void
2057 ppc_emit_bit_or (void)
2058 {
2059 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2060 "lwz " TMP_SECOND ", 4(30) \n"
2061 "or 4, 6, 4 \n"
2062 "or 3, 5, 3 \n");
2063 }
2064
2065 /* TOP = stack[--sp] ^ TOP */
2066
2067 static void
2068 ppc_emit_bit_xor (void)
2069 {
2070 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2071 "lwz " TMP_SECOND ", 4(30) \n"
2072 "xor 4, 6, 4 \n"
2073 "xor 3, 5, 3 \n");
2074 }
2075
2076 /* TOP = ~TOP
2077 i.e., TOP = ~(TOP | TOP) */
2078
2079 static void
2080 ppc_emit_bit_not (void)
2081 {
2082 EMIT_ASM ("nor 3, 3, 3 \n"
2083 "nor 4, 4, 4 \n");
2084 }
2085
2086 /* TOP = stack[--sp] == TOP */
2087
2088 static void
2089 ppc_emit_equal (void)
2090 {
2091 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2092 "lwz " TMP_SECOND ", 4(30) \n"
2093 "xor 4, 6, 4 \n"
2094 "xor 3, 5, 3 \n"
2095 "or 4, 3, 4 \n"
2096 "cntlzw 4, 4 \n"
2097 "srwi 4, 4, 5 \n"
2098 "li 3, 0 \n");
2099 }
2100
2101 /* TOP = stack[--sp] < TOP
2102 (Signed comparison) */
2103
2104 static void
2105 ppc_emit_less_signed (void)
2106 {
2107 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2108 "lwz " TMP_SECOND ", 4(30) \n"
2109 "cmplw 6, 6, 4 \n"
2110 "cmpw 7, 5, 3 \n"
2111 /* CR6 bit 0 = low less and high equal */
2112 "crand 6*4+0, 6*4+0, 7*4+2\n"
2113 /* CR7 bit 0 = (low less and high equal) or high less */
2114 "cror 7*4+0, 7*4+0, 6*4+0\n"
2115 "mfcr 4 \n"
2116 "rlwinm 4, 4, 29, 31, 31 \n"
2117 "li 3, 0 \n");
2118 }
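/* In the sequence above, mfcr copies the whole condition register into
   r4 with CR0 bit 0 in the most significant position, so CR7 bit 0 (the
   combined "less than" result) ends up in bit 28; "rlwinm 4, 4, 29, 31, 31"
   rotates that bit into the least significant position and masks the
   rest, leaving 0 or 1.  ppc_emit_less_unsigned below uses the same
   extraction.  */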
2119
2120 /* TOP = stack[--sp] < TOP
2121 (Unsigned comparison) */
2122
2123 static void
2124 ppc_emit_less_unsigned (void)
2125 {
2126 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2127 "lwz " TMP_SECOND ", 4(30) \n"
2128 "cmplw 6, 6, 4 \n"
2129 "cmplw 7, 5, 3 \n"
2130 /* CR6 bit 0 = low less and high equal */
2131 "crand 6*4+0, 6*4+0, 7*4+2\n"
2132 /* CR7 bit 0 = (low less and high equal) or high less */
2133 "cror 7*4+0, 7*4+0, 6*4+0\n"
2134 "mfcr 4 \n"
2135 "rlwinm 4, 4, 29, 31, 31 \n"
2136 "li 3, 0 \n");
2137 }
2138
2139 /* Access the memory address in TOP in size of SIZE.
2140 Zero-extend the read value. */
2141
2142 static void
2143 ppc_emit_ref (int size)
2144 {
2145 switch (size)
2146 {
2147 case 1:
2148 EMIT_ASM ("lbz 4, 0(4)\n"
2149 "li 3, 0");
2150 break;
2151 case 2:
2152 EMIT_ASM ("lhz 4, 0(4)\n"
2153 "li 3, 0");
2154 break;
2155 case 4:
2156 EMIT_ASM ("lwz 4, 0(4)\n"
2157 "li 3, 0");
2158 break;
2159 case 8:
2160 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2161 EMIT_ASM ("lwz 3, 4(4)\n"
2162 "lwz 4, 0(4)");
2163 else
2164 EMIT_ASM ("lwz 3, 0(4)\n"
2165 "lwz 4, 4(4)");
2166 break;
2167 }
2168 }
2169
2170 /* TOP = NUM */
2171
2172 static void
2173 ppc_emit_const (LONGEST num)
2174 {
2175 uint32_t buf[10];
2176 uint32_t *p = buf;
2177
2178 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2179 p += gen_limm (p, 4, num & 0xffffffff, 0);
2180
2181 emit_insns (buf, p - buf);
2182 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2183 }
2184
2185 /* Set TOP to the value of register REG by calling the get_raw_reg function
2186 with two arguments, the collected register buffer and the register number. */
2187
2188 static void
2189 ppc_emit_reg (int reg)
2190 {
2191 uint32_t buf[13];
2192 uint32_t *p = buf;
2193
2194 /* fctx->regs is passed in r3 and then saved in -16(31). */
2195 p += GEN_LWZ (p, 3, 31, -16);
2196 p += GEN_LI (p, 4, reg); /* li r4, reg */
2197 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2198
2199 emit_insns (buf, p - buf);
2200 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2201
2202 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2203 {
2204 EMIT_ASM ("mr 5, 4\n"
2205 "mr 4, 3\n"
2206 "mr 3, 5\n");
2207 }
2208 }
2209
2210 /* TOP = stack[--sp] */
2211
2212 static void
2213 ppc_emit_pop (void)
2214 {
2215 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2216 "lwz " TOP_SECOND ", 4(30) \n");
2217 }
2218
2219 /* stack[sp++] = TOP
2220
2221 Because we may use up the bytecode stack, expand it by 8 more
2222 doublewords if needed. */
2223
2224 static void
2225 ppc_emit_stack_flush (void)
2226 {
2227 /* Make sure the bytecode stack is big enough before the push.
2228 Otherwise, expand it by 64 more bytes. */
2229
2230 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2231 " stw " TOP_SECOND ", 4(30)\n"
2232 " addi 5, 30, -(8 + 8) \n"
2233 " cmpw 7, 5, 1 \n"
2234 " bgt 7, 1f \n"
2235 " stwu 31, -64(1) \n"
2236 "1:addi 30, 30, -8 \n");
2237 }
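/* Note on the expansion above: "stwu 31, -64(1)" grows the frame by 64
   bytes while storing r31 (the stack pointer at function entry) at the
   new SP, so the back chain remains valid.  */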
2238
2239 /* Swap TOP and stack[sp-1] */
2240
2241 static void
2242 ppc_emit_swap (void)
2243 {
2244 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2245 "lwz " TMP_SECOND ", 12(30) \n"
2246 "stw " TOP_FIRST ", 8(30) \n"
2247 "stw " TOP_SECOND ", 12(30) \n"
2248 "mr 3, 5 \n"
2249 "mr 4, 6 \n");
2250 }
2251
2252 /* Discard N elements in the stack. Also used for ppc64. */
2253
2254 static void
2255 ppc_emit_stack_adjust (int n)
2256 {
2257 uint32_t buf[6];
2258 uint32_t *p = buf;
2259
2260 n = n << 3;
2261 if ((n >> 15) != 0)
2262 {
2263 emit_error = 1;
2264 return;
2265 }
2266
2267 p += GEN_ADDI (p, 30, 30, n);
2268
2269 emit_insns (buf, p - buf);
2270 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2271 }
2272
2273 /* Call function FN. */
2274
2275 static void
2276 ppc_emit_call (CORE_ADDR fn)
2277 {
2278 uint32_t buf[11];
2279 uint32_t *p = buf;
2280
2281 p += gen_call (p, fn, 0, 0);
2282
2283 emit_insns (buf, p - buf);
2284 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2285 }
2286
2287 /* FN's prototype is `LONGEST(*fn)(int)'.
2288 TOP = fn (arg1)
2289 */
2290
2291 static void
2292 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2293 {
2294 uint32_t buf[15];
2295 uint32_t *p = buf;
2296
2297 /* Setup argument. arg1 is a 16-bit value. */
2298 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2299 p += gen_call (p, fn, 0, 0);
2300
2301 emit_insns (buf, p - buf);
2302 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2303
2304 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2305 {
2306 EMIT_ASM ("mr 5, 4\n"
2307 "mr 4, 3\n"
2308 "mr 3, 5\n");
2309 }
2310 }
2311
2312 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2313 fn (arg1, TOP)
2314
2315 TOP should be preserved/restored before/after the call. */
2316
2317 static void
2318 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2319 {
2320 uint32_t buf[21];
2321 uint32_t *p = buf;
2322
2323 /* Save TOP. 0(30) is next-empty. */
2324 p += GEN_STW (p, 3, 30, 0);
2325 p += GEN_STW (p, 4, 30, 4);
2326
2327 /* Setup argument. arg1 is a 16-bit value. */
2328 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2329 {
2330 p += GEN_MR (p, 5, 4);
2331 p += GEN_MR (p, 6, 3);
2332 }
2333 else
2334 {
2335 p += GEN_MR (p, 5, 3);
2336 p += GEN_MR (p, 6, 4);
2337 }
2338 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2339 p += gen_call (p, fn, 0, 0);
2340
2341 /* Restore TOP */
2342 p += GEN_LWZ (p, 3, 30, 0);
2343 p += GEN_LWZ (p, 4, 30, 4);
2344
2345 emit_insns (buf, p - buf);
2346 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2347 }
2348
2349 /* Note in the following goto ops:
2350
2351 When emitting a goto, the target address is later relocated by
2352 write_goto_address. OFFSET_P is the offset of the branch instruction
2353 in the code sequence, and SIZE_P tells ppc_write_goto_address how to
2354 relocate the instruction. In the current implementation, SIZE can be
2355 either 24 or 14, for branch or conditional-branch instructions.
2356 */
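/* Concretely, each goto emitter below ends with a branch to its own
   label ("1:bne 0, 1b", "1:b 1b", ...), i.e. a branch-to-self
   placeholder.  *OFFSET_P records where that placeholder lies within
   the emitted sequence (e.g. 12 in ppc_emit_if_goto, since the branch
   is the fourth 4-byte instruction), and ppc_write_goto_address later
   rewrites its displacement field (the 14-bit BD field or the 24-bit LI
   field) to reach the real target.  */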
2357
2358 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2359
2360 static void
2361 ppc_emit_if_goto (int *offset_p, int *size_p)
2362 {
2363 EMIT_ASM ("or. 3, 3, 4 \n"
2364 "lwzu " TOP_FIRST ", 8(30) \n"
2365 "lwz " TOP_SECOND ", 4(30) \n"
2366 "1:bne 0, 1b \n");
2367
2368 if (offset_p)
2369 *offset_p = 12;
2370 if (size_p)
2371 *size_p = 14;
2372 }
2373
2374 /* Unconditional goto. Also used for ppc64. */
2375
2376 static void
2377 ppc_emit_goto (int *offset_p, int *size_p)
2378 {
2379 EMIT_ASM ("1:b 1b");
2380
2381 if (offset_p)
2382 *offset_p = 0;
2383 if (size_p)
2384 *size_p = 24;
2385 }
2386
2387 /* Goto if stack[--sp] == TOP */
2388
2389 static void
2390 ppc_emit_eq_goto (int *offset_p, int *size_p)
2391 {
2392 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2393 "lwz " TMP_SECOND ", 4(30) \n"
2394 "xor 4, 6, 4 \n"
2395 "xor 3, 5, 3 \n"
2396 "or. 3, 3, 4 \n"
2397 "lwzu " TOP_FIRST ", 8(30) \n"
2398 "lwz " TOP_SECOND ", 4(30) \n"
2399 "1:beq 0, 1b \n");
2400
2401 if (offset_p)
2402 *offset_p = 28;
2403 if (size_p)
2404 *size_p = 14;
2405 }
2406
2407 /* Goto if stack[--sp] != TOP */
2408
2409 static void
2410 ppc_emit_ne_goto (int *offset_p, int *size_p)
2411 {
2412 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2413 "lwz " TMP_SECOND ", 4(30) \n"
2414 "xor 4, 6, 4 \n"
2415 "xor 3, 5, 3 \n"
2416 "or. 3, 3, 4 \n"
2417 "lwzu " TOP_FIRST ", 8(30) \n"
2418 "lwz " TOP_SECOND ", 4(30) \n"
2419 "1:bne 0, 1b \n");
2420
2421 if (offset_p)
2422 *offset_p = 28;
2423 if (size_p)
2424 *size_p = 14;
2425 }
2426
2427 /* Goto if stack[--sp] < TOP */
2428
2429 static void
2430 ppc_emit_lt_goto (int *offset_p, int *size_p)
2431 {
2432 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2433 "lwz " TMP_SECOND ", 4(30) \n"
2434 "cmplw 6, 6, 4 \n"
2435 "cmpw 7, 5, 3 \n"
2436 /* CR6 bit 0 = low less and high equal */
2437 "crand 6*4+0, 6*4+0, 7*4+2\n"
2438 /* CR7 bit 0 = (low less and high equal) or high less */
2439 "cror 7*4+0, 7*4+0, 6*4+0\n"
2440 "lwzu " TOP_FIRST ", 8(30) \n"
2441 "lwz " TOP_SECOND ", 4(30)\n"
2442 "1:blt 7, 1b \n");
2443
2444 if (offset_p)
2445 *offset_p = 32;
2446 if (size_p)
2447 *size_p = 14;
2448 }
2449
2450 /* Goto if stack[--sp] <= TOP */
2451
2452 static void
2453 ppc_emit_le_goto (int *offset_p, int *size_p)
2454 {
2455 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2456 "lwz " TMP_SECOND ", 4(30) \n"
2457 "cmplw 6, 6, 4 \n"
2458 "cmpw 7, 5, 3 \n"
2459 /* CR6 bit 0 = low less/equal and high equal */
2460 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2461 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2462 "cror 7*4+0, 7*4+0, 6*4+0\n"
2463 "lwzu " TOP_FIRST ", 8(30) \n"
2464 "lwz " TOP_SECOND ", 4(30)\n"
2465 "1:blt 7, 1b \n");
2466
2467 if (offset_p)
2468 *offset_p = 32;
2469 if (size_p)
2470 *size_p = 14;
2471 }
2472
2473 /* Goto if stack[--sp] > TOP */
2474
2475 static void
2476 ppc_emit_gt_goto (int *offset_p, int *size_p)
2477 {
2478 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2479 "lwz " TMP_SECOND ", 4(30) \n"
2480 "cmplw 6, 6, 4 \n"
2481 "cmpw 7, 5, 3 \n"
2482 /* CR6 bit 0 = low greater and high equal */
2483 "crand 6*4+0, 6*4+1, 7*4+2\n"
2484 /* CR7 bit 0 = (low greater and high equal) or high greater */
2485 "cror 7*4+0, 7*4+1, 6*4+0\n"
2486 "lwzu " TOP_FIRST ", 8(30) \n"
2487 "lwz " TOP_SECOND ", 4(30)\n"
2488 "1:blt 7, 1b \n");
2489
2490 if (offset_p)
2491 *offset_p = 32;
2492 if (size_p)
2493 *size_p = 14;
2494 }
2495
2496 /* Goto if stack[--sp] >= TOP */
2497
2498 static void
2499 ppc_emit_ge_goto (int *offset_p, int *size_p)
2500 {
2501 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2502 "lwz " TMP_SECOND ", 4(30) \n"
2503 "cmplw 6, 6, 4 \n"
2504 "cmpw 7, 5, 3 \n"
2505 /* CR6 bit 0 = low ge and high equal */
2506 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2507 /* CR7 bit 0 = (low ge and high equal) or high greater */
2508 "cror 7*4+0, 7*4+1, 6*4+0\n"
2509 "lwzu " TOP_FIRST ", 8(30)\n"
2510 "lwz " TOP_SECOND ", 4(30)\n"
2511 "1:blt 7, 1b \n");
2512
2513 if (offset_p)
2514 *offset_p = 32;
2515 if (size_p)
2516 *size_p = 14;
2517 }
2518
2519 /* Relocate a previously emitted branch instruction. FROM is the address
2520 of the branch instruction, TO is the goto target address, and SIZE
2521 is the value we set via *SIZE_P before. Currently, it is either
2522 24 or 14, for branch and conditional-branch instructions respectively.
2523 Also used for ppc64. */
2524
2525 static void
2526 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2527 {
2528 long rel = to - from;
2529 uint32_t insn;
2530 int opcd;
2531
2532 read_inferior_memory (from, (unsigned char *) &insn, 4);
2533 opcd = (insn >> 26) & 0x3f;
2534
2535 switch (size)
2536 {
2537 case 14:
2538 if (opcd != 16
2539 || (rel >= (1 << 15) || rel < -(1 << 15)))
2540 emit_error = 1;
2541 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2542 break;
2543 case 24:
2544 if (opcd != 18
2545 || (rel >= (1 << 25) || rel < -(1 << 25)))
2546 emit_error = 1;
2547 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2548 break;
2549 default:
2550 emit_error = 1;
2551 }
2552
2553 if (!emit_error)
2554 target_write_memory (from, (unsigned char *) &insn, 4);
2555 }
2556
2557 /* Table of emit ops for 32-bit. */
2558
2559 static struct emit_ops ppc_emit_ops_impl =
2560 {
2561 ppc_emit_prologue,
2562 ppc_emit_epilogue,
2563 ppc_emit_add,
2564 ppc_emit_sub,
2565 ppc_emit_mul,
2566 ppc_emit_lsh,
2567 ppc_emit_rsh_signed,
2568 ppc_emit_rsh_unsigned,
2569 ppc_emit_ext,
2570 ppc_emit_log_not,
2571 ppc_emit_bit_and,
2572 ppc_emit_bit_or,
2573 ppc_emit_bit_xor,
2574 ppc_emit_bit_not,
2575 ppc_emit_equal,
2576 ppc_emit_less_signed,
2577 ppc_emit_less_unsigned,
2578 ppc_emit_ref,
2579 ppc_emit_if_goto,
2580 ppc_emit_goto,
2581 ppc_write_goto_address,
2582 ppc_emit_const,
2583 ppc_emit_call,
2584 ppc_emit_reg,
2585 ppc_emit_pop,
2586 ppc_emit_stack_flush,
2587 ppc_emit_zero_ext,
2588 ppc_emit_swap,
2589 ppc_emit_stack_adjust,
2590 ppc_emit_int_call_1,
2591 ppc_emit_void_call_2,
2592 ppc_emit_eq_goto,
2593 ppc_emit_ne_goto,
2594 ppc_emit_lt_goto,
2595 ppc_emit_le_goto,
2596 ppc_emit_gt_goto,
2597 ppc_emit_ge_goto
2598 };
2599
2600 #ifdef __powerpc64__
2601
2602 /*
2603
2604 Bytecode execution stack frame - 64-bit
2605
2606 | LR save area (SP + 16)
2607 | CR save area (SP + 8)
2608 SP' -> +- Back chain (SP + 0)
2609 | Save r31 for accessing saved arguments
2610 | Save r30 for bytecode stack pointer
2611 | Save r4 for incoming argument *value
2612 | Save r3 for incoming argument regs
2613 r30 -> +- Bytecode execution stack
2614 |
2615 | 64 bytes (8 doublewords) initially.
2616 | Expand stack as needed.
2617 |
2618 +-
2619 | Some padding for minimum stack frame.
2620 | 112 for ELFv1.
2621 SP +- Back-chain (SP')
2622
2623 initial frame size
2624 = 112 + (4 * 8) + 64
2625 = 208
2626
2627 r30 is the stack pointer for the bytecode machine.
2628 It should point to the next empty slot, so we can use LDU for pop.
2629 r3 caches the TOP value.
2630 On entry it held the first argument, the pointer to regs.
2631 r4 is the second argument, the pointer to the result.
2632 We should set *result = TOP before returning from this function.
2633
2634 Note:
2635 * To restore stack at epilogue
2636 => sp = r31
2637 * To check stack is big enough for bytecode execution.
2638 => r30 - 8 > SP + 112
2639 * To return execution result.
2640 => 0(r4) = TOP
2641
2642 */
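/* With the 208-byte frame above: the four doubleword saves done before
   the "stdu" end up at offsets 176, 184, 192 and 200 from the new SP,
   the 64-byte bytecode area occupies offsets 112..175, and r30 is
   initialized to SP + 168, the topmost (next-empty) doubleword slot of
   that area.  */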
2643
2644 /* Emit prologue in inferior memory. See above comments. */
2645
2646 static void
2647 ppc64v1_emit_prologue (void)
2648 {
2649 /* On ELFv1, function pointers really point to a function descriptor,
2650 so emit one here. We don't care about the contents of words 1 and 2,
2651 so let them just overlap our code. */
2652 uint64_t opd = current_insn_ptr + 8;
2653 uint32_t buf[2];
2654
2655 /* Mind the strict aliasing rules. */
2656 memcpy (buf, &opd, sizeof buf);
2657 emit_insns(buf, 2);
2658 EMIT_ASM (/* Save return address. */
2659 "mflr 0 \n"
2660 "std 0, 16(1) \n"
2661 /* Save r31, r30 and incoming arguments. */
2662 "std 31, -8(1) \n"
2663 "std 30, -16(1) \n"
2664 "std 4, -24(1) \n"
2665 "std 3, -32(1) \n"
2666 /* Point r31 to the current r1 for accessing arguments. */
2667 "mr 31, 1 \n"
2668 /* Adjust SP. 208 is the initial frame size. */
2669 "stdu 1, -208(1) \n"
2670 /* Point r30 to the bytecode stack top. */
2671 "addi 30, 1, 168 \n"
2672 /* Initialize r3/TOP to 0. */
2673 "li 3, 0 \n");
2674 }
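/* The two words emitted by emit_insns above form the first doubleword
   of the ELFv1 function descriptor: its entry-point field is
   current_insn_ptr + 8, i.e. the first real instruction that follows.
   The descriptor's TOC and environment doublewords are never read here,
   so they simply overlap that code, as noted above.  */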
2675
2676 /* Emit prologue in inferior memory. See above comments. */
2677
2678 static void
2679 ppc64v2_emit_prologue (void)
2680 {
2681 EMIT_ASM (/* Save return address. */
2682 "mflr 0 \n"
2683 "std 0, 16(1) \n"
2684 /* Save r31, r30 and incoming arguments. */
2685 "std 31, -8(1) \n"
2686 "std 30, -16(1) \n"
2687 "std 4, -24(1) \n"
2688 "std 3, -32(1) \n"
2689 /* Point r31 to the current r1 for accessing arguments. */
2690 "mr 31, 1 \n"
2691 /* Adjust SP. 208 is the initial frame size. */
2692 "stdu 1, -208(1) \n"
2693 /* Point r30 to the bytecode stack top. */
2694 "addi 30, 1, 168 \n"
2695 /* Initialize r3/TOP to 0. */
2696 "li 3, 0 \n");
2697 }
2698
2699 /* Emit epilogue in inferior memory. See above comments. */
2700
2701 static void
2702 ppc64_emit_epilogue (void)
2703 {
2704 EMIT_ASM (/* Restore SP. */
2705 "ld 1, 0(1) \n"
2706 /* *result = TOP */
2707 "ld 4, -24(1) \n"
2708 "std 3, 0(4) \n"
2709 /* Restore registers. */
2710 "ld 31, -8(1) \n"
2711 "ld 30, -16(1) \n"
2712 /* Restore LR. */
2713 "ld 0, 16(1) \n"
2714 /* Return 0 for no-error. */
2715 "li 3, 0 \n"
2716 "mtlr 0 \n"
2717 "blr \n");
2718 }
2719
2720 /* TOP = stack[--sp] + TOP */
2721
2722 static void
2723 ppc64_emit_add (void)
2724 {
2725 EMIT_ASM ("ldu 4, 8(30) \n"
2726 "add 3, 4, 3 \n");
2727 }
2728
2729 /* TOP = stack[--sp] - TOP */
2730
2731 static void
2732 ppc64_emit_sub (void)
2733 {
2734 EMIT_ASM ("ldu 4, 8(30) \n"
2735 "sub 3, 4, 3 \n");
2736 }
2737
2738 /* TOP = stack[--sp] * TOP */
2739
2740 static void
2741 ppc64_emit_mul (void)
2742 {
2743 EMIT_ASM ("ldu 4, 8(30) \n"
2744 "mulld 3, 4, 3 \n");
2745 }
2746
2747 /* TOP = stack[--sp] << TOP */
2748
2749 static void
2750 ppc64_emit_lsh (void)
2751 {
2752 EMIT_ASM ("ldu 4, 8(30) \n"
2753 "sld 3, 4, 3 \n");
2754 }
2755
2756 /* TOP = stack[--sp] >> TOP
2757 (Arithmetic shift right) */
2758
2759 static void
2760 ppc64_emit_rsh_signed (void)
2761 {
2762 EMIT_ASM ("ldu 4, 8(30) \n"
2763 "srad 3, 4, 3 \n");
2764 }
2765
2766 /* TOP = stack[--sp] >> TOP
2767 (Logical shift right) */
2768
2769 static void
2770 ppc64_emit_rsh_unsigned (void)
2771 {
2772 EMIT_ASM ("ldu 4, 8(30) \n"
2773 "srd 3, 4, 3 \n");
2774 }
2775
2776 /* Emit code for signed-extension specified by ARG. */
2777
2778 static void
2779 ppc64_emit_ext (int arg)
2780 {
2781 switch (arg)
2782 {
2783 case 8:
2784 EMIT_ASM ("extsb 3, 3");
2785 break;
2786 case 16:
2787 EMIT_ASM ("extsh 3, 3");
2788 break;
2789 case 32:
2790 EMIT_ASM ("extsw 3, 3");
2791 break;
2792 default:
2793 emit_error = 1;
2794 }
2795 }
2796
2797 /* Emit code for zero-extension specified by ARG. */
2798
2799 static void
2800 ppc64_emit_zero_ext (int arg)
2801 {
2802 switch (arg)
2803 {
2804 case 8:
2805 EMIT_ASM ("rldicl 3,3,0,56");
2806 break;
2807 case 16:
2808 EMIT_ASM ("rldicl 3,3,0,48");
2809 break;
2810 case 32:
2811 EMIT_ASM ("rldicl 3,3,0,32");
2812 break;
2813 default:
2814 emit_error = 1;
2815 }
2816 }
2817
2818 /* TOP = !TOP
2819 i.e., TOP = (TOP == 0) ? 1 : 0; */
2820
2821 static void
2822 ppc64_emit_log_not (void)
2823 {
2824 EMIT_ASM ("cntlzd 3, 3 \n"
2825 "srdi 3, 3, 6 \n");
2826 }
2827
2828 /* TOP = stack[--sp] & TOP */
2829
2830 static void
2831 ppc64_emit_bit_and (void)
2832 {
2833 EMIT_ASM ("ldu 4, 8(30) \n"
2834 "and 3, 4, 3 \n");
2835 }
2836
2837 /* TOP = stack[--sp] | TOP */
2838
2839 static void
2840 ppc64_emit_bit_or (void)
2841 {
2842 EMIT_ASM ("ldu 4, 8(30) \n"
2843 "or 3, 4, 3 \n");
2844 }
2845
2846 /* TOP = stack[--sp] ^ TOP */
2847
2848 static void
2849 ppc64_emit_bit_xor (void)
2850 {
2851 EMIT_ASM ("ldu 4, 8(30) \n"
2852 "xor 3, 4, 3 \n");
2853 }
2854
2855 /* TOP = ~TOP
2856 i.e., TOP = ~(TOP | TOP) */
2857
2858 static void
2859 ppc64_emit_bit_not (void)
2860 {
2861 EMIT_ASM ("nor 3, 3, 3 \n");
2862 }
2863
2864 /* TOP = stack[--sp] == TOP */
2865
2866 static void
2867 ppc64_emit_equal (void)
2868 {
2869 EMIT_ASM ("ldu 4, 8(30) \n"
2870 "xor 3, 3, 4 \n"
2871 "cntlzd 3, 3 \n"
2872 "srdi 3, 3, 6 \n");
2873 }
2874
2875 /* TOP = stack[--sp] < TOP
2876 (Signed comparison) */
2877
2878 static void
2879 ppc64_emit_less_signed (void)
2880 {
2881 EMIT_ASM ("ldu 4, 8(30) \n"
2882 "cmpd 7, 4, 3 \n"
2883 "mfcr 3 \n"
2884 "rlwinm 3, 3, 29, 31, 31 \n");
2885 }
2886
2887 /* TOP = stack[--sp] < TOP
2888 (Unsigned comparison) */
2889
2890 static void
2891 ppc64_emit_less_unsigned (void)
2892 {
2893 EMIT_ASM ("ldu 4, 8(30) \n"
2894 "cmpld 7, 4, 3 \n"
2895 "mfcr 3 \n"
2896 "rlwinm 3, 3, 29, 31, 31 \n");
2897 }
2898
2899 /* Access the memory address in TOP in size of SIZE.
2900 Zero-extend the read value. */
2901
2902 static void
2903 ppc64_emit_ref (int size)
2904 {
2905 switch (size)
2906 {
2907 case 1:
2908 EMIT_ASM ("lbz 3, 0(3)");
2909 break;
2910 case 2:
2911 EMIT_ASM ("lhz 3, 0(3)");
2912 break;
2913 case 4:
2914 EMIT_ASM ("lwz 3, 0(3)");
2915 break;
2916 case 8:
2917 EMIT_ASM ("ld 3, 0(3)");
2918 break;
2919 }
2920 }
2921
2922 /* TOP = NUM */
2923
2924 static void
2925 ppc64_emit_const (LONGEST num)
2926 {
2927 uint32_t buf[5];
2928 uint32_t *p = buf;
2929
2930 p += gen_limm (p, 3, num, 1);
2931
2932 emit_insns (buf, p - buf);
2933 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2934 }
2935
2936 /* Set TOP to the value of register REG by calling the get_raw_reg function
2937 with two arguments, the collected register buffer and the register number. */
2938
2939 static void
2940 ppc64v1_emit_reg (int reg)
2941 {
2942 uint32_t buf[15];
2943 uint32_t *p = buf;
2944
2945 /* fctx->regs is passed in r3 and then saved at 176(1), i.e. -32(31). */
2946 p += GEN_LD (p, 3, 31, -32);
2947 p += GEN_LI (p, 4, reg);
2948 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2949 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2950 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2951
2952 emit_insns (buf, p - buf);
2953 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2954 }
2955
2956 /* Likewise, for ELFv2. */
2957
2958 static void
2959 ppc64v2_emit_reg (int reg)
2960 {
2961 uint32_t buf[12];
2962 uint32_t *p = buf;
2963
2964 /* fctx->regs is passed in r3 and then saved at 176(1), i.e. -32(31). */
2965 p += GEN_LD (p, 3, 31, -32);
2966 p += GEN_LI (p, 4, reg);
2967 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2968 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2969 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2970
2971 emit_insns (buf, p - buf);
2972 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2973 }
2974
2975 /* TOP = stack[--sp] */
2976
2977 static void
2978 ppc64_emit_pop (void)
2979 {
2980 EMIT_ASM ("ldu 3, 8(30)");
2981 }
2982
2983 /* stack[sp++] = TOP
2984
2985 Because we may use up the bytecode stack, expand it by 8 more
2986 doublewords if needed. */
2987
2988 static void
2989 ppc64_emit_stack_flush (void)
2990 {
2991 /* Make sure the bytecode stack is big enough before the push.
2992 Otherwise, expand it by 64 more bytes. */
2993
2994 EMIT_ASM (" std 3, 0(30) \n"
2995 " addi 4, 30, -(112 + 8) \n"
2996 " cmpd 7, 4, 1 \n"
2997 " bgt 7, 1f \n"
2998 " stdu 31, -64(1) \n"
2999 "1:addi 30, 30, -8 \n");
3000 }
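/* As in the 32-bit variant, "stdu 31, -64(1)" grows the frame by 64
   bytes while keeping the back chain valid, since r31 still holds the
   stack pointer from function entry.  */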
3001
3002 /* Swap TOP and stack[sp-1] */
3003
3004 static void
3005 ppc64_emit_swap (void)
3006 {
3007 EMIT_ASM ("ld 4, 8(30) \n"
3008 "std 3, 8(30) \n"
3009 "mr 3, 4 \n");
3010 }
3011
3012 /* Call function FN - ELFv1. */
3013
3014 static void
3015 ppc64v1_emit_call (CORE_ADDR fn)
3016 {
3017 uint32_t buf[13];
3018 uint32_t *p = buf;
3019
3020 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3021 p += gen_call (p, fn, 1, 1);
3022 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3023
3024 emit_insns (buf, p - buf);
3025 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3026 }
3027
3028 /* Call function FN - ELFv2. */
3029
3030 static void
3031 ppc64v2_emit_call (CORE_ADDR fn)
3032 {
3033 uint32_t buf[10];
3034 uint32_t *p = buf;
3035
3036 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3037 p += gen_call (p, fn, 1, 0);
3038 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3039
3040 emit_insns (buf, p - buf);
3041 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3042 }
3043
3044 /* FN's prototype is `LONGEST(*fn)(int)'.
3045 TOP = fn (arg1)
3046 */
3047
3048 static void
3049 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3050 {
3051 uint32_t buf[13];
3052 uint32_t *p = buf;
3053
3054 /* Setup argument. arg1 is a 16-bit value. */
3055 p += gen_limm (p, 3, arg1, 1);
3056 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3057 p += gen_call (p, fn, 1, 1);
3058 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3059
3060 emit_insns (buf, p - buf);
3061 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3062 }
3063
3064 /* Likewise for ELFv2. */
3065
3066 static void
3067 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3068 {
3069 uint32_t buf[10];
3070 uint32_t *p = buf;
3071
3072 /* Setup argument. arg1 is a 16-bit value. */
3073 p += gen_limm (p, 3, arg1, 1);
3074 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3075 p += gen_call (p, fn, 1, 0);
3076 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3077
3078 emit_insns (buf, p - buf);
3079 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3080 }
3081
3082 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3083 fn (arg1, TOP)
3084
3085 TOP should be preserved/restored before/after the call. */
3086
3087 static void
3088 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3089 {
3090 uint32_t buf[17];
3091 uint32_t *p = buf;
3092
3093 /* Save TOP. 0(30) is next-empty. */
3094 p += GEN_STD (p, 3, 30, 0);
3095
3096 /* Setup argument. arg1 is a 16-bit value. */
3097 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3098 p += gen_limm (p, 3, arg1, 1);
3099 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3100 p += gen_call (p, fn, 1, 1);
3101 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3102
3103 /* Restore TOP */
3104 p += GEN_LD (p, 3, 30, 0);
3105
3106 emit_insns (buf, p - buf);
3107 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3108 }
3109
3110 /* Likewise for ELFv2. */
3111
3112 static void
3113 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3114 {
3115 uint32_t buf[14];
3116 uint32_t *p = buf;
3117
3118 /* Save TOP. 0(30) is next-empty. */
3119 p += GEN_STD (p, 3, 30, 0);
3120
3121 /* Setup argument. arg1 is a 16-bit value. */
3122 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3123 p += gen_limm (p, 3, arg1, 1);
3124 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3125 p += gen_call (p, fn, 1, 0);
3126 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3127
3128 /* Restore TOP */
3129 p += GEN_LD (p, 3, 30, 0);
3130
3131 emit_insns (buf, p - buf);
3132 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3133 }
3134
3135 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3136
3137 static void
3138 ppc64_emit_if_goto (int *offset_p, int *size_p)
3139 {
3140 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3141 "ldu 3, 8(30) \n"
3142 "1:bne 7, 1b \n");
3143
3144 if (offset_p)
3145 *offset_p = 8;
3146 if (size_p)
3147 *size_p = 14;
3148 }
3149
3150 /* Goto if stack[--sp] == TOP */
3151
3152 static void
3153 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3154 {
3155 EMIT_ASM ("ldu 4, 8(30) \n"
3156 "cmpd 7, 4, 3 \n"
3157 "ldu 3, 8(30) \n"
3158 "1:beq 7, 1b \n");
3159
3160 if (offset_p)
3161 *offset_p = 12;
3162 if (size_p)
3163 *size_p = 14;
3164 }
3165
3166 /* Goto if stack[--sp] != TOP */
3167
3168 static void
3169 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3170 {
3171 EMIT_ASM ("ldu 4, 8(30) \n"
3172 "cmpd 7, 4, 3 \n"
3173 "ldu 3, 8(30) \n"
3174 "1:bne 7, 1b \n");
3175
3176 if (offset_p)
3177 *offset_p = 12;
3178 if (size_p)
3179 *size_p = 14;
3180 }
3181
3182 /* Goto if stack[--sp] < TOP */
3183
3184 static void
3185 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3186 {
3187 EMIT_ASM ("ldu 4, 8(30) \n"
3188 "cmpd 7, 4, 3 \n"
3189 "ldu 3, 8(30) \n"
3190 "1:blt 7, 1b \n");
3191
3192 if (offset_p)
3193 *offset_p = 12;
3194 if (size_p)
3195 *size_p = 14;
3196 }
3197
3198 /* Goto if stack[--sp] <= TOP */
3199
3200 static void
3201 ppc64_emit_le_goto (int *offset_p, int *size_p)
3202 {
3203 EMIT_ASM ("ldu 4, 8(30) \n"
3204 "cmpd 7, 4, 3 \n"
3205 "ldu 3, 8(30) \n"
3206 "1:ble 7, 1b \n");
3207
3208 if (offset_p)
3209 *offset_p = 12;
3210 if (size_p)
3211 *size_p = 14;
3212 }
3213
3214 /* Goto if stack[--sp] > TOP */
3215
3216 static void
3217 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3218 {
3219 EMIT_ASM ("ldu 4, 8(30) \n"
3220 "cmpd 7, 4, 3 \n"
3221 "ldu 3, 8(30) \n"
3222 "1:bgt 7, 1b \n");
3223
3224 if (offset_p)
3225 *offset_p = 12;
3226 if (size_p)
3227 *size_p = 14;
3228 }
3229
3230 /* Goto if stack[--sp] >= TOP */
3231
3232 static void
3233 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3234 {
3235 EMIT_ASM ("ldu 4, 8(30) \n"
3236 "cmpd 7, 4, 3 \n"
3237 "ldu 3, 8(30) \n"
3238 "1:bge 7, 1b \n");
3239
3240 if (offset_p)
3241 *offset_p = 12;
3242 if (size_p)
3243 *size_p = 14;
3244 }
3245
3246 /* Table of emit ops for 64-bit ELFv1. */
3247
3248 static struct emit_ops ppc64v1_emit_ops_impl =
3249 {
3250 ppc64v1_emit_prologue,
3251 ppc64_emit_epilogue,
3252 ppc64_emit_add,
3253 ppc64_emit_sub,
3254 ppc64_emit_mul,
3255 ppc64_emit_lsh,
3256 ppc64_emit_rsh_signed,
3257 ppc64_emit_rsh_unsigned,
3258 ppc64_emit_ext,
3259 ppc64_emit_log_not,
3260 ppc64_emit_bit_and,
3261 ppc64_emit_bit_or,
3262 ppc64_emit_bit_xor,
3263 ppc64_emit_bit_not,
3264 ppc64_emit_equal,
3265 ppc64_emit_less_signed,
3266 ppc64_emit_less_unsigned,
3267 ppc64_emit_ref,
3268 ppc64_emit_if_goto,
3269 ppc_emit_goto,
3270 ppc_write_goto_address,
3271 ppc64_emit_const,
3272 ppc64v1_emit_call,
3273 ppc64v1_emit_reg,
3274 ppc64_emit_pop,
3275 ppc64_emit_stack_flush,
3276 ppc64_emit_zero_ext,
3277 ppc64_emit_swap,
3278 ppc_emit_stack_adjust,
3279 ppc64v1_emit_int_call_1,
3280 ppc64v1_emit_void_call_2,
3281 ppc64_emit_eq_goto,
3282 ppc64_emit_ne_goto,
3283 ppc64_emit_lt_goto,
3284 ppc64_emit_le_goto,
3285 ppc64_emit_gt_goto,
3286 ppc64_emit_ge_goto
3287 };
3288
3289 /* Table of emit ops for 64-bit ELFv2. */
3290
3291 static struct emit_ops ppc64v2_emit_ops_impl =
3292 {
3293 ppc64v2_emit_prologue,
3294 ppc64_emit_epilogue,
3295 ppc64_emit_add,
3296 ppc64_emit_sub,
3297 ppc64_emit_mul,
3298 ppc64_emit_lsh,
3299 ppc64_emit_rsh_signed,
3300 ppc64_emit_rsh_unsigned,
3301 ppc64_emit_ext,
3302 ppc64_emit_log_not,
3303 ppc64_emit_bit_and,
3304 ppc64_emit_bit_or,
3305 ppc64_emit_bit_xor,
3306 ppc64_emit_bit_not,
3307 ppc64_emit_equal,
3308 ppc64_emit_less_signed,
3309 ppc64_emit_less_unsigned,
3310 ppc64_emit_ref,
3311 ppc64_emit_if_goto,
3312 ppc_emit_goto,
3313 ppc_write_goto_address,
3314 ppc64_emit_const,
3315 ppc64v2_emit_call,
3316 ppc64v2_emit_reg,
3317 ppc64_emit_pop,
3318 ppc64_emit_stack_flush,
3319 ppc64_emit_zero_ext,
3320 ppc64_emit_swap,
3321 ppc_emit_stack_adjust,
3322 ppc64v2_emit_int_call_1,
3323 ppc64v2_emit_void_call_2,
3324 ppc64_emit_eq_goto,
3325 ppc64_emit_ne_goto,
3326 ppc64_emit_lt_goto,
3327 ppc64_emit_le_goto,
3328 ppc64_emit_gt_goto,
3329 ppc64_emit_ge_goto
3330 };
3331
3332 #endif
3333
3334 /* Implementation of linux_target_ops method "emit_ops". */
3335
3336 static struct emit_ops *
3337 ppc_emit_ops (void)
3338 {
3339 #ifdef __powerpc64__
3340 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3341
3342 if (register_size (regcache->tdesc, 0) == 8)
3343 {
3344 if (is_elfv2_inferior ())
3345 return &ppc64v2_emit_ops_impl;
3346 else
3347 return &ppc64v1_emit_ops_impl;
3348 }
3349 #endif
3350 return &ppc_emit_ops_impl;
3351 }
3352
3353 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3354
3355 static int
3356 ppc_get_ipa_tdesc_idx (void)
3357 {
3358 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3359 const struct target_desc *tdesc = regcache->tdesc;
3360
3361 #ifdef __powerpc64__
3362 if (tdesc == tdesc_powerpc_64l)
3363 return PPC_TDESC_BASE;
3364 if (tdesc == tdesc_powerpc_altivec64l)
3365 return PPC_TDESC_ALTIVEC;
3366 if (tdesc == tdesc_powerpc_vsx64l)
3367 return PPC_TDESC_VSX;
3368 if (tdesc == tdesc_powerpc_isa205_64l)
3369 return PPC_TDESC_ISA205;
3370 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3371 return PPC_TDESC_ISA205_ALTIVEC;
3372 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3373 return PPC_TDESC_ISA205_VSX;
3374 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3375 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3376 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3377 return PPC_TDESC_ISA207_VSX;
3378 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3379 return PPC_TDESC_ISA207_HTM_VSX;
3380 #endif
3381
3382 if (tdesc == tdesc_powerpc_32l)
3383 return PPC_TDESC_BASE;
3384 if (tdesc == tdesc_powerpc_altivec32l)
3385 return PPC_TDESC_ALTIVEC;
3386 if (tdesc == tdesc_powerpc_vsx32l)
3387 return PPC_TDESC_VSX;
3388 if (tdesc == tdesc_powerpc_isa205_32l)
3389 return PPC_TDESC_ISA205;
3390 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3391 return PPC_TDESC_ISA205_ALTIVEC;
3392 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3393 return PPC_TDESC_ISA205_VSX;
3394 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3395 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3396 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3397 return PPC_TDESC_ISA207_VSX;
3398 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3399 return PPC_TDESC_ISA207_HTM_VSX;
3400 if (tdesc == tdesc_powerpc_e500l)
3401 return PPC_TDESC_E500;
3402
3403 return 0;
3404 }
3405
3406 struct linux_target_ops the_low_target = {
3407 ppc_sw_breakpoint_from_kind,
3408 NULL,
3409 0,
3410 ppc_breakpoint_at,
3411 ppc_supports_z_point_type,
3412 ppc_insert_point,
3413 ppc_remove_point,
3414 NULL,
3415 NULL,
3416 ppc_collect_ptrace_register,
3417 ppc_supply_ptrace_register,
3418 NULL, /* siginfo_fixup */
3419 NULL, /* new_process */
3420 NULL, /* delete_process */
3421 NULL, /* new_thread */
3422 NULL, /* delete_thread */
3423 NULL, /* new_fork */
3424 NULL, /* prepare_to_resume */
3425 NULL, /* process_qsupported */
3426 ppc_supports_tracepoints,
3427 ppc_get_thread_area,
3428 ppc_install_fast_tracepoint_jump_pad,
3429 ppc_emit_ops,
3430 ppc_get_min_fast_tracepoint_insn_len,
3431 NULL, /* supports_range_stepping */
3432 ppc_supports_hardware_single_step,
3433 NULL, /* get_syscall_trapinfo */
3434 ppc_get_ipa_tdesc_idx,
3435 };
3436
3437 /* The linux target ops object. */
3438
3439 linux_process_target *the_linux_target = &the_ppc_target;
3440
3441 void
3442 initialize_low_arch (void)
3443 {
3444 /* Initialize the Linux target descriptions. */
3445
3446 init_registers_powerpc_32l ();
3447 init_registers_powerpc_altivec32l ();
3448 init_registers_powerpc_vsx32l ();
3449 init_registers_powerpc_isa205_32l ();
3450 init_registers_powerpc_isa205_altivec32l ();
3451 init_registers_powerpc_isa205_vsx32l ();
3452 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3453 init_registers_powerpc_isa207_vsx32l ();
3454 init_registers_powerpc_isa207_htm_vsx32l ();
3455 init_registers_powerpc_e500l ();
3456 #if __powerpc64__
3457 init_registers_powerpc_64l ();
3458 init_registers_powerpc_altivec64l ();
3459 init_registers_powerpc_vsx64l ();
3460 init_registers_powerpc_isa205_64l ();
3461 init_registers_powerpc_isa205_altivec64l ();
3462 init_registers_powerpc_isa205_vsx64l ();
3463 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3464 init_registers_powerpc_isa207_vsx64l ();
3465 init_registers_powerpc_isa207_htm_vsx64l ();
3466 #endif
3467
3468 initialize_regsets_info (&ppc_regsets_info);
3469 }