1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
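/* For example (values chosen only for illustration): for the
   unconditional branch insn 0x48000010 ("b .+16"), PPC_OP6 (insn)
   yields 18 and PPC_FIELD (insn, 6, 24) extracts the 24-bit LI field
   0x000004, which PPC_LI sign-extends and shifts left by 2 to give
   the byte displacement 16.  For a backward branch such as
   0x4bfffff0 ("b .-16") the field is 0xfffffc, PPC_SEXT turns it
   into -4, and PPC_LI gives -16.  */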
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 protected:
56
57 void low_arch_setup () override;
58 };
59
60 /* The singleton target ops object. */
61
62 static ppc_target the_ppc_target;
63
64 /* Holds the AT_HWCAP auxv entry. */
65
66 static unsigned long ppc_hwcap;
67
68 /* Holds the AT_HWCAP2 auxv entry. */
69
70 static unsigned long ppc_hwcap2;
71
72
73 #define ppc_num_regs 73
74
75 #ifdef __powerpc64__
76 /* We use a constant for FPSCR instead of PT_FPSCR, because
77 many shipped PPC64 kernels had the wrong value in ptrace.h. */
78 static int ppc_regmap[] =
79 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
80 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
81 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
82 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
83 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
84 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
85 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
86 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
87 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
88 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
89 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
90 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
91 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
92 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
93 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
94 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
95 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
96 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
97 PT_ORIG_R3 * 8, PT_TRAP * 8 };
98 #else
99 /* Currently, don't check/send MQ. */
100 static int ppc_regmap[] =
101 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
102 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
103 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
104 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
105 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
106 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
107 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
108 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
109 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
110 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
111 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
112 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
113 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
114 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
115 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
116 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
117 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
118 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
119 PT_ORIG_R3 * 4, PT_TRAP * 4
120 };
121
122 static int ppc_regmap_e500[] =
123 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
124 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
125 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
126 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
127 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
128 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
129 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
130 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
131 -1, -1, -1, -1,
132 -1, -1, -1, -1,
133 -1, -1, -1, -1,
134 -1, -1, -1, -1,
135 -1, -1, -1, -1,
136 -1, -1, -1, -1,
137 -1, -1, -1, -1,
138 -1, -1, -1, -1,
139 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
140 PT_CTR * 4, PT_XER * 4, -1,
141 PT_ORIG_R3 * 4, PT_TRAP * 4
142 };
143 #endif
144
145 /* Check whether the kernel provides a register set with number
146 REGSET_ID of size REGSETSIZE for process/thread TID. */
147
148 static int
149 ppc_check_regset (int tid, int regset_id, int regsetsize)
150 {
151 void *buf = alloca (regsetsize);
152 struct iovec iov;
153
154 iov.iov_base = buf;
155 iov.iov_len = regsetsize;
156
157 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
158 || errno == ENODATA)
159 return 1;
160 return 0;
161 }
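/* For instance, low_arch_setup below calls
   ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
   to confirm that a regset advertised via HWCAP2 is actually
   provided by the running kernel before enabling the feature.  */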
162
163 static int
164 ppc_cannot_store_register (int regno)
165 {
166 const struct target_desc *tdesc = current_process ()->tdesc;
167
168 #ifndef __powerpc64__
169 /* Some kernels do not allow us to store fpscr. */
170 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
171 && regno == find_regno (tdesc, "fpscr"))
172 return 1;
173 #endif
174
175 /* Some kernels do not allow us to store orig_r3 or trap. */
176 if (regno == find_regno (tdesc, "orig_r3")
177 || regno == find_regno (tdesc, "trap"))
178 return 1;
179
180 return 0;
181 }
182
183 static int
184 ppc_cannot_fetch_register (int regno)
185 {
186 return 0;
187 }
188
189 static void
190 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
191 {
192 memset (buf, 0, sizeof (long));
193
194 if (__BYTE_ORDER == __LITTLE_ENDIAN)
195 {
196 /* Little-endian values always sit at the left end of the buffer. */
197 collect_register (regcache, regno, buf);
198 }
199 else if (__BYTE_ORDER == __BIG_ENDIAN)
200 {
 201       /* Big-endian values sit at the right end of the buffer.  For
 202 	 registers whose sizes are smaller than sizeof (long), we must use
 203 	 padding to access them correctly.  */
204 int size = register_size (regcache->tdesc, regno);
205
206 if (size < sizeof (long))
207 collect_register (regcache, regno, buf + sizeof (long) - size);
208 else
209 collect_register (regcache, regno, buf);
210 }
211 else
212 perror_with_name ("Unexpected byte order");
213 }
214
215 static void
216 ppc_supply_ptrace_register (struct regcache *regcache,
217 int regno, const char *buf)
218 {
219 if (__BYTE_ORDER == __LITTLE_ENDIAN)
220 {
221 /* Little-endian values always sit at the left end of the buffer. */
222 supply_register (regcache, regno, buf);
223 }
224 else if (__BYTE_ORDER == __BIG_ENDIAN)
225 {
 226       /* Big-endian values sit at the right end of the buffer.  For
 227 	 registers whose sizes are smaller than sizeof (long), we must use
 228 	 padding to access them correctly.  */
229 int size = register_size (regcache->tdesc, regno);
230
231 if (size < sizeof (long))
232 supply_register (regcache, regno, buf + sizeof (long) - size);
233 else
234 supply_register (regcache, regno, buf);
235 }
236 else
237 perror_with_name ("Unexpected byte order");
238 }
239
240 static CORE_ADDR
241 ppc_get_pc (struct regcache *regcache)
242 {
243 if (register_size (regcache->tdesc, 0) == 4)
244 {
245 unsigned int pc;
246 collect_register_by_name (regcache, "pc", &pc);
247 return (CORE_ADDR) pc;
248 }
249 else
250 {
251 unsigned long pc;
252 collect_register_by_name (regcache, "pc", &pc);
253 return (CORE_ADDR) pc;
254 }
255 }
256
257 static void
258 ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
259 {
260 if (register_size (regcache->tdesc, 0) == 4)
261 {
262 unsigned int newpc = pc;
263 supply_register_by_name (regcache, "pc", &newpc);
264 }
265 else
266 {
267 unsigned long newpc = pc;
268 supply_register_by_name (regcache, "pc", &newpc);
269 }
270 }
271
272 #ifndef __powerpc64__
273 static int ppc_regmap_adjusted;
274 #endif
275
276
277 /* Correct in either endianness.
278 This instruction is "twge r2, r2", which GDB uses as a software
279 breakpoint. */
280 static const unsigned int ppc_breakpoint = 0x7d821008;
281 #define ppc_breakpoint_len 4
282
283 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
284
285 static const gdb_byte *
286 ppc_sw_breakpoint_from_kind (int kind, int *size)
287 {
288 *size = ppc_breakpoint_len;
289 return (const gdb_byte *) &ppc_breakpoint;
290 }
291
292 static int
293 ppc_breakpoint_at (CORE_ADDR where)
294 {
295 unsigned int insn;
296
297 the_target->read_memory (where, (unsigned char *) &insn, 4);
298 if (insn == ppc_breakpoint)
299 return 1;
300 /* If necessary, recognize more trap instructions here. GDB only uses
301 the one. */
302
303 return 0;
304 }
305
306 /* Implement supports_z_point_type target-ops.
307 Returns true if type Z_TYPE breakpoint is supported.
308
 309    Software breakpoints are handled on the server side, so tracepoints
 310    and breakpoints can be inserted at the same location.  */
311
312 static int
313 ppc_supports_z_point_type (char z_type)
314 {
315 switch (z_type)
316 {
317 case Z_PACKET_SW_BP:
318 return 1;
319 case Z_PACKET_HW_BP:
320 case Z_PACKET_WRITE_WP:
321 case Z_PACKET_ACCESS_WP:
322 default:
323 return 0;
324 }
325 }
326
327 /* Implement insert_point target-ops.
328 Returns 0 on success, -1 on failure and 1 on unsupported. */
329
330 static int
331 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
332 int size, struct raw_breakpoint *bp)
333 {
334 switch (type)
335 {
336 case raw_bkpt_type_sw:
337 return insert_memory_breakpoint (bp);
338
339 case raw_bkpt_type_hw:
340 case raw_bkpt_type_write_wp:
341 case raw_bkpt_type_access_wp:
342 default:
343 /* Unsupported. */
344 return 1;
345 }
346 }
347
348 /* Implement remove_point target-ops.
349 Returns 0 on success, -1 on failure and 1 on unsupported. */
350
351 static int
352 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
353 int size, struct raw_breakpoint *bp)
354 {
355 switch (type)
356 {
357 case raw_bkpt_type_sw:
358 return remove_memory_breakpoint (bp);
359
360 case raw_bkpt_type_hw:
361 case raw_bkpt_type_write_wp:
362 case raw_bkpt_type_access_wp:
363 default:
364 /* Unsupported. */
365 return 1;
366 }
367 }
368
369 /* Provide only a fill function for the general register set. ps_lgetregs
370 will use this for NPTL support. */
371
372 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
373 {
374 int i;
375
376 for (i = 0; i < 32; i++)
377 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
378
379 for (i = 64; i < 70; i++)
380 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
381
382 for (i = 71; i < 73; i++)
383 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
384 }
385
386 /* Program Priority Register regset fill function. */
387
388 static void
389 ppc_fill_pprregset (struct regcache *regcache, void *buf)
390 {
391 char *ppr = (char *) buf;
392
393 collect_register_by_name (regcache, "ppr", ppr);
394 }
395
396 /* Program Priority Register regset store function. */
397
398 static void
399 ppc_store_pprregset (struct regcache *regcache, const void *buf)
400 {
401 const char *ppr = (const char *) buf;
402
403 supply_register_by_name (regcache, "ppr", ppr);
404 }
405
406 /* Data Stream Control Register regset fill function. */
407
408 static void
409 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
410 {
411 char *dscr = (char *) buf;
412
413 collect_register_by_name (regcache, "dscr", dscr);
414 }
415
416 /* Data Stream Control Register regset store function. */
417
418 static void
419 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
420 {
421 const char *dscr = (const char *) buf;
422
423 supply_register_by_name (regcache, "dscr", dscr);
424 }
425
426 /* Target Address Register regset fill function. */
427
428 static void
429 ppc_fill_tarregset (struct regcache *regcache, void *buf)
430 {
431 char *tar = (char *) buf;
432
433 collect_register_by_name (regcache, "tar", tar);
434 }
435
436 /* Target Address Register regset store function. */
437
438 static void
439 ppc_store_tarregset (struct regcache *regcache, const void *buf)
440 {
441 const char *tar = (const char *) buf;
442
443 supply_register_by_name (regcache, "tar", tar);
444 }
445
446 /* Event-Based Branching regset store function. Unless the inferior
 447    has a perf event open, ptrace can fail with ENODATA when reading
 448    or writing the regset.  For reading, the registers
449 will correctly show as unavailable. For writing, gdbserver
450 currently only caches any register writes from P and G packets and
451 the stub always tries to write all the regsets when resuming the
452 inferior, which would result in frequent warnings. For this
453 reason, we don't define a fill function. This also means that the
454 client-side regcache will be dirty if the user tries to write to
455 the EBB registers. G packets that the client sends to write to
456 unrelated registers will also include data for EBB registers, even
457 if they are unavailable. */
458
459 static void
460 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
461 {
462 const char *regset = (const char *) buf;
463
464 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
 465      .dat file it is BESCR, EBBHR, EBBRR.  */
466 supply_register_by_name (regcache, "ebbrr", &regset[0]);
467 supply_register_by_name (regcache, "ebbhr", &regset[8]);
468 supply_register_by_name (regcache, "bescr", &regset[16]);
469 }
470
471 /* Performance Monitoring Unit regset fill function. */
472
473 static void
474 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
475 {
476 char *regset = (char *) buf;
477
478 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
 479      In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER.  */
480 collect_register_by_name (regcache, "siar", &regset[0]);
481 collect_register_by_name (regcache, "sdar", &regset[8]);
482 collect_register_by_name (regcache, "sier", &regset[16]);
483 collect_register_by_name (regcache, "mmcr2", &regset[24]);
484 collect_register_by_name (regcache, "mmcr0", &regset[32]);
485 }
486
487 /* Performance Monitoring Unit regset store function. */
488
489 static void
490 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
491 {
492 const char *regset = (const char *) buf;
493
494 supply_register_by_name (regcache, "siar", &regset[0]);
495 supply_register_by_name (regcache, "sdar", &regset[8]);
496 supply_register_by_name (regcache, "sier", &regset[16]);
497 supply_register_by_name (regcache, "mmcr2", &regset[24]);
498 supply_register_by_name (regcache, "mmcr0", &regset[32]);
499 }
500
501 /* Hardware Transactional Memory special-purpose register regset fill
502 function. */
503
504 static void
505 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
506 {
507 int i, base;
508 char *regset = (char *) buf;
509
510 base = find_regno (regcache->tdesc, "tfhar");
511 for (i = 0; i < 3; i++)
512 collect_register (regcache, base + i, &regset[i * 8]);
513 }
514
515 /* Hardware Transactional Memory special-purpose register regset store
516 function. */
517
518 static void
519 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
520 {
521 int i, base;
522 const char *regset = (const char *) buf;
523
524 base = find_regno (regcache->tdesc, "tfhar");
525 for (i = 0; i < 3; i++)
526 supply_register (regcache, base + i, &regset[i * 8]);
527 }
528
529 /* For the same reasons as the EBB regset, none of the HTM
530 checkpointed regsets have a fill function. These registers are
531 only available if the inferior is in a transaction. */
532
533 /* Hardware Transactional Memory checkpointed general-purpose regset
534 store function. */
535
536 static void
537 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
538 {
539 int i, base, size, endian_offset;
540 const char *regset = (const char *) buf;
541
542 base = find_regno (regcache->tdesc, "cr0");
543 size = register_size (regcache->tdesc, base);
544
545 gdb_assert (size == 4 || size == 8);
546
547 for (i = 0; i < 32; i++)
548 supply_register (regcache, base + i, &regset[i * size]);
549
550 endian_offset = 0;
551
552 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
553 endian_offset = 4;
554
555 supply_register_by_name (regcache, "ccr",
556 &regset[PT_CCR * size + endian_offset]);
557
558 supply_register_by_name (regcache, "cxer",
559 &regset[PT_XER * size + endian_offset]);
560
561 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
562 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
563 }
564
565 /* Hardware Transactional Memory checkpointed floating-point regset
566 store function. */
567
568 static void
569 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
570 {
571 int i, base;
572 const char *regset = (const char *) buf;
573
574 base = find_regno (regcache->tdesc, "cf0");
575
576 for (i = 0; i < 32; i++)
577 supply_register (regcache, base + i, &regset[i * 8]);
578
579 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
580 }
581
582 /* Hardware Transactional Memory checkpointed vector regset store
583 function. */
584
585 static void
586 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
587 {
588 int i, base;
589 const char *regset = (const char *) buf;
590 int vscr_offset = 0;
591
592 base = find_regno (regcache->tdesc, "cvr0");
593
594 for (i = 0; i < 32; i++)
595 supply_register (regcache, base + i, &regset[i * 16]);
596
597 if (__BYTE_ORDER == __BIG_ENDIAN)
598 vscr_offset = 12;
599
600 supply_register_by_name (regcache, "cvscr",
601 &regset[32 * 16 + vscr_offset]);
602
603 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
604 }
605
606 /* Hardware Transactional Memory checkpointed vector-scalar regset
607 store function. */
608
609 static void
610 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
611 {
612 int i, base;
613 const char *regset = (const char *) buf;
614
615 base = find_regno (regcache->tdesc, "cvs0h");
616 for (i = 0; i < 32; i++)
617 supply_register (regcache, base + i, &regset[i * 8]);
618 }
619
620 /* Hardware Transactional Memory checkpointed Program Priority
621 Register regset store function. */
622
623 static void
624 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
625 {
626 const char *cppr = (const char *) buf;
627
628 supply_register_by_name (regcache, "cppr", cppr);
629 }
630
631 /* Hardware Transactional Memory checkpointed Data Stream Control
632 Register regset store function. */
633
634 static void
635 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
636 {
637 const char *cdscr = (const char *) buf;
638
639 supply_register_by_name (regcache, "cdscr", cdscr);
640 }
641
642 /* Hardware Transactional Memory checkpointed Target Address Register
643 regset store function. */
644
645 static void
646 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
647 {
648 const char *ctar = (const char *) buf;
649
650 supply_register_by_name (regcache, "ctar", ctar);
651 }
652
653 static void
654 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
655 {
656 int i, base;
657 char *regset = (char *) buf;
658
659 base = find_regno (regcache->tdesc, "vs0h");
660 for (i = 0; i < 32; i++)
661 collect_register (regcache, base + i, &regset[i * 8]);
662 }
663
664 static void
665 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
666 {
667 int i, base;
668 const char *regset = (const char *) buf;
669
670 base = find_regno (regcache->tdesc, "vs0h");
671 for (i = 0; i < 32; i++)
672 supply_register (regcache, base + i, &regset[i * 8]);
673 }
674
675 static void
676 ppc_fill_vrregset (struct regcache *regcache, void *buf)
677 {
678 int i, base;
679 char *regset = (char *) buf;
680 int vscr_offset = 0;
681
682 base = find_regno (regcache->tdesc, "vr0");
683 for (i = 0; i < 32; i++)
684 collect_register (regcache, base + i, &regset[i * 16]);
685
686 if (__BYTE_ORDER == __BIG_ENDIAN)
687 vscr_offset = 12;
688
689 collect_register_by_name (regcache, "vscr",
690 &regset[32 * 16 + vscr_offset]);
691
692 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
693 }
694
695 static void
696 ppc_store_vrregset (struct regcache *regcache, const void *buf)
697 {
698 int i, base;
699 const char *regset = (const char *) buf;
700 int vscr_offset = 0;
701
702 base = find_regno (regcache->tdesc, "vr0");
703 for (i = 0; i < 32; i++)
704 supply_register (regcache, base + i, &regset[i * 16]);
705
706 if (__BYTE_ORDER == __BIG_ENDIAN)
707 vscr_offset = 12;
708
709 supply_register_by_name (regcache, "vscr",
710 &regset[32 * 16 + vscr_offset]);
711 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
712 }
713
714 struct gdb_evrregset_t
715 {
716 unsigned long evr[32];
717 unsigned long long acc;
718 unsigned long spefscr;
719 };
720
721 static void
722 ppc_fill_evrregset (struct regcache *regcache, void *buf)
723 {
724 int i, ev0;
725 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
726
727 ev0 = find_regno (regcache->tdesc, "ev0h");
728 for (i = 0; i < 32; i++)
729 collect_register (regcache, ev0 + i, &regset->evr[i]);
730
731 collect_register_by_name (regcache, "acc", &regset->acc);
732 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
733 }
734
735 static void
736 ppc_store_evrregset (struct regcache *regcache, const void *buf)
737 {
738 int i, ev0;
739 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
740
741 ev0 = find_regno (regcache->tdesc, "ev0h");
742 for (i = 0; i < 32; i++)
743 supply_register (regcache, ev0 + i, &regset->evr[i]);
744
745 supply_register_by_name (regcache, "acc", &regset->acc);
746 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
747 }
748
749 /* Support for hardware single step. */
750
751 static int
752 ppc_supports_hardware_single_step (void)
753 {
754 return 1;
755 }
756
757 static struct regset_info ppc_regsets[] = {
758 /* List the extra register sets before GENERAL_REGS. That way we will
759 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
760 general registers. Some kernels support these, but not the newer
761 PPC_PTRACE_GETREGS. */
762 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
763 NULL, ppc_store_tm_ctarregset },
764 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
765 NULL, ppc_store_tm_cdscrregset },
766 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
767 NULL, ppc_store_tm_cpprregset },
768 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
769 NULL, ppc_store_tm_cvsxregset },
770 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
771 NULL, ppc_store_tm_cvrregset },
772 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
773 NULL, ppc_store_tm_cfprregset },
774 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
775 NULL, ppc_store_tm_cgprregset },
776 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
777 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
778 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
779 NULL, ppc_store_ebbregset },
780 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
781 ppc_fill_pmuregset, ppc_store_pmuregset },
782 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
783 ppc_fill_tarregset, ppc_store_tarregset },
784 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
785 ppc_fill_pprregset, ppc_store_pprregset },
786 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
787 ppc_fill_dscrregset, ppc_store_dscrregset },
788 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
789 ppc_fill_vsxregset, ppc_store_vsxregset },
790 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
791 ppc_fill_vrregset, ppc_store_vrregset },
792 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
793 ppc_fill_evrregset, ppc_store_evrregset },
794 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
795 NULL_REGSET
796 };
797
798 static struct usrregs_info ppc_usrregs_info =
799 {
800 ppc_num_regs,
801 ppc_regmap,
802 };
803
804 static struct regsets_info ppc_regsets_info =
805 {
806 ppc_regsets, /* regsets */
807 0, /* num_regsets */
808 NULL, /* disabled_regsets */
809 };
810
811 static struct regs_info myregs_info =
812 {
813 NULL, /* regset_bitmap */
814 &ppc_usrregs_info,
815 &ppc_regsets_info
816 };
817
818 const regs_info *
819 ppc_target::get_regs_info ()
820 {
821 return &myregs_info;
822 }
823
824 void
825 ppc_target::low_arch_setup ()
826 {
827 const struct target_desc *tdesc;
828 struct regset_info *regset;
829 struct ppc_linux_features features = ppc_linux_no_features;
830
831 int tid = lwpid_of (current_thread);
832
833 features.wordsize = ppc_linux_target_wordsize (tid);
834
835 if (features.wordsize == 4)
836 tdesc = tdesc_powerpc_32l;
837 else
838 tdesc = tdesc_powerpc_64l;
839
840 current_process ()->tdesc = tdesc;
841
842 /* The value of current_process ()->tdesc needs to be set for this
843 call. */
844 ppc_hwcap = linux_get_hwcap (features.wordsize);
845 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
846
847 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
848
849 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
850 features.vsx = true;
851
852 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
853 features.altivec = true;
854
855 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
856 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
857 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
858 {
859 features.ppr_dscr = true;
860 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
861 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
862 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
863 && ppc_check_regset (tid, NT_PPC_TAR,
864 PPC_LINUX_SIZEOF_TARREGSET)
865 && ppc_check_regset (tid, NT_PPC_EBB,
866 PPC_LINUX_SIZEOF_EBBREGSET)
867 && ppc_check_regset (tid, NT_PPC_PMU,
868 PPC_LINUX_SIZEOF_PMUREGSET))
869 {
870 features.isa207 = true;
871 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
872 && ppc_check_regset (tid, NT_PPC_TM_SPR,
873 PPC_LINUX_SIZEOF_TM_SPRREGSET))
874 features.htm = true;
875 }
876 }
877
878 tdesc = ppc_linux_match_description (features);
879
880 /* On 32-bit machines, check for SPE registers.
 881      Set the low target's regmap field appropriately.  */
882 #ifndef __powerpc64__
883 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
884 tdesc = tdesc_powerpc_e500l;
885
886 if (!ppc_regmap_adjusted)
887 {
888 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
889 ppc_usrregs_info.regmap = ppc_regmap_e500;
890
891 /* If the FPSCR is 64-bit wide, we need to fetch the whole
892 64-bit slot and not just its second word. The PT_FPSCR
893 supplied in a 32-bit GDB compilation doesn't reflect
894 this. */
895 if (register_size (tdesc, 70) == 8)
896 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
897
898 ppc_regmap_adjusted = 1;
899 }
900 #endif
901
902 current_process ()->tdesc = tdesc;
903
904 for (regset = ppc_regsets; regset->size >= 0; regset++)
905 switch (regset->get_request)
906 {
907 case PTRACE_GETVRREGS:
908 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
909 break;
910 case PTRACE_GETVSXREGS:
911 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
912 break;
913 case PTRACE_GETEVRREGS:
914 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
915 regset->size = 32 * 4 + 8 + 4;
916 else
917 regset->size = 0;
918 break;
919 case PTRACE_GETREGSET:
920 switch (regset->nt_type)
921 {
922 case NT_PPC_PPR:
923 regset->size = (features.ppr_dscr ?
924 PPC_LINUX_SIZEOF_PPRREGSET : 0);
925 break;
926 case NT_PPC_DSCR:
927 regset->size = (features.ppr_dscr ?
928 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
929 break;
930 case NT_PPC_TAR:
931 regset->size = (features.isa207 ?
932 PPC_LINUX_SIZEOF_TARREGSET : 0);
933 break;
934 case NT_PPC_EBB:
935 regset->size = (features.isa207 ?
936 PPC_LINUX_SIZEOF_EBBREGSET : 0);
937 break;
938 case NT_PPC_PMU:
939 regset->size = (features.isa207 ?
940 PPC_LINUX_SIZEOF_PMUREGSET : 0);
941 break;
942 case NT_PPC_TM_SPR:
943 regset->size = (features.htm ?
944 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
945 break;
946 case NT_PPC_TM_CGPR:
947 if (features.wordsize == 4)
948 regset->size = (features.htm ?
949 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
950 else
951 regset->size = (features.htm ?
952 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
953 break;
954 case NT_PPC_TM_CFPR:
955 regset->size = (features.htm ?
956 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
957 break;
958 case NT_PPC_TM_CVMX:
959 regset->size = (features.htm ?
960 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
961 break;
962 case NT_PPC_TM_CVSX:
963 regset->size = (features.htm ?
964 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
965 break;
966 case NT_PPC_TM_CPPR:
967 regset->size = (features.htm ?
968 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
969 break;
970 case NT_PPC_TM_CDSCR:
971 regset->size = (features.htm ?
972 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
973 break;
974 case NT_PPC_TM_CTAR:
975 regset->size = (features.htm ?
976 PPC_LINUX_SIZEOF_CTARREGSET : 0);
977 break;
978 default:
979 break;
980 }
981 break;
982 default:
983 break;
984 }
985 }
986
987 /* Implementation of linux_target_ops method "supports_tracepoints". */
988
989 static int
990 ppc_supports_tracepoints (void)
991 {
992 return 1;
993 }
994
995 /* Get the thread area address. This is used to recognize which
996 thread is which when tracing with the in-process agent library. We
997 don't read anything from the address, and treat it as opaque; it's
998 the address itself that we assume is unique per-thread. */
999
1000 static int
1001 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
1002 {
1003 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1004 struct thread_info *thr = get_lwp_thread (lwp);
1005 struct regcache *regcache = get_thread_regcache (thr, 1);
1006 ULONGEST tp = 0;
1007
1008 #ifdef __powerpc64__
1009 if (register_size (regcache->tdesc, 0) == 8)
1010 collect_register_by_name (regcache, "r13", &tp);
1011 else
1012 #endif
1013 collect_register_by_name (regcache, "r2", &tp);
1014
1015 *addr = tp;
1016
1017 return 0;
1018 }
1019
1020 #ifdef __powerpc64__
1021
1022 /* Older glibc doesn't provide this. */
1023
1024 #ifndef EF_PPC64_ABI
1025 #define EF_PPC64_ABI 3
1026 #endif
1027
1028 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1029 inferiors. */
1030
1031 static int
1032 is_elfv2_inferior (void)
1033 {
1034 /* To be used as fallback if we're unable to determine the right result -
1035 assume inferior uses the same ABI as gdbserver. */
1036 #if _CALL_ELF == 2
1037 const int def_res = 1;
1038 #else
1039 const int def_res = 0;
1040 #endif
1041 CORE_ADDR phdr;
1042 Elf64_Ehdr ehdr;
1043
1044 const struct target_desc *tdesc = current_process ()->tdesc;
1045 int wordsize = register_size (tdesc, 0);
1046
1047 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1048 return def_res;
1049
1050 /* Assume ELF header is at the beginning of the page where program headers
1051 are located. If it doesn't look like one, bail. */
1052
1053 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1054 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1055 return def_res;
1056
1057 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1058 }
1059
1060 #endif
1061
1062 /* Generate a ds-form instruction in BUF and return the number of bytes written.
1063
1064 0 6 11 16 30 32
1065 | OPCD | RST | RA | DS |XO| */
1066
1067 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1068 static int
1069 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1070 {
1071 uint32_t insn;
1072
1073 gdb_assert ((opcd & ~0x3f) == 0);
1074 gdb_assert ((rst & ~0x1f) == 0);
1075 gdb_assert ((ra & ~0x1f) == 0);
1076 gdb_assert ((xo & ~0x3) == 0);
1077
1078 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1079 *buf = (opcd << 26) | insn;
1080 return 1;
1081 }
1082
1083 /* The following are frequently used ds-form instructions.  */
1084
1085 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1086 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1087 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1088 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
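/* As an illustration of the encoding (sample operands only):
   GEN_STDU (buf, 1, 1, -96) produces 0xf821ffa1, i.e.
   "stdu r1, -96(r1)"; the byte offset is placed in the DS field with
   its low two bits replaced by the XO value, as gen_ds_form does
   above.  */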
1089
1090 /* Generate a d-form instruction in BUF.
1091
1092 0 6 11 16 32
1093 | OPCD | RST | RA | D | */
1094
1095 static int
1096 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1097 {
1098 uint32_t insn;
1099
1100 gdb_assert ((opcd & ~0x3f) == 0);
1101 gdb_assert ((rst & ~0x1f) == 0);
1102 gdb_assert ((ra & ~0x1f) == 0);
1103
1104 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1105 *buf = (opcd << 26) | insn;
1106 return 1;
1107 }
1108
1109 /* The following are frequently used d-form instructions.  */
1110
1111 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1112 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1113 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1114 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1115 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1116 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1117 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1118 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1119 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1120
1121 /* Generate an xfx-form instruction in BUF and return the number of bytes
1122 written.
1123
1124 0 6 11 21 31 32
1125 | OPCD | RST | RI | XO |/| */
1126
1127 static int
1128 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1129 {
1130 uint32_t insn;
1131 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1132
1133 gdb_assert ((opcd & ~0x3f) == 0);
1134 gdb_assert ((rst & ~0x1f) == 0);
1135 gdb_assert ((xo & ~0x3ff) == 0);
1136
1137 insn = (rst << 21) | (n << 11) | (xo << 1);
1138 *buf = (opcd << 26) | insn;
1139 return 1;
1140 }
1141
1142 /* The following are frequently used xfx-form instructions.  */
1143
1144 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1145 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1146 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1147 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1148 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1149 E & 0xf, 598)
1150 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
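/* As an illustration (sample operands only): GEN_MFSPR (buf, 12, 9)
   produces 0x7d8902a6 ("mfctr r12") and GEN_MTSPR (buf, 12, 9)
   produces 0x7d8903a6 ("mtctr r12"); the ten-bit SPR number is
   encoded with its two five-bit halves swapped, which is what the
   computation of N in gen_xfx_form does.  */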
1151
1152
1153 /* Generate an x-form instruction in BUF and return the number of bytes written.
1154
1155 0 6 11 16 21 31 32
1156 | OPCD | RST | RA | RB | XO |RC| */
1157
1158 static int
1159 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1160 {
1161 uint32_t insn;
1162
1163 gdb_assert ((opcd & ~0x3f) == 0);
1164 gdb_assert ((rst & ~0x1f) == 0);
1165 gdb_assert ((ra & ~0x1f) == 0);
1166 gdb_assert ((rb & ~0x1f) == 0);
1167 gdb_assert ((xo & ~0x3ff) == 0);
1168 gdb_assert ((rc & ~1) == 0);
1169
1170 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1171 *buf = (opcd << 26) | insn;
1172 return 1;
1173 }
1174
1175 /* The following are frequently used x-form instructions.  */
1176
1177 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1178 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1179 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1180 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1181 /* Assume bf = cr7. */
1182 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
1183
1184
1185 /* Generate an md-form instruction in BUF and return the number of bytes written.
1186
1187 0 6 11 16 21 27 30 31 32
1188 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1189
1190 static int
1191 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1192 int xo, int rc)
1193 {
1194 uint32_t insn;
1195 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1196 unsigned int sh0_4 = sh & 0x1f;
1197 unsigned int sh5 = (sh >> 5) & 1;
1198
1199 gdb_assert ((opcd & ~0x3f) == 0);
1200 gdb_assert ((rs & ~0x1f) == 0);
1201 gdb_assert ((ra & ~0x1f) == 0);
1202 gdb_assert ((sh & ~0x3f) == 0);
1203 gdb_assert ((mb & ~0x3f) == 0);
1204 gdb_assert ((xo & ~0x7) == 0);
1205 gdb_assert ((rc & ~0x1) == 0);
1206
1207 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1208 | (sh5 << 1) | (xo << 2) | (rc & 1);
1209 *buf = (opcd << 26) | insn;
1210 return 1;
1211 }
1212
1213 /* The following are frequently used md-form instructions. */
1214
1215 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1216 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1217 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1218 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
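/* Note: GEN_RLDICL (buf, reg, reg, 0, 32) is the usual
   "clrldi reg, reg, 32" idiom (rotate by 0, keep only the low 32
   bits); gen_limm below uses it to clear the upper half after
   loading a 32-bit immediate whose sign bit is set.  */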
1219
1220 /* Generate an i-form instruction in BUF and return the number of bytes written.
1221
1222 0 6 30 31 32
1223 | OPCD | LI |AA|LK| */
1224
1225 static int
1226 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1227 {
1228 uint32_t insn;
1229
1230 gdb_assert ((opcd & ~0x3f) == 0);
1231
1232 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1233 *buf = (opcd << 26) | insn;
1234 return 1;
1235 }
1236
1237 /* The following are frequently used i-form instructions. */
1238
1239 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1240 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1241
1242 /* Generate a b-form instruction in BUF and return the number of bytes written.
1243
1244 0 6 11 16 30 31 32
1245 | OPCD | BO | BI | BD |AA|LK| */
1246
1247 static int
1248 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1249 int aa, int lk)
1250 {
1251 uint32_t insn;
1252
1253 gdb_assert ((opcd & ~0x3f) == 0);
1254 gdb_assert ((bo & ~0x1f) == 0);
1255 gdb_assert ((bi & ~0x1f) == 0);
1256
1257 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1258 *buf = (opcd << 26) | insn;
1259 return 1;
1260 }
1261
1262 /* The following are frequently used b-form instructions. */
1263 /* Assume bi = cr7. */
1264 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
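/* As an illustration (sample displacement only): GEN_BNE (buf, 8)
   produces 0x409e0008, "bne cr7, .+8", pairing with GEN_CMPW above,
   which always compares into cr7.  gen_atomic_xchg below uses
   negative displacements to branch back to the lwarx.  */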
1265
1266 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1267    respectively.  They are primarily used to save/restore GPRs in the
1268    jump pad, not for bytecode compilation.  */
1269
1270 #ifdef __powerpc64__
1271 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1272 GEN_LD (buf, rt, ra, si) : \
1273 GEN_LWZ (buf, rt, ra, si))
1274 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1275 GEN_STD (buf, rt, ra, si) : \
1276 GEN_STW (buf, rt, ra, si))
1277 #else
1278 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1279 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1280 #endif
1281
1282 /* Generate a sequence of instructions to load IMM into register REG.
1283 Write the instructions in BUF and return the number of bytes written. */
1284
1285 static int
1286 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1287 {
1288 uint32_t *p = buf;
1289
1290 if ((imm + 32768) < 65536)
1291 {
1292 /* li reg, imm[15:0] */
1293 p += GEN_LI (p, reg, imm);
1294 }
1295 else if ((imm >> 32) == 0)
1296 {
1297 /* lis reg, imm[31:16]
1298 ori reg, reg, imm[15:0]
1299 rldicl reg, reg, 0, 32 */
1300 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1301 if ((imm & 0xffff) != 0)
1302 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1303 /* Clear upper 32-bit if sign-bit is set. */
1304 if (imm & (1u << 31) && is_64)
1305 p += GEN_RLDICL (p, reg, reg, 0, 32);
1306 }
1307 else
1308 {
1309 gdb_assert (is_64);
1310 /* lis reg, <imm[63:48]>
1311 ori reg, reg, <imm[48:32]>
1312 rldicr reg, reg, 32, 31
1313 oris reg, reg, <imm[31:16]>
1314 ori reg, reg, <imm[15:0]> */
1315 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1316 if (((imm >> 32) & 0xffff) != 0)
1317 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1318 p += GEN_RLDICR (p, reg, reg, 32, 31);
1319 if (((imm >> 16) & 0xffff) != 0)
1320 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1321 if ((imm & 0xffff) != 0)
1322 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1323 }
1324
1325 return p - buf;
1326 }
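/* A sketch of the sequences gen_limm emits (immediates chosen only
   for illustration): loading 200 emits a single "li reg, 200", while
   loading 0x1234deadbeef5678 with IS_64 set emits
       lis    reg, 0x1234
       ori    reg, reg, 0xdead
       rldicr reg, reg, 32, 31
       oris   reg, reg, 0xbeef
       ori    reg, reg, 0x5678  */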
1327
1328 /* Generate a sequence to atomically exchange a value at location LOCK.
1329    This code sequence clobbers r6, r7, r8.  LOCK is the location for
1330    the atomic exchange, OLD_VALUE is the expected old value stored at
1331    that location, and R_NEW is the register holding the new value.  */
1332
1333 static int
1334 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1335 int is_64)
1336 {
1337 const int r_lock = 6;
1338 const int r_old = 7;
1339 const int r_tmp = 8;
1340 uint32_t *p = buf;
1341
1342 /*
1343 1: lwarx TMP, 0, LOCK
1344 cmpwi TMP, OLD
1345 bne 1b
1346 stwcx. NEW, 0, LOCK
1347 bne 1b */
1348
1349 p += gen_limm (p, r_lock, lock, is_64);
1350 p += gen_limm (p, r_old, old_value, is_64);
1351
1352 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1353 p += GEN_CMPW (p, r_tmp, r_old);
1354 p += GEN_BNE (p, -8);
1355 p += GEN_STWCX (p, r_new, 0, r_lock);
1356 p += GEN_BNE (p, -16);
1357
1358 return p - buf;
1359 }
1360
1361 /* Generate a sequence of instructions for calling a function
1362    at address FN.  Return the number of bytes written in BUF.  */
1363
1364 static int
1365 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1366 {
1367 uint32_t *p = buf;
1368
1369   /* The call must go through r12 so the callee can calculate its TOC address.  */
1370 p += gen_limm (p, 12, fn, is_64);
1371 if (is_opd)
1372 {
1373 p += GEN_LOAD (p, 11, 12, 16, is_64);
1374 p += GEN_LOAD (p, 2, 12, 8, is_64);
1375 p += GEN_LOAD (p, 12, 12, 0, is_64);
1376 }
1377 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1378 *p++ = 0x4e800421; /* bctrl */
1379
1380 return p - buf;
1381 }
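/* When IS_OPD is set (ELFv1), FN is the address of a function
   descriptor: three doublewords holding the entry point (offset 0),
   the TOC pointer (offset 8) and the environment pointer (offset 16).
   The three GEN_LOADs above move them into r12, r2 and r11
   respectively before the bctrl.  */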
1382
1383 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1384 of instruction. This function is used to adjust pc-relative instructions
1385 when copying. */
1386
1387 static void
1388 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1389 {
1390 uint32_t insn, op6;
1391 long rel, newrel;
1392
1393 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1394 op6 = PPC_OP6 (insn);
1395
1396 if (op6 == 18 && (insn & 2) == 0)
1397 {
1398 /* branch && AA = 0 */
1399 rel = PPC_LI (insn);
1400 newrel = (oldloc - *to) + rel;
1401
1402 /* Out of range. Cannot relocate instruction. */
1403 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1404 return;
1405
1406 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1407 }
1408 else if (op6 == 16 && (insn & 2) == 0)
1409 {
1410 /* conditional branch && AA = 0 */
1411
1412 /* If the new relocation is too big for even a 26-bit unconditional
1413 branch, there is nothing we can do. Just abort.
1414
1415 	 Otherwise, if it fits in a 16-bit conditional branch, just
1416 copy the instruction and relocate the address.
1417
1418 	 If it is too big for a 16-bit conditional branch, try to invert the
1419 	 condition and jump with a 26-bit branch.  For example,
1420
1421 beq .Lgoto
1422 INSN1
1423
1424 =>
1425
1426 bne 1f (+8)
1427 b .Lgoto
1428 1:INSN1
1429
1430 	 After this transform, we actually jump from *TO+4 instead of *TO,
1431 	 so check the relocation again because it will be one insn farther
1432 	 than before if *TO is after OLDLOC.
1433
1434
1435 	 BDNZT (and the like) is transformed from
1436
1437 bdnzt eq, .Lgoto
1438 INSN1
1439
1440 =>
1441
1442 bdz 1f (+12)
1443 bf eq, 1f (+8)
1444 b .Lgoto
1445 1:INSN1
1446
1447 See also "BO field encodings". */
1448
1449 rel = PPC_BD (insn);
1450 newrel = (oldloc - *to) + rel;
1451
1452 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1453 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1454 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1455 {
1456 newrel -= 4;
1457
1458 /* Out of range. Cannot relocate instruction. */
1459 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1460 return;
1461
1462 if ((PPC_BO (insn) & 0x14) == 0x4)
1463 insn ^= (1 << 24);
1464 else if ((PPC_BO (insn) & 0x14) == 0x10)
1465 insn ^= (1 << 22);
1466
1467 /* Jump over the unconditional branch. */
1468 insn = (insn & ~0xfffc) | 0x8;
1469 target_write_memory (*to, (unsigned char *) &insn, 4);
1470 *to += 4;
1471
1472 	  /* Build an unconditional branch and copy the LK bit.  */
1473 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1474 target_write_memory (*to, (unsigned char *) &insn, 4);
1475 *to += 4;
1476
1477 return;
1478 }
1479 else if ((PPC_BO (insn) & 0x14) == 0)
1480 {
1481 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1482 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1483
1484 newrel -= 8;
1485
1486 /* Out of range. Cannot relocate instruction. */
1487 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1488 return;
1489
1490 /* Copy BI field. */
1491 bf_insn |= (insn & 0x1f0000);
1492
1493 /* Invert condition. */
1494 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1495 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1496
1497 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1498 *to += 4;
1499 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1500 *to += 4;
1501
1502 	  /* Build an unconditional branch and copy the LK bit.  */
1503 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1504 target_write_memory (*to, (unsigned char *) &insn, 4);
1505 *to += 4;
1506
1507 return;
1508 }
1509 else /* (BO & 0x14) == 0x14, branch always. */
1510 {
1511 /* Out of range. Cannot relocate instruction. */
1512 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1513 return;
1514
1515 	  /* Build an unconditional branch and copy the LK bit.  */
1516 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1517 target_write_memory (*to, (unsigned char *) &insn, 4);
1518 *to += 4;
1519
1520 return;
1521 }
1522 }
1523
1524 target_write_memory (*to, (unsigned char *) &insn, 4);
1525 *to += 4;
1526 }
1527
1528 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1529 See target.h for details. */
1530
1531 static int
1532 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1533 CORE_ADDR collector,
1534 CORE_ADDR lockaddr,
1535 ULONGEST orig_size,
1536 CORE_ADDR *jump_entry,
1537 CORE_ADDR *trampoline,
1538 ULONGEST *trampoline_size,
1539 unsigned char *jjump_pad_insn,
1540 ULONGEST *jjump_pad_insn_size,
1541 CORE_ADDR *adjusted_insn_addr,
1542 CORE_ADDR *adjusted_insn_addr_end,
1543 char *err)
1544 {
1545 uint32_t buf[256];
1546 uint32_t *p = buf;
1547 int j, offset;
1548 CORE_ADDR buildaddr = *jump_entry;
1549 const CORE_ADDR entryaddr = *jump_entry;
1550 int rsz, min_frame, frame_size, tp_reg;
1551 #ifdef __powerpc64__
1552 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1553 int is_64 = register_size (regcache->tdesc, 0) == 8;
1554 int is_opd = is_64 && !is_elfv2_inferior ();
1555 #else
1556 int is_64 = 0, is_opd = 0;
1557 #endif
1558
1559 #ifdef __powerpc64__
1560 if (is_64)
1561 {
1562 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1563 rsz = 8;
1564 min_frame = 112;
1565 frame_size = (40 * rsz) + min_frame;
1566 tp_reg = 13;
1567 }
1568 else
1569 {
1570 #endif
1571 rsz = 4;
1572 min_frame = 16;
1573 frame_size = (40 * rsz) + min_frame;
1574 tp_reg = 2;
1575 #ifdef __powerpc64__
1576 }
1577 #endif
1578
1579 /* Stack frame layout for this jump pad,
1580
1581 High thread_area (r13/r2) |
1582 tpoint - collecting_t obj
1583 PC/<tpaddr> | +36
1584 CTR | +35
1585 LR | +34
1586 XER | +33
1587 CR | +32
1588 R31 |
1589 R29 |
1590 ... |
1591 R1 | +1
1592 R0 - collected registers
1593 ... |
1594 ... |
1595 Low Back-chain -
1596
1597
1598 The code flow of this jump pad,
1599
1600 1. Adjust SP
1601 2. Save GPR and SPR
1602 3. Prepare argument
1603 4. Call gdb_collector
1604 5. Restore GPR and SPR
1605 6. Restore SP
1606      7. Build a jump back to the program
1607      8. Copy/relocate the original instruction
1608      9. Build a jump to replace the original instruction.  */
1609
1610 /* Adjust stack pointer. */
1611 if (is_64)
1612 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1613 else
1614 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1615
1616 /* Store GPRs. Save R1 later, because it had just been modified, but
1617 we want the original value. */
1618 for (j = 2; j < 32; j++)
1619 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1620 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1621 /* Set r0 to the original value of r1 before adjusting stack frame,
1622 and then save it. */
1623 p += GEN_ADDI (p, 0, 1, frame_size);
1624 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1625
1626 /* Save CR, XER, LR, and CTR. */
1627 p += GEN_MFCR (p, 3); /* mfcr r3 */
1628 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1629 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1630 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1631 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1632 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1633 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1634 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1635
1636 /* Save PC<tpaddr> */
1637 p += gen_limm (p, 3, tpaddr, is_64);
1638 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1639
1640
1641 /* Setup arguments to collector. */
1642 /* Set r4 to collected registers. */
1643 p += GEN_ADDI (p, 4, 1, min_frame);
1644 /* Set r3 to TPOINT. */
1645 p += gen_limm (p, 3, tpoint, is_64);
1646
1647 /* Prepare collecting_t object for lock. */
1648 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1649 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1650 /* Set R5 to collecting object. */
1651 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1652
1653 p += GEN_LWSYNC (p);
1654 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1655 p += GEN_LWSYNC (p);
1656
1657 /* Call to collector. */
1658 p += gen_call (p, collector, is_64, is_opd);
1659
1660 /* Simply write 0 to release the lock. */
1661 p += gen_limm (p, 3, lockaddr, is_64);
1662 p += gen_limm (p, 4, 0, is_64);
1663 p += GEN_LWSYNC (p);
1664 p += GEN_STORE (p, 4, 3, 0, is_64);
1665
1666 /* Restore stack and registers. */
1667 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1668 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1669 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1670 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1671 p += GEN_MTCR (p, 3); /* mtcr r3 */
1672 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1673 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1674 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1675
1676 /* Restore GPRs. */
1677 for (j = 2; j < 32; j++)
1678 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1679 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1680 /* Restore SP. */
1681 p += GEN_ADDI (p, 1, 1, frame_size);
1682
1683 /* Flush instructions to inferior memory. */
1684 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1685
1686 /* Now, insert the original instruction to execute in the jump pad. */
1687 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1688 *adjusted_insn_addr_end = *adjusted_insn_addr;
1689 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1690
1691   /* Verify the relocation size.  It should be 4 for a normal copy,
1692      or 8 or 12 for some conditional branches.  */
1693 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1694 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1695 {
1696       sprintf (err, "E.Unexpected instruction length = %d "
1697 		    "when relocating instruction.",
1698 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1699 return 1;
1700 }
1701
1702 buildaddr = *adjusted_insn_addr_end;
1703 p = buf;
1704 /* Finally, write a jump back to the program. */
1705 offset = (tpaddr + 4) - buildaddr;
1706 if (offset >= (1 << 25) || offset < -(1 << 25))
1707 {
1708 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1709 "(offset 0x%x > 26-bit).", offset);
1710 return 1;
1711 }
1712 /* b <tpaddr+4> */
1713 p += GEN_B (p, offset);
1714 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1715 *jump_entry = buildaddr + (p - buf) * 4;
1716
1717 /* The jump pad is now built. Wire in a jump to our jump pad. This
1718 is always done last (by our caller actually), so that we can
1719 install fast tracepoints with threads running. This relies on
1720 the agent's atomic write support. */
1721 offset = entryaddr - tpaddr;
1722 if (offset >= (1 << 25) || offset < -(1 << 25))
1723 {
1724       sprintf (err, "E.Jump to jump pad too far from tracepoint "
1725 "(offset 0x%x > 26-bit).", offset);
1726 return 1;
1727 }
1728 /* b <jentry> */
1729 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1730 *jjump_pad_insn_size = 4;
1731
1732 return 0;
1733 }
1734
1735 /* Returns the minimum instruction length for installing a tracepoint. */
1736
1737 static int
1738 ppc_get_min_fast_tracepoint_insn_len (void)
1739 {
1740 return 4;
1741 }
1742
1743 /* Emits a given buffer into the target at current_insn_ptr. Length
1744 is in units of 32-bit words. */
1745
1746 static void
1747 emit_insns (uint32_t *buf, int n)
1748 {
1749 n = n * sizeof (uint32_t);
1750 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1751 current_insn_ptr += n;
1752 }
1753
1754 #define __EMIT_ASM(NAME, INSNS) \
1755 do \
1756 { \
1757 extern uint32_t start_bcax_ ## NAME []; \
1758 extern uint32_t end_bcax_ ## NAME []; \
1759 emit_insns (start_bcax_ ## NAME, \
1760 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1761 __asm__ (".section .text.__ppcbcax\n\t" \
1762 "start_bcax_" #NAME ":\n\t" \
1763 INSNS "\n\t" \
1764 "end_bcax_" #NAME ":\n\t" \
1765 ".previous\n\t"); \
1766 } while (0)
1767
1768 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1769 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
1770
1771 /*
1772
1773 Bytecode execution stack frame - 32-bit
1774
1775 | LR save area (SP + 4)
1776 SP' -> +- Back chain (SP + 0)
1777 	| Save r31 for accessing saved arguments
1778 | Save r30 for bytecode stack pointer
1779 | Save r4 for incoming argument *value
1780 | Save r3 for incoming argument regs
1781 r30 -> +- Bytecode execution stack
1782 |
1783 	| 64 bytes (8 doublewords) initially.
1784 | Expand stack as needed.
1785 |
1786 +-
1787 | Some padding for minimum stack frame and 16-byte alignment.
1788 | 16 bytes.
1789 SP +- Back-chain (SP')
1790
1791 initial frame size
1792 = 16 + (4 * 4) + 64
1793 = 96
1794
1795    r30 is the stack pointer for the bytecode machine.
1796    It should point to the next empty slot, so we can use LDU for pop.
1797    r3 is used to cache the high part of the TOP value.
1798    It was the first argument, the pointer to regs.
1799    r4 is used to cache the low part of the TOP value.
1800    It was the second argument, the pointer to the result.
1801    We should set *result = TOP before leaving this function.
1802
1803 Note:
1804 * To restore stack at epilogue
1805 => sp = r31
1806 * To check stack is big enough for bytecode execution.
1807 => r30 - 8 > SP + 8
1808 * To return execution result.
1809 => 0(r4) = TOP
1810
1811 */
1812
1813 /* Regardless of endianness, register 3 is always the high part and 4 the low part.
1814    These defines are used when the register pair is stored/loaded.
1815    Likewise, to simplify code, there is a similar define for 5:6.  */
1816
1817 #if __BYTE_ORDER == __LITTLE_ENDIAN
1818 #define TOP_FIRST "4"
1819 #define TOP_SECOND "3"
1820 #define TMP_FIRST "6"
1821 #define TMP_SECOND "5"
1822 #else
1823 #define TOP_FIRST "3"
1824 #define TOP_SECOND "4"
1825 #define TMP_FIRST "5"
1826 #define TMP_SECOND "6"
1827 #endif
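/* For example, in ppc_emit_epilogue below "stw " TOP_FIRST ", 0(5)"
   expands to "stw 4, 0(5)" on little-endian targets (the low word of
   TOP goes to the lower address) and to "stw 3, 0(5)" on big-endian
   targets, so *result always receives the 64-bit TOP value in the
   target's byte order.  */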
1828
1829 /* Emit prologue in inferior memory. See above comments. */
1830
1831 static void
1832 ppc_emit_prologue (void)
1833 {
1834 EMIT_ASM (/* Save return address. */
1835 "mflr 0 \n"
1836 "stw 0, 4(1) \n"
1837 /* Adjust SP. 96 is the initial frame size. */
1838 "stwu 1, -96(1) \n"
1839 /* Save r31, r30 and the incoming arguments. */
1840 "stw 31, 96-4(1) \n"
1841 "stw 30, 96-8(1) \n"
1842 "stw 4, 96-12(1) \n"
1843 "stw 3, 96-16(1) \n"
1844 /* Point r31 to the original r1 for accessing arguments. */
1845 "addi 31, 1, 96 \n"
1846 /* Set r30 to point to the stack top. */
1847 "addi 30, 1, 64 \n"
1848 /* Initialize r3/TOP to 0. */
1849 "li 3, 0 \n"
1850 "li 4, 0 \n");
1851 }
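/* To relate these stores to the frame diagram above: after
   "stwu 1, -96(1)" the new SP sits 96 bytes below the caller's SP, so
   offsets 96-4(1) through 96-16(1) are the four words just below the
   caller's SP.  Once "addi 31, 1, 96" has pointed r31 back at the
   original r1, those same slots are reachable as -4(31), -8(31), -12(31)
   and -16(31), which is how the epilogue and ppc_emit_reg read them
   back.  */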
1852
1853 /* Emit epilogue in inferior memory. See above comments. */
1854
1855 static void
1856 ppc_emit_epilogue (void)
1857 {
1858 EMIT_ASM (/* *result = TOP */
1859 "lwz 5, -12(31) \n"
1860 "stw " TOP_FIRST ", 0(5) \n"
1861 "stw " TOP_SECOND ", 4(5) \n"
1862 /* Restore registers. */
1863 "lwz 31, -4(31) \n"
1864 "lwz 30, -8(31) \n"
1865 /* Restore SP. */
1866 "lwz 1, 0(1) \n"
1867 /* Restore LR. */
1868 "lwz 0, 4(1) \n"
1869 /* Return 0 for no error. */
1870 "li 3, 0 \n"
1871 "mtlr 0 \n"
1872 "blr \n");
1873 }
1874
1875 /* TOP = stack[--sp] + TOP */
1876
1877 static void
1878 ppc_emit_add (void)
1879 {
1880 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1881 "lwz " TMP_SECOND ", 4(30)\n"
1882 "addc 4, 6, 4 \n"
1883 "adde 3, 5, 3 \n");
1884 }
1885
1886 /* TOP = stack[--sp] - TOP */
1887
1888 static void
1889 ppc_emit_sub (void)
1890 {
1891 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1892 "lwz " TMP_SECOND ", 4(30) \n"
1893 "subfc 4, 4, 6 \n"
1894 "subfe 3, 3, 5 \n");
1895 }
1896
1897 /* TOP = stack[--sp] * TOP */
1898
1899 static void
1900 ppc_emit_mul (void)
1901 {
1902 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1903 "lwz " TMP_SECOND ", 4(30) \n"
1904 "mulhwu 7, 6, 4 \n"
1905 "mullw 3, 6, 3 \n"
1906 "mullw 5, 4, 5 \n"
1907 "mullw 4, 6, 4 \n"
1908 "add 3, 5, 3 \n"
1909 "add 3, 7, 3 \n");
1910 }
1911
1912 /* TOP = stack[--sp] << TOP */
1913
1914 static void
1915 ppc_emit_lsh (void)
1916 {
1917 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1918 "lwz " TMP_SECOND ", 4(30) \n"
1919 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1920 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1921 "slw 5, 5, 4\n" /* Shift high part left */
1922 "slw 4, 6, 4\n" /* Shift low part left */
1923 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1924 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1925 "or 3, 5, 3\n"
1926 "or 3, 7, 3\n"); /* Assemble high part */
1927 }
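/* Worked example of the branch-free 64-bit shift: with 0x00000001_00000002
   in r5:r6 (the popped operand) and TOP = 36 in r4, "slw 5,5,4" and
   "slw 4,6,4" both give 0 (slw yields 0 for shift amounts >= 32),
   "srw 3,6,3" is also 0 because 32 - 36 wraps to a shift >= 32, and
   "slw 7,6,7" gives 2 << 4 = 0x20.  ORing the partial results leaves
   0x00000020 : 0x00000000 in r3:r4, the correct value of the operand
   shifted left by 36.  */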
1928
1929 /* TOP = stack[--sp] >> TOP
1930 (Arithmetic shift right) */
1931
1932 static void
1933 ppc_emit_rsh_signed (void)
1934 {
1935 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1936 "lwz " TMP_SECOND ", 4(30) \n"
1937 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1938 "sraw 3, 5, 4\n" /* Shift high part right */
1939 "cmpwi 7, 1\n"
1940 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1941 "sraw 4, 5, 7\n" /* Shift high to low */
1942 "b 2f\n"
1943 "1:\n"
1944 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1945 "srw 4, 6, 4\n" /* Shift low part right */
1946 "slw 5, 5, 7\n" /* Shift high to low */
1947 "or 4, 4, 5\n" /* Assemble low part */
1948 "2:\n");
1949 }
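/* Unlike the logical shifts, the arithmetic right shift cannot simply OR
   two partial results together: for shift amounts >= 32 the low word must
   become the high word shifted right arithmetically by TOP - 32, while
   sraw already saturates shift amounts >= 32 to "all sign bits" for the
   high word.  Hence the cmpwi/blt above splits the <= 32 and > 32 cases
   instead of using the mask-and-or trick.  */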
1950
1951 /* TOP = stack[--sp] >> TOP
1952 (Logical shift right) */
1953
1954 static void
1955 ppc_emit_rsh_unsigned (void)
1956 {
1957 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1958 "lwz " TMP_SECOND ", 4(30) \n"
1959 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1960 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1961 "srw 6, 6, 4\n" /* Shift low part right */
1962 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1963 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1964 "or 6, 6, 3\n"
1965 "srw 3, 5, 4\n" /* Shift high part right */
1966 "or 4, 6, 7\n"); /* Assemble low part */
1967 }
1968
1969 /* Emit code for the sign extension specified by ARG. */
1970
1971 static void
1972 ppc_emit_ext (int arg)
1973 {
1974 switch (arg)
1975 {
1976 case 8:
1977 EMIT_ASM ("extsb 4, 4\n"
1978 "srawi 3, 4, 31");
1979 break;
1980 case 16:
1981 EMIT_ASM ("extsh 4, 4\n"
1982 "srawi 3, 4, 31");
1983 break;
1984 case 32:
1985 EMIT_ASM ("srawi 3, 4, 31");
1986 break;
1987 default:
1988 emit_error = 1;
1989 }
1990 }
1991
1992 /* Emit code for zero-extension specified by ARG. */
1993
1994 static void
1995 ppc_emit_zero_ext (int arg)
1996 {
1997 switch (arg)
1998 {
1999 case 8:
2000 EMIT_ASM ("clrlwi 4,4,24\n"
2001 "li 3, 0\n");
2002 break;
2003 case 16:
2004 EMIT_ASM ("clrlwi 4,4,16\n"
2005 "li 3, 0\n");
2006 break;
2007 case 32:
2008 EMIT_ASM ("li 3, 0");
2009 break;
2010 default:
2011 emit_error = 1;
2012 }
2013 }
2014
2015 /* TOP = !TOP
2016 i.e., TOP = (TOP == 0) ? 1 : 0; */
2017
2018 static void
2019 ppc_emit_log_not (void)
2020 {
2021 EMIT_ASM ("or 4, 3, 4 \n"
2022 "cntlzw 4, 4 \n"
2023 "srwi 4, 4, 5 \n"
2024 "li 3, 0 \n");
2025 }
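/* cntlzw returns 32 only when its operand is zero and a value in 0..31
   otherwise, so shifting the count right by 5 yields exactly 1 for a zero
   input and 0 for anything else.  ORing the two halves of TOP first makes
   this a branch-free test of the full 64-bit value.  */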
2026
2027 /* TOP = stack[--sp] & TOP */
2028
2029 static void
2030 ppc_emit_bit_and (void)
2031 {
2032 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2033 "lwz " TMP_SECOND ", 4(30) \n"
2034 "and 4, 6, 4 \n"
2035 "and 3, 5, 3 \n");
2036 }
2037
2038 /* TOP = stack[--sp] | TOP */
2039
2040 static void
2041 ppc_emit_bit_or (void)
2042 {
2043 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2044 "lwz " TMP_SECOND ", 4(30) \n"
2045 "or 4, 6, 4 \n"
2046 "or 3, 5, 3 \n");
2047 }
2048
2049 /* TOP = stack[--sp] ^ TOP */
2050
2051 static void
2052 ppc_emit_bit_xor (void)
2053 {
2054 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2055 "lwz " TMP_SECOND ", 4(30) \n"
2056 "xor 4, 6, 4 \n"
2057 "xor 3, 5, 3 \n");
2058 }
2059
2060 /* TOP = ~TOP
2061 i.e., TOP = ~(TOP | TOP) */
2062
2063 static void
2064 ppc_emit_bit_not (void)
2065 {
2066 EMIT_ASM ("nor 3, 3, 3 \n"
2067 "nor 4, 4, 4 \n");
2068 }
2069
2070 /* TOP = stack[--sp] == TOP */
2071
2072 static void
2073 ppc_emit_equal (void)
2074 {
2075 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2076 "lwz " TMP_SECOND ", 4(30) \n"
2077 "xor 4, 6, 4 \n"
2078 "xor 3, 5, 3 \n"
2079 "or 4, 3, 4 \n"
2080 "cntlzw 4, 4 \n"
2081 "srwi 4, 4, 5 \n"
2082 "li 3, 0 \n");
2083 }
2084
2085 /* TOP = stack[--sp] < TOP
2086 (Signed comparison) */
2087
2088 static void
2089 ppc_emit_less_signed (void)
2090 {
2091 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2092 "lwz " TMP_SECOND ", 4(30) \n"
2093 "cmplw 6, 6, 4 \n"
2094 "cmpw 7, 5, 3 \n"
2095 /* CR6 bit 0 = low less and high equal */
2096 "crand 6*4+0, 6*4+0, 7*4+2\n"
2097 /* CR7 bit 0 = (low less and high equal) or high less */
2098 "cror 7*4+0, 7*4+0, 6*4+0\n"
2099 "mfcr 4 \n"
2100 "rlwinm 4, 4, 29, 31, 31 \n"
2101 "li 3, 0 \n");
2102 }
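/* The "6*4+0" style operands are condition-register bit numbers: CR field
   n occupies bits 4*n .. 4*n+3 in the order LT, GT, EQ, SO.  So 6*4+0 is
   CR6[LT] (low words, unsigned less-than), 7*4+2 is CR7[EQ] (high words
   equal) and 7*4+0 is CR7[LT] (high words, signed less-than).  After
   mfcr, CR7[LT] sits three bits above the least-significant bit of r4, so
   rotating left by 29 (i.e. right by 3) and masking with MB=ME=31 leaves
   just that bit, giving 0 or 1.  */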
2103
2104 /* TOP = stack[--sp] < TOP
2105 (Unsigned comparison) */
2106
2107 static void
2108 ppc_emit_less_unsigned (void)
2109 {
2110 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2111 "lwz " TMP_SECOND ", 4(30) \n"
2112 "cmplw 6, 6, 4 \n"
2113 "cmplw 7, 5, 3 \n"
2114 /* CR6 bit 0 = low less and high equal */
2115 "crand 6*4+0, 6*4+0, 7*4+2\n"
2116 /* CR7 bit 0 = (low less and high equal) or high less */
2117 "cror 7*4+0, 7*4+0, 6*4+0\n"
2118 "mfcr 4 \n"
2119 "rlwinm 4, 4, 29, 31, 31 \n"
2120 "li 3, 0 \n");
2121 }
2122
2123 /* Read SIZE bytes from the memory address in TOP.
2124 Zero-extend the value read. */
2125
2126 static void
2127 ppc_emit_ref (int size)
2128 {
2129 switch (size)
2130 {
2131 case 1:
2132 EMIT_ASM ("lbz 4, 0(4)\n"
2133 "li 3, 0");
2134 break;
2135 case 2:
2136 EMIT_ASM ("lhz 4, 0(4)\n"
2137 "li 3, 0");
2138 break;
2139 case 4:
2140 EMIT_ASM ("lwz 4, 0(4)\n"
2141 "li 3, 0");
2142 break;
2143 case 8:
2144 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2145 EMIT_ASM ("lwz 3, 4(4)\n"
2146 "lwz 4, 0(4)");
2147 else
2148 EMIT_ASM ("lwz 3, 0(4)\n"
2149 "lwz 4, 4(4)");
2150 break;
2151 }
2152 }
2153
2154 /* TOP = NUM */
2155
2156 static void
2157 ppc_emit_const (LONGEST num)
2158 {
2159 uint32_t buf[10];
2160 uint32_t *p = buf;
2161
2162 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2163 p += gen_limm (p, 4, num & 0xffffffff, 0);
2164
2165 emit_insns (buf, p - buf);
2166 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2167 }
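/* On 32-bit the 64-bit constant is materialised as two independent 32-bit
   immediates, the high half into r3 and the low half into r4.  Each
   gen_limm call presumably expands to no more than a short lis/ori style
   sequence, which is why the 10-word buffer asserted above is
   sufficient.  */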
2168
2169 /* Set TOP to the value of register REG by calling the get_raw_reg function
2170 with two arguments, the collected buffer and the register number. */
2171
2172 static void
2173 ppc_emit_reg (int reg)
2174 {
2175 uint32_t buf[13];
2176 uint32_t *p = buf;
2177
2178 /* fctx->regs is passed in r3 and then saved in -16(31). */
2179 p += GEN_LWZ (p, 3, 31, -16);
2180 p += GEN_LI (p, 4, reg); /* li r4, reg */
2181 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2182
2183 emit_insns (buf, p - buf);
2184 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2185
2186 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2187 {
2188 EMIT_ASM ("mr 5, 4\n"
2189 "mr 4, 3\n"
2190 "mr 3, 5\n");
2191 }
2192 }
2193
2194 /* TOP = stack[--sp] */
2195
2196 static void
2197 ppc_emit_pop (void)
2198 {
2199 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2200 "lwz " TOP_SECOND ", 4(30) \n");
2201 }
2202
2203 /* stack[sp++] = TOP
2204
2205 Because we may use up the bytecode stack, expand it by 8 more
2206 doublewords if needed. */
2207
2208 static void
2209 ppc_emit_stack_flush (void)
2210 {
2211 /* Make sure the bytecode stack is big enough before the push.
2212 Otherwise, expand it by another 64 bytes. */
2213
2214 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2215 " stw " TOP_SECOND ", 4(30)\n"
2216 " addi 5, 30, -(8 + 8) \n"
2217 " cmpw 7, 5, 1 \n"
2218 " bgt 7, 1f \n"
2219 " stwu 31, -64(1) \n"
2220 "1:addi 30, 30, -8 \n");
2221 }
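/* The overflow check uses the "next empty" convention: "addi 5,30,-(8+8)"
   is where r30 will be after this push plus one more free slot, and the
   cmpw/bgt skips the growth only while that still lies above SP (matching
   the "r30 - 8 > SP + 8" note in the frame comment).  When the frame does
   have to grow, "stwu 31, -64(1)" extends it by 64 bytes while storing
   r31 (the original r1) as the new back chain.  Either way r30 is then
   moved down by 8 to the new next-empty slot.  */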
2222
2223 /* Swap TOP and stack[sp-1] */
2224
2225 static void
2226 ppc_emit_swap (void)
2227 {
2228 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2229 "lwz " TMP_SECOND ", 12(30) \n"
2230 "stw " TOP_FIRST ", 8(30) \n"
2231 "stw " TOP_SECOND ", 12(30) \n"
2232 "mr 3, 5 \n"
2233 "mr 4, 6 \n");
2234 }
2235
2236 /* Discard N elements in the stack. Also used for ppc64. */
2237
2238 static void
2239 ppc_emit_stack_adjust (int n)
2240 {
2241 uint32_t buf[6];
2242 uint32_t *p = buf;
2243
2244 n = n << 3;
2245 if ((n >> 15) != 0)
2246 {
2247 emit_error = 1;
2248 return;
2249 }
2250
2251 p += GEN_ADDI (p, 30, 30, n);
2252
2253 emit_insns (buf, p - buf);
2254 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2255 }
2256
2257 /* Call function FN. */
2258
2259 static void
2260 ppc_emit_call (CORE_ADDR fn)
2261 {
2262 uint32_t buf[11];
2263 uint32_t *p = buf;
2264
2265 p += gen_call (p, fn, 0, 0);
2266
2267 emit_insns (buf, p - buf);
2268 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2269 }
2270
2271 /* FN's prototype is `LONGEST(*fn)(int)'.
2272 TOP = fn (arg1)
2273 */
2274
2275 static void
2276 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2277 {
2278 uint32_t buf[15];
2279 uint32_t *p = buf;
2280
2281 /* Set up the argument. arg1 is a 16-bit value. */
2282 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2283 p += gen_call (p, fn, 0, 0);
2284
2285 emit_insns (buf, p - buf);
2286 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2287
2288 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2289 {
2290 EMIT_ASM ("mr 5, 4\n"
2291 "mr 4, 3\n"
2292 "mr 3, 5\n");
2293 }
2294 }
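/* On little-endian the trailing mr sequence swaps r3 and r4 so that,
   whatever order the ABI used to return the 64-bit result, the internal
   convention (r3 = high part, r4 = low part) is restored before the next
   emitter runs; ppc_emit_reg above applies the same fix-up after its
   call.  */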
2295
2296 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2297 fn (arg1, TOP)
2298
2299 TOP should be preserved/restored before/after the call. */
2300
2301 static void
2302 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2303 {
2304 uint32_t buf[21];
2305 uint32_t *p = buf;
2306
2307 /* Save TOP. 0(30) is next-empty. */
2308 p += GEN_STW (p, 3, 30, 0);
2309 p += GEN_STW (p, 4, 30, 4);
2310
2311 /* Set up the argument. arg1 is a 16-bit value. */
2312 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2313 {
2314 p += GEN_MR (p, 5, 4);
2315 p += GEN_MR (p, 6, 3);
2316 }
2317 else
2318 {
2319 p += GEN_MR (p, 5, 3);
2320 p += GEN_MR (p, 6, 4);
2321 }
2322 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2323 p += gen_call (p, fn, 0, 0);
2324
2325 /* Restore TOP */
2326 p += GEN_LWZ (p, 3, 30, 0);
2327 p += GEN_LWZ (p, 4, 30, 4);
2328
2329 emit_insns (buf, p - buf);
2330 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2331 }
2332
2333 /* Note in the following goto ops:
2334
2335 When emitting goto, the target address is later relocated by
2336 write_goto_address. OFFSET_P is the offset of the branch instruction
2337 in the code sequence, and SIZE_P is how to relocate the instruction,
2338 recognized by ppc_write_goto_address. In the current implementation,
2339 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2340 */
2341
2342 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2343
2344 static void
2345 ppc_emit_if_goto (int *offset_p, int *size_p)
2346 {
2347 EMIT_ASM ("or. 3, 3, 4 \n"
2348 "lwzu " TOP_FIRST ", 8(30) \n"
2349 "lwz " TOP_SECOND ", 4(30) \n"
2350 "1:bne 0, 1b \n");
2351
2352 if (offset_p)
2353 *offset_p = 12;
2354 if (size_p)
2355 *size_p = 14;
2356 }
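/* "or." both merges the two halves of TOP and sets CR0, and the two loads
   popping the next stack entry leave CR0 alone, so the bne still tests
   whether the old TOP was non-zero.  The branch is the fourth 4-byte
   instruction, hence offset 12; size 14 tells ppc_write_goto_address to
   patch a conditional branch's 14-bit BD field.  */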
2357
2358 /* Unconditional goto. Also used for ppc64. */
2359
2360 static void
2361 ppc_emit_goto (int *offset_p, int *size_p)
2362 {
2363 EMIT_ASM ("1:b 1b");
2364
2365 if (offset_p)
2366 *offset_p = 0;
2367 if (size_p)
2368 *size_p = 24;
2369 }
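/* "1:b 1b" is a placeholder that branches to itself; ppc_write_goto_address
   later patches it to the real destination using the offset/size pair
   reported here (offset 0 because the b is the only instruction, size 24
   because an unconditional b carries a 24-bit LI displacement field).  */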
2370
2371 /* Goto if stack[--sp] == TOP */
2372
2373 static void
2374 ppc_emit_eq_goto (int *offset_p, int *size_p)
2375 {
2376 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2377 "lwz " TMP_SECOND ", 4(30) \n"
2378 "xor 4, 6, 4 \n"
2379 "xor 3, 5, 3 \n"
2380 "or. 3, 3, 4 \n"
2381 "lwzu " TOP_FIRST ", 8(30) \n"
2382 "lwz " TOP_SECOND ", 4(30) \n"
2383 "1:beq 0, 1b \n");
2384
2385 if (offset_p)
2386 *offset_p = 28;
2387 if (size_p)
2388 *size_p = 14;
2389 }
2390
2391 /* Goto if stack[--sp] != TOP */
2392
2393 static void
2394 ppc_emit_ne_goto (int *offset_p, int *size_p)
2395 {
2396 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2397 "lwz " TMP_SECOND ", 4(30) \n"
2398 "xor 4, 6, 4 \n"
2399 "xor 3, 5, 3 \n"
2400 "or. 3, 3, 4 \n"
2401 "lwzu " TOP_FIRST ", 8(30) \n"
2402 "lwz " TOP_SECOND ", 4(30) \n"
2403 "1:bne 0, 1b \n");
2404
2405 if (offset_p)
2406 *offset_p = 28;
2407 if (size_p)
2408 *size_p = 14;
2409 }
2410
2411 /* Goto if stack[--sp] < TOP */
2412
2413 static void
2414 ppc_emit_lt_goto (int *offset_p, int *size_p)
2415 {
2416 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2417 "lwz " TMP_SECOND ", 4(30) \n"
2418 "cmplw 6, 6, 4 \n"
2419 "cmpw 7, 5, 3 \n"
2420 /* CR6 bit 0 = low less and high equal */
2421 "crand 6*4+0, 6*4+0, 7*4+2\n"
2422 /* CR7 bit 0 = (low less and high equal) or high less */
2423 "cror 7*4+0, 7*4+0, 6*4+0\n"
2424 "lwzu " TOP_FIRST ", 8(30) \n"
2425 "lwz " TOP_SECOND ", 4(30)\n"
2426 "1:blt 7, 1b \n");
2427
2428 if (offset_p)
2429 *offset_p = 32;
2430 if (size_p)
2431 *size_p = 14;
2432 }
2433
2434 /* Goto if stack[--sp] <= TOP */
2435
2436 static void
2437 ppc_emit_le_goto (int *offset_p, int *size_p)
2438 {
2439 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2440 "lwz " TMP_SECOND ", 4(30) \n"
2441 "cmplw 6, 6, 4 \n"
2442 "cmpw 7, 5, 3 \n"
2443 /* CR6 bit 0 = low less/equal and high equal */
2444 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2445 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2446 "cror 7*4+0, 7*4+0, 6*4+0\n"
2447 "lwzu " TOP_FIRST ", 8(30) \n"
2448 "lwz " TOP_SECOND ", 4(30)\n"
2449 "1:blt 7, 1b \n");
2450
2451 if (offset_p)
2452 *offset_p = 32;
2453 if (size_p)
2454 *size_p = 14;
2455 }
2456
2457 /* Goto if stack[--sp] > TOP */
2458
2459 static void
2460 ppc_emit_gt_goto (int *offset_p, int *size_p)
2461 {
2462 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2463 "lwz " TMP_SECOND ", 4(30) \n"
2464 "cmplw 6, 6, 4 \n"
2465 "cmpw 7, 5, 3 \n"
2466 /* CR6 bit 0 = low greater and high equal */
2467 "crand 6*4+0, 6*4+1, 7*4+2\n"
2468 /* CR7 bit 0 = (low greater and high equal) or high greater */
2469 "cror 7*4+0, 7*4+1, 6*4+0\n"
2470 "lwzu " TOP_FIRST ", 8(30) \n"
2471 "lwz " TOP_SECOND ", 4(30)\n"
2472 "1:blt 7, 1b \n");
2473
2474 if (offset_p)
2475 *offset_p = 32;
2476 if (size_p)
2477 *size_p = 14;
2478 }
2479
2480 /* Goto if stack[--sp] >= TOP */
2481
2482 static void
2483 ppc_emit_ge_goto (int *offset_p, int *size_p)
2484 {
2485 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2486 "lwz " TMP_SECOND ", 4(30) \n"
2487 "cmplw 6, 6, 4 \n"
2488 "cmpw 7, 5, 3 \n"
2489 /* CR6 bit 0 = low ge and high equal */
2490 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2491 /* CR7 bit 0 = (low ge and high equal) or high greater */
2492 "cror 7*4+0, 7*4+1, 6*4+0\n"
2493 "lwzu " TOP_FIRST ", 8(30)\n"
2494 "lwz " TOP_SECOND ", 4(30)\n"
2495 "1:blt 7, 1b \n");
2496
2497 if (offset_p)
2498 *offset_p = 32;
2499 if (size_p)
2500 *size_p = 14;
2501 }
2502
2503 /* Relocate the previously emitted branch instruction. FROM is the address
2504 of the branch instruction, TO is the goto target address, and SIZE
2505 is the value we set via *SIZE_P before. Currently, it is either
2506 24 or 14, for branch and conditional-branch instructions respectively.
2507 Also used for ppc64. */
2508
2509 static void
2510 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2511 {
2512 long rel = to - from;
2513 uint32_t insn;
2514 int opcd;
2515
2516 read_inferior_memory (from, (unsigned char *) &insn, 4);
2517 opcd = (insn >> 26) & 0x3f;
2518
2519 switch (size)
2520 {
2521 case 14:
2522 if (opcd != 16
2523 || (rel >= (1 << 15) || rel < -(1 << 15)))
2524 emit_error = 1;
2525 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2526 break;
2527 case 24:
2528 if (opcd != 18
2529 || (rel >= (1 << 25) || rel < -(1 << 25)))
2530 emit_error = 1;
2531 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2532 break;
2533 default:
2534 emit_error = 1;
2535 }
2536
2537 if (!emit_error)
2538 target_write_memory (from, (unsigned char *) &insn, 4);
2539 }
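/* As a worked example with made-up addresses: a conditional branch emitted
   at 0x1000 whose target resolves to 0xf40 gives rel = -0xc0.  The opcode
   check confirms a bc (primary opcode 16), the displacement fits the
   signed 16-bit range allowed for SIZE == 14, and the instruction's low
   half becomes (-0xc0 & 0xfffc) = 0xff40, leaving the BO/BI fields and
   the AA/LK bits untouched.  */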
2540
2541 /* Table of emit ops for 32-bit. */
2542
2543 static struct emit_ops ppc_emit_ops_impl =
2544 {
2545 ppc_emit_prologue,
2546 ppc_emit_epilogue,
2547 ppc_emit_add,
2548 ppc_emit_sub,
2549 ppc_emit_mul,
2550 ppc_emit_lsh,
2551 ppc_emit_rsh_signed,
2552 ppc_emit_rsh_unsigned,
2553 ppc_emit_ext,
2554 ppc_emit_log_not,
2555 ppc_emit_bit_and,
2556 ppc_emit_bit_or,
2557 ppc_emit_bit_xor,
2558 ppc_emit_bit_not,
2559 ppc_emit_equal,
2560 ppc_emit_less_signed,
2561 ppc_emit_less_unsigned,
2562 ppc_emit_ref,
2563 ppc_emit_if_goto,
2564 ppc_emit_goto,
2565 ppc_write_goto_address,
2566 ppc_emit_const,
2567 ppc_emit_call,
2568 ppc_emit_reg,
2569 ppc_emit_pop,
2570 ppc_emit_stack_flush,
2571 ppc_emit_zero_ext,
2572 ppc_emit_swap,
2573 ppc_emit_stack_adjust,
2574 ppc_emit_int_call_1,
2575 ppc_emit_void_call_2,
2576 ppc_emit_eq_goto,
2577 ppc_emit_ne_goto,
2578 ppc_emit_lt_goto,
2579 ppc_emit_le_goto,
2580 ppc_emit_gt_goto,
2581 ppc_emit_ge_goto
2582 };
2583
2584 #ifdef __powerpc64__
2585
2586 /*
2587
2588 Bytecode execution stack frame - 64-bit
2589
2590 | LR save area (SP + 16)
2591 | CR save area (SP + 8)
2592 SP' -> +- Back chain (SP + 0)
2593 | Save r31 for accessing saved arguments
2594 | Save r30 for bytecode stack pointer
2595 | Save r4 for incoming argument *value
2596 | Save r3 for incoming argument regs
2597 r30 -> +- Bytecode execution stack
2598 |
2599 | 64 bytes (8 doublewords) initially.
2600 | Expand stack as needed.
2601 |
2602 +-
2603 | Some padding for minimum stack frame.
2604 | 112 for ELFv1.
2605 SP +- Back-chain (SP')
2606
2607 initial frame size
2608 = 112 + (4 * 8) + 64
2609 = 208
2610
2611 r30 is the stack pointer for the bytecode machine.
2612 It should point to the next empty slot, so we can use LDU for pop.
2613 r3 is used to cache the TOP value.
2614 It was the first argument, the pointer to regs.
2615 r4 is the second argument, the pointer to the result.
2616 We should set *result = TOP before leaving this function.
2617
2618 Note:
2619 * To restore the stack at the epilogue:
2620 => sp = r31
2621 * To check that the stack is big enough for bytecode execution:
2622 => r30 - 8 > SP + 112
2623 * To return the execution result:
2624 => 0(r4) = TOP
2625
2626 */
2627
2628 /* Emit prologue in inferior memory. See above comments. */
2629
2630 static void
2631 ppc64v1_emit_prologue (void)
2632 {
2633 /* On ELFv1, function pointers really point to a function descriptor,
2634 so emit one here. We don't care about the contents of words 1 and 2,
2635 so let them just overlap our code. */
2636 uint64_t opd = current_insn_ptr + 8;
2637 uint32_t buf[2];
2638
2639 /* Mind the strict aliasing rules. */
2640 memcpy (buf, &opd, sizeof buf);
2641 emit_insns(buf, 2);
2642 EMIT_ASM (/* Save return address. */
2643 "mflr 0 \n"
2644 "std 0, 16(1) \n"
2645 /* Save r31, r30 and the incoming arguments. */
2646 "std 31, -8(1) \n"
2647 "std 30, -16(1) \n"
2648 "std 4, -24(1) \n"
2649 "std 3, -32(1) \n"
2650 /* Point r31 to the current r1 for accessing arguments. */
2651 "mr 31, 1 \n"
2652 /* Adjust SP. 208 is the initial frame size. */
2653 "stdu 1, -208(1) \n"
2654 /* Set r30 to point to the stack top. */
2655 "addi 30, 1, 168 \n"
2656 /* Initialize r3/TOP to 0. */
2657 "li 3, 0 \n");
2658 }
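/* The 8 bytes copied above are the entry-point doubleword of an ELFv1
   function descriptor.  Since opd = current_insn_ptr + 8, that doubleword
   points at the mflr immediately after it, while the descriptor's TOC and
   environment doublewords (words 1 and 2) land on top of the first
   instructions and, as the comment above notes, are never relied upon.  */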
2659
2660 /* Emit prologue in inferior memory. See above comments. */
2661
2662 static void
2663 ppc64v2_emit_prologue (void)
2664 {
2665 EMIT_ASM (/* Save return address. */
2666 "mflr 0 \n"
2667 "std 0, 16(1) \n"
2668 /* Save r31, r30 and the incoming arguments. */
2669 "std 31, -8(1) \n"
2670 "std 30, -16(1) \n"
2671 "std 4, -24(1) \n"
2672 "std 3, -32(1) \n"
2673 /* Point r31 to the current r1 for accessing arguments. */
2674 "mr 31, 1 \n"
2675 /* Adjust SP. 208 is the initial frame size. */
2676 "stdu 1, -208(1) \n"
2677 /* Set r30 to point to the stack top. */
2678 "addi 30, 1, 168 \n"
2679 /* Initialize r3/TOP to 0. */
2680 "li 3, 0 \n");
2681 }
2682
2683 /* Emit epilogue in inferior memory. See above comments. */
2684
2685 static void
2686 ppc64_emit_epilogue (void)
2687 {
2688 EMIT_ASM (/* Restore SP. */
2689 "ld 1, 0(1) \n"
2690 /* *result = TOP */
2691 "ld 4, -24(1) \n"
2692 "std 3, 0(4) \n"
2693 /* Restore registers. */
2694 "ld 31, -8(1) \n"
2695 "ld 30, -16(1) \n"
2696 /* Restore LR. */
2697 "ld 0, 16(1) \n"
2698 /* Return 0 for no error. */
2699 "li 3, 0 \n"
2700 "mtlr 0 \n"
2701 "blr \n");
2702 }
2703
2704 /* TOP = stack[--sp] + TOP */
2705
2706 static void
2707 ppc64_emit_add (void)
2708 {
2709 EMIT_ASM ("ldu 4, 8(30) \n"
2710 "add 3, 4, 3 \n");
2711 }
2712
2713 /* TOP = stack[--sp] - TOP */
2714
2715 static void
2716 ppc64_emit_sub (void)
2717 {
2718 EMIT_ASM ("ldu 4, 8(30) \n"
2719 "sub 3, 4, 3 \n");
2720 }
2721
2722 /* TOP = stack[--sp] * TOP */
2723
2724 static void
2725 ppc64_emit_mul (void)
2726 {
2727 EMIT_ASM ("ldu 4, 8(30) \n"
2728 "mulld 3, 4, 3 \n");
2729 }
2730
2731 /* TOP = stack[--sp] << TOP */
2732
2733 static void
2734 ppc64_emit_lsh (void)
2735 {
2736 EMIT_ASM ("ldu 4, 8(30) \n"
2737 "sld 3, 4, 3 \n");
2738 }
2739
2740 /* TOP = stack[--sp] >> TOP
2741 (Arithmetic shift right) */
2742
2743 static void
2744 ppc64_emit_rsh_signed (void)
2745 {
2746 EMIT_ASM ("ldu 4, 8(30) \n"
2747 "srad 3, 4, 3 \n");
2748 }
2749
2750 /* TOP = stack[--sp] >> TOP
2751 (Logical shift right) */
2752
2753 static void
2754 ppc64_emit_rsh_unsigned (void)
2755 {
2756 EMIT_ASM ("ldu 4, 8(30) \n"
2757 "srd 3, 4, 3 \n");
2758 }
2759
2760 /* Emit code for the sign extension specified by ARG. */
2761
2762 static void
2763 ppc64_emit_ext (int arg)
2764 {
2765 switch (arg)
2766 {
2767 case 8:
2768 EMIT_ASM ("extsb 3, 3");
2769 break;
2770 case 16:
2771 EMIT_ASM ("extsh 3, 3");
2772 break;
2773 case 32:
2774 EMIT_ASM ("extsw 3, 3");
2775 break;
2776 default:
2777 emit_error = 1;
2778 }
2779 }
2780
2781 /* Emit code for zero-extension specified by ARG. */
2782
2783 static void
2784 ppc64_emit_zero_ext (int arg)
2785 {
2786 switch (arg)
2787 {
2788 case 8:
2789 EMIT_ASM ("rldicl 3,3,0,56");
2790 break;
2791 case 16:
2792 EMIT_ASM ("rldicl 3,3,0,48");
2793 break;
2794 case 32:
2795 EMIT_ASM ("rldicl 3,3,0,32");
2796 break;
2797 default:
2798 emit_error = 1;
2799 }
2800 }
2801
2802 /* TOP = !TOP
2803 i.e., TOP = (TOP == 0) ? 1 : 0; */
2804
2805 static void
2806 ppc64_emit_log_not (void)
2807 {
2808 EMIT_ASM ("cntlzd 3, 3 \n"
2809 "srdi 3, 3, 6 \n");
2810 }
2811
2812 /* TOP = stack[--sp] & TOP */
2813
2814 static void
2815 ppc64_emit_bit_and (void)
2816 {
2817 EMIT_ASM ("ldu 4, 8(30) \n"
2818 "and 3, 4, 3 \n");
2819 }
2820
2821 /* TOP = stack[--sp] | TOP */
2822
2823 static void
2824 ppc64_emit_bit_or (void)
2825 {
2826 EMIT_ASM ("ldu 4, 8(30) \n"
2827 "or 3, 4, 3 \n");
2828 }
2829
2830 /* TOP = stack[--sp] ^ TOP */
2831
2832 static void
2833 ppc64_emit_bit_xor (void)
2834 {
2835 EMIT_ASM ("ldu 4, 8(30) \n"
2836 "xor 3, 4, 3 \n");
2837 }
2838
2839 /* TOP = ~TOP
2840 i.e., TOP = ~(TOP | TOP) */
2841
2842 static void
2843 ppc64_emit_bit_not (void)
2844 {
2845 EMIT_ASM ("nor 3, 3, 3 \n");
2846 }
2847
2848 /* TOP = stack[--sp] == TOP */
2849
2850 static void
2851 ppc64_emit_equal (void)
2852 {
2853 EMIT_ASM ("ldu 4, 8(30) \n"
2854 "xor 3, 3, 4 \n"
2855 "cntlzd 3, 3 \n"
2856 "srdi 3, 3, 6 \n");
2857 }
2858
2859 /* TOP = stack[--sp] < TOP
2860 (Signed comparison) */
2861
2862 static void
2863 ppc64_emit_less_signed (void)
2864 {
2865 EMIT_ASM ("ldu 4, 8(30) \n"
2866 "cmpd 7, 4, 3 \n"
2867 "mfcr 3 \n"
2868 "rlwinm 3, 3, 29, 31, 31 \n");
2869 }
2870
2871 /* TOP = stack[--sp] < TOP
2872 (Unsigned comparison) */
2873
2874 static void
2875 ppc64_emit_less_unsigned (void)
2876 {
2877 EMIT_ASM ("ldu 4, 8(30) \n"
2878 "cmpld 7, 4, 3 \n"
2879 "mfcr 3 \n"
2880 "rlwinm 3, 3, 29, 31, 31 \n");
2881 }
2882
2883 /* Read SIZE bytes from the memory address in TOP.
2884 Zero-extend the value read. */
2885
2886 static void
2887 ppc64_emit_ref (int size)
2888 {
2889 switch (size)
2890 {
2891 case 1:
2892 EMIT_ASM ("lbz 3, 0(3)");
2893 break;
2894 case 2:
2895 EMIT_ASM ("lhz 3, 0(3)");
2896 break;
2897 case 4:
2898 EMIT_ASM ("lwz 3, 0(3)");
2899 break;
2900 case 8:
2901 EMIT_ASM ("ld 3, 0(3)");
2902 break;
2903 }
2904 }
2905
2906 /* TOP = NUM */
2907
2908 static void
2909 ppc64_emit_const (LONGEST num)
2910 {
2911 uint32_t buf[5];
2912 uint32_t *p = buf;
2913
2914 p += gen_limm (p, 3, num, 1);
2915
2916 emit_insns (buf, p - buf);
2917 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2918 }
2919
2920 /* Set TOP to the value of register REG by calling the get_raw_reg function
2921 with two arguments, the collected buffer and the register number. */
2922
2923 static void
2924 ppc64v1_emit_reg (int reg)
2925 {
2926 uint32_t buf[15];
2927 uint32_t *p = buf;
2928
2929 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
2930 p += GEN_LD (p, 3, 31, -32);
2931 p += GEN_LI (p, 4, reg);
2932 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2933 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2934 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2935
2936 emit_insns (buf, p - buf);
2937 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2938 }
2939
2940 /* Likewise, for ELFv2. */
2941
2942 static void
2943 ppc64v2_emit_reg (int reg)
2944 {
2945 uint32_t buf[12];
2946 uint32_t *p = buf;
2947
2948 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1). */
2949 p += GEN_LD (p, 3, 31, -32);
2950 p += GEN_LI (p, 4, reg);
2951 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2952 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2953 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2954
2955 emit_insns (buf, p - buf);
2956 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2957 }
2958
2959 /* TOP = stack[--sp] */
2960
2961 static void
2962 ppc64_emit_pop (void)
2963 {
2964 EMIT_ASM ("ldu 3, 8(30)");
2965 }
2966
2967 /* stack[sp++] = TOP
2968
2969 Because we may use up the bytecode stack, expand it by 8 more
2970 doublewords if needed. */
2971
2972 static void
2973 ppc64_emit_stack_flush (void)
2974 {
2975 /* Make sure the bytecode stack is big enough before the push.
2976 Otherwise, expand it by another 64 bytes. */
2977
2978 EMIT_ASM (" std 3, 0(30) \n"
2979 " addi 4, 30, -(112 + 8) \n"
2980 " cmpd 7, 4, 1 \n"
2981 " bgt 7, 1f \n"
2982 " stdu 31, -64(1) \n"
2983 "1:addi 30, 30, -8 \n");
2984 }
2985
2986 /* Swap TOP and stack[sp-1] */
2987
2988 static void
2989 ppc64_emit_swap (void)
2990 {
2991 EMIT_ASM ("ld 4, 8(30) \n"
2992 "std 3, 8(30) \n"
2993 "mr 3, 4 \n");
2994 }
2995
2996 /* Call function FN - ELFv1. */
2997
2998 static void
2999 ppc64v1_emit_call (CORE_ADDR fn)
3000 {
3001 uint32_t buf[13];
3002 uint32_t *p = buf;
3003
3004 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3005 p += gen_call (p, fn, 1, 1);
3006 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3007
3008 emit_insns (buf, p - buf);
3009 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3010 }
3011
3012 /* Call function FN - ELFv2. */
3013
3014 static void
3015 ppc64v2_emit_call (CORE_ADDR fn)
3016 {
3017 uint32_t buf[10];
3018 uint32_t *p = buf;
3019
3020 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3021 p += gen_call (p, fn, 1, 0);
3022 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3023
3024 emit_insns (buf, p - buf);
3025 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3026 }
3027
3028 /* FN's prototype is `LONGEST(*fn)(int)'.
3029 TOP = fn (arg1)
3030 */
3031
3032 static void
3033 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3034 {
3035 uint32_t buf[13];
3036 uint32_t *p = buf;
3037
3038 /* Set up the argument. arg1 is a 16-bit value. */
3039 p += gen_limm (p, 3, arg1, 1);
3040 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3041 p += gen_call (p, fn, 1, 1);
3042 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3043
3044 emit_insns (buf, p - buf);
3045 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3046 }
3047
3048 /* Likewise for ELFv2. */
3049
3050 static void
3051 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3052 {
3053 uint32_t buf[10];
3054 uint32_t *p = buf;
3055
3056 /* Set up the argument. arg1 is a 16-bit value. */
3057 p += gen_limm (p, 3, arg1, 1);
3058 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3059 p += gen_call (p, fn, 1, 0);
3060 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3061
3062 emit_insns (buf, p - buf);
3063 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3064 }
3065
3066 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3067 fn (arg1, TOP)
3068
3069 TOP should be preserved/restored before/after the call. */
3070
3071 static void
3072 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3073 {
3074 uint32_t buf[17];
3075 uint32_t *p = buf;
3076
3077 /* Save TOP. 0(30) is next-empty. */
3078 p += GEN_STD (p, 3, 30, 0);
3079
3080 /* Set up the argument. arg1 is a 16-bit value. */
3081 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3082 p += gen_limm (p, 3, arg1, 1);
3083 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3084 p += gen_call (p, fn, 1, 1);
3085 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3086
3087 /* Restore TOP */
3088 p += GEN_LD (p, 3, 30, 0);
3089
3090 emit_insns (buf, p - buf);
3091 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3092 }
3093
3094 /* Likewise for ELFv2. */
3095
3096 static void
3097 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3098 {
3099 uint32_t buf[14];
3100 uint32_t *p = buf;
3101
3102 /* Save TOP. 0(30) is next-empty. */
3103 p += GEN_STD (p, 3, 30, 0);
3104
3105 /* Set up the argument. arg1 is a 16-bit value. */
3106 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3107 p += gen_limm (p, 3, arg1, 1);
3108 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3109 p += gen_call (p, fn, 1, 0);
3110 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3111
3112 /* Restore TOP */
3113 p += GEN_LD (p, 3, 30, 0);
3114
3115 emit_insns (buf, p - buf);
3116 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3117 }
3118
3119 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3120
3121 static void
3122 ppc64_emit_if_goto (int *offset_p, int *size_p)
3123 {
3124 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3125 "ldu 3, 8(30) \n"
3126 "1:bne 7, 1b \n");
3127
3128 if (offset_p)
3129 *offset_p = 8;
3130 if (size_p)
3131 *size_p = 14;
3132 }
3133
3134 /* Goto if stack[--sp] == TOP */
3135
3136 static void
3137 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3138 {
3139 EMIT_ASM ("ldu 4, 8(30) \n"
3140 "cmpd 7, 4, 3 \n"
3141 "ldu 3, 8(30) \n"
3142 "1:beq 7, 1b \n");
3143
3144 if (offset_p)
3145 *offset_p = 12;
3146 if (size_p)
3147 *size_p = 14;
3148 }
3149
3150 /* Goto if stack[--sp] != TOP */
3151
3152 static void
3153 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3154 {
3155 EMIT_ASM ("ldu 4, 8(30) \n"
3156 "cmpd 7, 4, 3 \n"
3157 "ldu 3, 8(30) \n"
3158 "1:bne 7, 1b \n");
3159
3160 if (offset_p)
3161 *offset_p = 12;
3162 if (size_p)
3163 *size_p = 14;
3164 }
3165
3166 /* Goto if stack[--sp] < TOP */
3167
3168 static void
3169 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3170 {
3171 EMIT_ASM ("ldu 4, 8(30) \n"
3172 "cmpd 7, 4, 3 \n"
3173 "ldu 3, 8(30) \n"
3174 "1:blt 7, 1b \n");
3175
3176 if (offset_p)
3177 *offset_p = 12;
3178 if (size_p)
3179 *size_p = 14;
3180 }
3181
3182 /* Goto if stack[--sp] <= TOP */
3183
3184 static void
3185 ppc64_emit_le_goto (int *offset_p, int *size_p)
3186 {
3187 EMIT_ASM ("ldu 4, 8(30) \n"
3188 "cmpd 7, 4, 3 \n"
3189 "ldu 3, 8(30) \n"
3190 "1:ble 7, 1b \n");
3191
3192 if (offset_p)
3193 *offset_p = 12;
3194 if (size_p)
3195 *size_p = 14;
3196 }
3197
3198 /* Goto if stack[--sp] > TOP */
3199
3200 static void
3201 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3202 {
3203 EMIT_ASM ("ldu 4, 8(30) \n"
3204 "cmpd 7, 4, 3 \n"
3205 "ldu 3, 8(30) \n"
3206 "1:bgt 7, 1b \n");
3207
3208 if (offset_p)
3209 *offset_p = 12;
3210 if (size_p)
3211 *size_p = 14;
3212 }
3213
3214 /* Goto if stack[--sp] >= TOP */
3215
3216 static void
3217 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3218 {
3219 EMIT_ASM ("ldu 4, 8(30) \n"
3220 "cmpd 7, 4, 3 \n"
3221 "ldu 3, 8(30) \n"
3222 "1:bge 7, 1b \n");
3223
3224 if (offset_p)
3225 *offset_p = 12;
3226 if (size_p)
3227 *size_p = 14;
3228 }
3229
3230 /* Table of emit ops for 64-bit ELFv1. */
3231
3232 static struct emit_ops ppc64v1_emit_ops_impl =
3233 {
3234 ppc64v1_emit_prologue,
3235 ppc64_emit_epilogue,
3236 ppc64_emit_add,
3237 ppc64_emit_sub,
3238 ppc64_emit_mul,
3239 ppc64_emit_lsh,
3240 ppc64_emit_rsh_signed,
3241 ppc64_emit_rsh_unsigned,
3242 ppc64_emit_ext,
3243 ppc64_emit_log_not,
3244 ppc64_emit_bit_and,
3245 ppc64_emit_bit_or,
3246 ppc64_emit_bit_xor,
3247 ppc64_emit_bit_not,
3248 ppc64_emit_equal,
3249 ppc64_emit_less_signed,
3250 ppc64_emit_less_unsigned,
3251 ppc64_emit_ref,
3252 ppc64_emit_if_goto,
3253 ppc_emit_goto,
3254 ppc_write_goto_address,
3255 ppc64_emit_const,
3256 ppc64v1_emit_call,
3257 ppc64v1_emit_reg,
3258 ppc64_emit_pop,
3259 ppc64_emit_stack_flush,
3260 ppc64_emit_zero_ext,
3261 ppc64_emit_swap,
3262 ppc_emit_stack_adjust,
3263 ppc64v1_emit_int_call_1,
3264 ppc64v1_emit_void_call_2,
3265 ppc64_emit_eq_goto,
3266 ppc64_emit_ne_goto,
3267 ppc64_emit_lt_goto,
3268 ppc64_emit_le_goto,
3269 ppc64_emit_gt_goto,
3270 ppc64_emit_ge_goto
3271 };
3272
3273 /* Table of emit ops for 64-bit ELFv2. */
3274
3275 static struct emit_ops ppc64v2_emit_ops_impl =
3276 {
3277 ppc64v2_emit_prologue,
3278 ppc64_emit_epilogue,
3279 ppc64_emit_add,
3280 ppc64_emit_sub,
3281 ppc64_emit_mul,
3282 ppc64_emit_lsh,
3283 ppc64_emit_rsh_signed,
3284 ppc64_emit_rsh_unsigned,
3285 ppc64_emit_ext,
3286 ppc64_emit_log_not,
3287 ppc64_emit_bit_and,
3288 ppc64_emit_bit_or,
3289 ppc64_emit_bit_xor,
3290 ppc64_emit_bit_not,
3291 ppc64_emit_equal,
3292 ppc64_emit_less_signed,
3293 ppc64_emit_less_unsigned,
3294 ppc64_emit_ref,
3295 ppc64_emit_if_goto,
3296 ppc_emit_goto,
3297 ppc_write_goto_address,
3298 ppc64_emit_const,
3299 ppc64v2_emit_call,
3300 ppc64v2_emit_reg,
3301 ppc64_emit_pop,
3302 ppc64_emit_stack_flush,
3303 ppc64_emit_zero_ext,
3304 ppc64_emit_swap,
3305 ppc_emit_stack_adjust,
3306 ppc64v2_emit_int_call_1,
3307 ppc64v2_emit_void_call_2,
3308 ppc64_emit_eq_goto,
3309 ppc64_emit_ne_goto,
3310 ppc64_emit_lt_goto,
3311 ppc64_emit_le_goto,
3312 ppc64_emit_gt_goto,
3313 ppc64_emit_ge_goto
3314 };
3315
3316 #endif
3317
3318 /* Implementation of linux_target_ops method "emit_ops". */
3319
3320 static struct emit_ops *
3321 ppc_emit_ops (void)
3322 {
3323 #ifdef __powerpc64__
3324 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3325
3326 if (register_size (regcache->tdesc, 0) == 8)
3327 {
3328 if (is_elfv2_inferior ())
3329 return &ppc64v2_emit_ops_impl;
3330 else
3331 return &ppc64v1_emit_ops_impl;
3332 }
3333 #endif
3334 return &ppc_emit_ops_impl;
3335 }
3336
3337 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3338
3339 static int
3340 ppc_get_ipa_tdesc_idx (void)
3341 {
3342 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3343 const struct target_desc *tdesc = regcache->tdesc;
3344
3345 #ifdef __powerpc64__
3346 if (tdesc == tdesc_powerpc_64l)
3347 return PPC_TDESC_BASE;
3348 if (tdesc == tdesc_powerpc_altivec64l)
3349 return PPC_TDESC_ALTIVEC;
3350 if (tdesc == tdesc_powerpc_vsx64l)
3351 return PPC_TDESC_VSX;
3352 if (tdesc == tdesc_powerpc_isa205_64l)
3353 return PPC_TDESC_ISA205;
3354 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3355 return PPC_TDESC_ISA205_ALTIVEC;
3356 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3357 return PPC_TDESC_ISA205_VSX;
3358 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3359 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3360 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3361 return PPC_TDESC_ISA207_VSX;
3362 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3363 return PPC_TDESC_ISA207_HTM_VSX;
3364 #endif
3365
3366 if (tdesc == tdesc_powerpc_32l)
3367 return PPC_TDESC_BASE;
3368 if (tdesc == tdesc_powerpc_altivec32l)
3369 return PPC_TDESC_ALTIVEC;
3370 if (tdesc == tdesc_powerpc_vsx32l)
3371 return PPC_TDESC_VSX;
3372 if (tdesc == tdesc_powerpc_isa205_32l)
3373 return PPC_TDESC_ISA205;
3374 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3375 return PPC_TDESC_ISA205_ALTIVEC;
3376 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3377 return PPC_TDESC_ISA205_VSX;
3378 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3379 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3380 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3381 return PPC_TDESC_ISA207_VSX;
3382 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3383 return PPC_TDESC_ISA207_HTM_VSX;
3384 if (tdesc == tdesc_powerpc_e500l)
3385 return PPC_TDESC_E500;
3386
3387 return 0;
3388 }
3389
3390 struct linux_target_ops the_low_target = {
3391 ppc_cannot_fetch_register,
3392 ppc_cannot_store_register,
3393 NULL, /* fetch_register */
3394 ppc_get_pc,
3395 ppc_set_pc,
3396 NULL, /* breakpoint_kind_from_pc */
3397 ppc_sw_breakpoint_from_kind,
3398 NULL,
3399 0,
3400 ppc_breakpoint_at,
3401 ppc_supports_z_point_type,
3402 ppc_insert_point,
3403 ppc_remove_point,
3404 NULL,
3405 NULL,
3406 ppc_collect_ptrace_register,
3407 ppc_supply_ptrace_register,
3408 NULL, /* siginfo_fixup */
3409 NULL, /* new_process */
3410 NULL, /* delete_process */
3411 NULL, /* new_thread */
3412 NULL, /* delete_thread */
3413 NULL, /* new_fork */
3414 NULL, /* prepare_to_resume */
3415 NULL, /* process_qsupported */
3416 ppc_supports_tracepoints,
3417 ppc_get_thread_area,
3418 ppc_install_fast_tracepoint_jump_pad,
3419 ppc_emit_ops,
3420 ppc_get_min_fast_tracepoint_insn_len,
3421 NULL, /* supports_range_stepping */
3422 NULL, /* breakpoint_kind_from_current_state */
3423 ppc_supports_hardware_single_step,
3424 NULL, /* get_syscall_trapinfo */
3425 ppc_get_ipa_tdesc_idx,
3426 };
3427
3428 /* The linux target ops object. */
3429
3430 linux_process_target *the_linux_target = &the_ppc_target;
3431
3432 void
3433 initialize_low_arch (void)
3434 {
3435 /* Initialize the Linux target descriptions. */
3436
3437 init_registers_powerpc_32l ();
3438 init_registers_powerpc_altivec32l ();
3439 init_registers_powerpc_vsx32l ();
3440 init_registers_powerpc_isa205_32l ();
3441 init_registers_powerpc_isa205_altivec32l ();
3442 init_registers_powerpc_isa205_vsx32l ();
3443 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3444 init_registers_powerpc_isa207_vsx32l ();
3445 init_registers_powerpc_isa207_htm_vsx32l ();
3446 init_registers_powerpc_e500l ();
3447 #if __powerpc64__
3448 init_registers_powerpc_64l ();
3449 init_registers_powerpc_altivec64l ();
3450 init_registers_powerpc_vsx64l ();
3451 init_registers_powerpc_isa205_64l ();
3452 init_registers_powerpc_isa205_altivec64l ();
3453 init_registers_powerpc_isa205_vsx64l ();
3454 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3455 init_registers_powerpc_isa207_vsx64l ();
3456 init_registers_powerpc_isa207_htm_vsx64l ();
3457 #endif
3458
3459 initialize_regsets_info (&ppc_regsets_info);
3460 }