1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
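
/* For example, for the unconditional branch instruction 0x48000010
   ("b .+16"), PPC_OP6 yields the primary opcode 18 from bits 0-5, and
   PPC_LI sign-extends the 24-bit LI field and scales it by 4, giving
   the branch displacement 16.  */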
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
56
57 bool supports_z_point_type (char z_type) override;
58
59
60 void low_collect_ptrace_register (regcache *regcache, int regno,
61 char *buf) override;
62
63 void low_supply_ptrace_register (regcache *regcache, int regno,
64 const char *buf) override;
65
66 bool supports_tracepoints () override;
67
68 protected:
69
70 void low_arch_setup () override;
71
72 bool low_cannot_fetch_register (int regno) override;
73
74 bool low_cannot_store_register (int regno) override;
75
76 bool low_supports_breakpoints () override;
77
78 CORE_ADDR low_get_pc (regcache *regcache) override;
79
80 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
81
82 bool low_breakpoint_at (CORE_ADDR pc) override;
83
84 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
85 int size, raw_breakpoint *bp) override;
86
87 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
88 int size, raw_breakpoint *bp) override;
89
90 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
91 };
92
93 /* The singleton target ops object. */
94
95 static ppc_target the_ppc_target;
96
97 /* Holds the AT_HWCAP auxv entry. */
98
99 static unsigned long ppc_hwcap;
100
101 /* Holds the AT_HWCAP2 auxv entry. */
102
103 static unsigned long ppc_hwcap2;
104
105
106 #define ppc_num_regs 73
107
108 #ifdef __powerpc64__
109 /* We use a constant for FPSCR instead of PT_FPSCR, because
110 many shipped PPC64 kernels had the wrong value in ptrace.h. */
111 static int ppc_regmap[] =
112 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
113 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
114 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
115 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
116 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
117 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
118 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
119 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
120 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
121 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
122 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
123 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
124 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
125 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
126 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
127 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
128 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
129 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
130 PT_ORIG_R3 * 8, PT_TRAP * 8 };
131 #else
132 /* Currently, don't check/send MQ. */
133 static int ppc_regmap[] =
134 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
135 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
136 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
137 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
138 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
139 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
140 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
141 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
142 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
143 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
144 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
145 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
146 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
147 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
148 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
149 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
150 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
151 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
152 PT_ORIG_R3 * 4, PT_TRAP * 4
153 };
154
155 static int ppc_regmap_e500[] =
156 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
157 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
158 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
159 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
160 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
161 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
162 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
163 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
164 -1, -1, -1, -1,
165 -1, -1, -1, -1,
166 -1, -1, -1, -1,
167 -1, -1, -1, -1,
168 -1, -1, -1, -1,
169 -1, -1, -1, -1,
170 -1, -1, -1, -1,
171 -1, -1, -1, -1,
172 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
173 PT_CTR * 4, PT_XER * 4, -1,
174 PT_ORIG_R3 * 4, PT_TRAP * 4
175 };
176 #endif
177
178 /* Check whether the kernel provides a register set with number
179 REGSET_ID of size REGSETSIZE for process/thread TID. */
180
181 static int
182 ppc_check_regset (int tid, int regset_id, int regsetsize)
183 {
184 void *buf = alloca (regsetsize);
185 struct iovec iov;
186
187 iov.iov_base = buf;
188 iov.iov_len = regsetsize;
189
190 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
191 || errno == ENODATA)
192 return 1;
193 return 0;
194 }
195
196 bool
197 ppc_target::low_cannot_store_register (int regno)
198 {
199 const struct target_desc *tdesc = current_process ()->tdesc;
200
201 #ifndef __powerpc64__
202 /* Some kernels do not allow us to store fpscr. */
203 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
204 && regno == find_regno (tdesc, "fpscr"))
205 return true;
206 #endif
207
208 /* Some kernels do not allow us to store orig_r3 or trap. */
209 if (regno == find_regno (tdesc, "orig_r3")
210 || regno == find_regno (tdesc, "trap"))
211 return true;
212
213 return false;
214 }
215
216 bool
217 ppc_target::low_cannot_fetch_register (int regno)
218 {
219 return false;
220 }
221
222 void
223 ppc_target::low_collect_ptrace_register (regcache *regcache, int regno,
224 char *buf)
225 {
226 memset (buf, 0, sizeof (long));
227
228 if (__BYTE_ORDER == __LITTLE_ENDIAN)
229 {
230 /* Little-endian values always sit at the left end of the buffer. */
231 collect_register (regcache, regno, buf);
232 }
233 else if (__BYTE_ORDER == __BIG_ENDIAN)
234 {
235 /* Big-endian values sit at the right end of the buffer. For
236 registers whose size is smaller than sizeof (long), we must apply
237 an offset to access them correctly. */
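/* For example, a 4-byte register read into an 8-byte long starts
   at buf + 4 on a 64-bit big-endian kernel.  */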
238 int size = register_size (regcache->tdesc, regno);
239
240 if (size < sizeof (long))
241 collect_register (regcache, regno, buf + sizeof (long) - size);
242 else
243 collect_register (regcache, regno, buf);
244 }
245 else
246 perror_with_name ("Unexpected byte order");
247 }
248
249 void
250 ppc_target::low_supply_ptrace_register (regcache *regcache, int regno,
251 const char *buf)
252 {
253 if (__BYTE_ORDER == __LITTLE_ENDIAN)
254 {
255 /* Little-endian values always sit at the left end of the buffer. */
256 supply_register (regcache, regno, buf);
257 }
258 else if (__BYTE_ORDER == __BIG_ENDIAN)
259 {
260 /* Big-endian values sit at the right end of the buffer. For
261 registers whose size is smaller than sizeof (long), we must apply
262 an offset to access them correctly. */
263 int size = register_size (regcache->tdesc, regno);
264
265 if (size < sizeof (long))
266 supply_register (regcache, regno, buf + sizeof (long) - size);
267 else
268 supply_register (regcache, regno, buf);
269 }
270 else
271 perror_with_name ("Unexpected byte order");
272 }
273
274 bool
275 ppc_target::low_supports_breakpoints ()
276 {
277 return true;
278 }
279
280 CORE_ADDR
281 ppc_target::low_get_pc (regcache *regcache)
282 {
283 if (register_size (regcache->tdesc, 0) == 4)
284 {
285 unsigned int pc;
286 collect_register_by_name (regcache, "pc", &pc);
287 return (CORE_ADDR) pc;
288 }
289 else
290 {
291 unsigned long pc;
292 collect_register_by_name (regcache, "pc", &pc);
293 return (CORE_ADDR) pc;
294 }
295 }
296
297 void
298 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
299 {
300 if (register_size (regcache->tdesc, 0) == 4)
301 {
302 unsigned int newpc = pc;
303 supply_register_by_name (regcache, "pc", &newpc);
304 }
305 else
306 {
307 unsigned long newpc = pc;
308 supply_register_by_name (regcache, "pc", &newpc);
309 }
310 }
311
312 #ifndef __powerpc64__
313 static int ppc_regmap_adjusted;
314 #endif
315
316
317 /* Correct in either endianness.
318 This instruction is "twge r2, r2", which GDB uses as a software
319 breakpoint. */
320 static const unsigned int ppc_breakpoint = 0x7d821008;
321 #define ppc_breakpoint_len 4
322
323 /* Implementation of target ops method "sw_breakpoint_from_kind". */
324
325 const gdb_byte *
326 ppc_target::sw_breakpoint_from_kind (int kind, int *size)
327 {
328 *size = ppc_breakpoint_len;
329 return (const gdb_byte *) &ppc_breakpoint;
330 }
331
332 bool
333 ppc_target::low_breakpoint_at (CORE_ADDR where)
334 {
335 unsigned int insn;
336
337 read_memory (where, (unsigned char *) &insn, 4);
338 if (insn == ppc_breakpoint)
339 return true;
340 /* If necessary, recognize more trap instructions here. GDB only uses
341 the one. */
342
343 return false;
344 }
345
346 /* Implement the supports_z_point_type target op.
347 Returns true if breakpoint type Z_TYPE is supported.
348
349 Software breakpoints are handled on the server side, so tracepoints
350 and breakpoints can be inserted at the same location. */
351
352 bool
353 ppc_target::supports_z_point_type (char z_type)
354 {
355 switch (z_type)
356 {
357 case Z_PACKET_SW_BP:
358 return true;
359 case Z_PACKET_HW_BP:
360 case Z_PACKET_WRITE_WP:
361 case Z_PACKET_ACCESS_WP:
362 default:
363 return false;
364 }
365 }
366
367 /* Implement the low_insert_point linux target op.
368 Returns 0 on success, -1 on failure and 1 on unsupported. */
369
370 int
371 ppc_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
372 int size, raw_breakpoint *bp)
373 {
374 switch (type)
375 {
376 case raw_bkpt_type_sw:
377 return insert_memory_breakpoint (bp);
378
379 case raw_bkpt_type_hw:
380 case raw_bkpt_type_write_wp:
381 case raw_bkpt_type_access_wp:
382 default:
383 /* Unsupported. */
384 return 1;
385 }
386 }
387
388 /* Implement the low_remove_point linux target op.
389 Returns 0 on success, -1 on failure and 1 on unsupported. */
390
391 int
392 ppc_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
393 int size, raw_breakpoint *bp)
394 {
395 switch (type)
396 {
397 case raw_bkpt_type_sw:
398 return remove_memory_breakpoint (bp);
399
400 case raw_bkpt_type_hw:
401 case raw_bkpt_type_write_wp:
402 case raw_bkpt_type_access_wp:
403 default:
404 /* Unsupported. */
405 return 1;
406 }
407 }
408
409 /* Provide only a fill function for the general register set. ps_lgetregs
410 will use this for NPTL support. */
411
412 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
413 {
414 int i;
415
416 ppc_target *my_ppc_target = (ppc_target *) the_linux_target;
417
418 for (i = 0; i < 32; i++)
419 my_ppc_target->low_collect_ptrace_register (regcache, i,
420 (char *) buf + ppc_regmap[i]);
421
422 for (i = 64; i < 70; i++)
423 my_ppc_target->low_collect_ptrace_register (regcache, i,
424 (char *) buf + ppc_regmap[i]);
425
426 for (i = 71; i < 73; i++)
427 my_ppc_target->low_collect_ptrace_register (regcache, i,
428 (char *) buf + ppc_regmap[i]);
429 }
430
431 /* Program Priority Register regset fill function. */
432
433 static void
434 ppc_fill_pprregset (struct regcache *regcache, void *buf)
435 {
436 char *ppr = (char *) buf;
437
438 collect_register_by_name (regcache, "ppr", ppr);
439 }
440
441 /* Program Priority Register regset store function. */
442
443 static void
444 ppc_store_pprregset (struct regcache *regcache, const void *buf)
445 {
446 const char *ppr = (const char *) buf;
447
448 supply_register_by_name (regcache, "ppr", ppr);
449 }
450
451 /* Data Stream Control Register regset fill function. */
452
453 static void
454 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
455 {
456 char *dscr = (char *) buf;
457
458 collect_register_by_name (regcache, "dscr", dscr);
459 }
460
461 /* Data Stream Control Register regset store function. */
462
463 static void
464 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
465 {
466 const char *dscr = (const char *) buf;
467
468 supply_register_by_name (regcache, "dscr", dscr);
469 }
470
471 /* Target Address Register regset fill function. */
472
473 static void
474 ppc_fill_tarregset (struct regcache *regcache, void *buf)
475 {
476 char *tar = (char *) buf;
477
478 collect_register_by_name (regcache, "tar", tar);
479 }
480
481 /* Target Address Register regset store function. */
482
483 static void
484 ppc_store_tarregset (struct regcache *regcache, const void *buf)
485 {
486 const char *tar = (const char *) buf;
487
488 supply_register_by_name (regcache, "tar", tar);
489 }
490
491 /* Event-Based Branching regset store function. Unless the inferior
492 has a perf event open, ptrace can fail with ENODATA when reading or
493 writing the regset. For reading, the registers
494 will correctly show as unavailable. For writing, gdbserver
495 currently only caches any register writes from P and G packets and
496 the stub always tries to write all the regsets when resuming the
497 inferior, which would result in frequent warnings. For this
498 reason, we don't define a fill function. This also means that the
499 client-side regcache will be dirty if the user tries to write to
500 the EBB registers. G packets that the client sends to write to
501 unrelated registers will also include data for EBB registers, even
502 if they are unavailable. */
503
504 static void
505 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
506 {
507 const char *regset = (const char *) buf;
508
509 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
510 .dat file it is BESCR, EBBHR, EBBRR. */
511 supply_register_by_name (regcache, "ebbrr", &regset[0]);
512 supply_register_by_name (regcache, "ebbhr", &regset[8]);
513 supply_register_by_name (regcache, "bescr", &regset[16]);
514 }
515
516 /* Performance Monitoring Unit regset fill function. */
517
518 static void
519 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
520 {
521 char *regset = (char *) buf;
522
523 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
524 In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER. */
525 collect_register_by_name (regcache, "siar", &regset[0]);
526 collect_register_by_name (regcache, "sdar", &regset[8]);
527 collect_register_by_name (regcache, "sier", &regset[16]);
528 collect_register_by_name (regcache, "mmcr2", &regset[24]);
529 collect_register_by_name (regcache, "mmcr0", &regset[32]);
530 }
531
532 /* Performance Monitoring Unit regset store function. */
533
534 static void
535 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
536 {
537 const char *regset = (const char *) buf;
538
539 supply_register_by_name (regcache, "siar", &regset[0]);
540 supply_register_by_name (regcache, "sdar", &regset[8]);
541 supply_register_by_name (regcache, "sier", &regset[16]);
542 supply_register_by_name (regcache, "mmcr2", &regset[24]);
543 supply_register_by_name (regcache, "mmcr0", &regset[32]);
544 }
545
546 /* Hardware Transactional Memory special-purpose register regset fill
547 function. */
548
549 static void
550 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
551 {
552 int i, base;
553 char *regset = (char *) buf;
554
555 base = find_regno (regcache->tdesc, "tfhar");
556 for (i = 0; i < 3; i++)
557 collect_register (regcache, base + i, &regset[i * 8]);
558 }
559
560 /* Hardware Transactional Memory special-purpose register regset store
561 function. */
562
563 static void
564 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
565 {
566 int i, base;
567 const char *regset = (const char *) buf;
568
569 base = find_regno (regcache->tdesc, "tfhar");
570 for (i = 0; i < 3; i++)
571 supply_register (regcache, base + i, &regset[i * 8]);
572 }
573
574 /* For the same reasons as the EBB regset, none of the HTM
575 checkpointed regsets have a fill function. These registers are
576 only available if the inferior is in a transaction. */
577
578 /* Hardware Transactional Memory checkpointed general-purpose regset
579 store function. */
580
581 static void
582 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
583 {
584 int i, base, size, endian_offset;
585 const char *regset = (const char *) buf;
586
587 base = find_regno (regcache->tdesc, "cr0");
588 size = register_size (regcache->tdesc, base);
589
590 gdb_assert (size == 4 || size == 8);
591
592 for (i = 0; i < 32; i++)
593 supply_register (regcache, base + i, &regset[i * size]);
594
595 endian_offset = 0;
596
597 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
598 endian_offset = 4;
599
600 supply_register_by_name (regcache, "ccr",
601 &regset[PT_CCR * size + endian_offset]);
602
603 supply_register_by_name (regcache, "cxer",
604 &regset[PT_XER * size + endian_offset]);
605
606 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
607 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
608 }
609
610 /* Hardware Transactional Memory checkpointed floating-point regset
611 store function. */
612
613 static void
614 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
615 {
616 int i, base;
617 const char *regset = (const char *) buf;
618
619 base = find_regno (regcache->tdesc, "cf0");
620
621 for (i = 0; i < 32; i++)
622 supply_register (regcache, base + i, &regset[i * 8]);
623
624 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
625 }
626
627 /* Hardware Transactional Memory checkpointed vector regset store
628 function. */
629
630 static void
631 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
632 {
633 int i, base;
634 const char *regset = (const char *) buf;
635 int vscr_offset = 0;
636
637 base = find_regno (regcache->tdesc, "cvr0");
638
639 for (i = 0; i < 32; i++)
640 supply_register (regcache, base + i, &regset[i * 16]);
641
642 if (__BYTE_ORDER == __BIG_ENDIAN)
643 vscr_offset = 12;
644
645 supply_register_by_name (regcache, "cvscr",
646 &regset[32 * 16 + vscr_offset]);
647
648 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
649 }
650
651 /* Hardware Transactional Memory checkpointed vector-scalar regset
652 store function. */
653
654 static void
655 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
656 {
657 int i, base;
658 const char *regset = (const char *) buf;
659
660 base = find_regno (regcache->tdesc, "cvs0h");
661 for (i = 0; i < 32; i++)
662 supply_register (regcache, base + i, &regset[i * 8]);
663 }
664
665 /* Hardware Transactional Memory checkpointed Program Priority
666 Register regset store function. */
667
668 static void
669 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
670 {
671 const char *cppr = (const char *) buf;
672
673 supply_register_by_name (regcache, "cppr", cppr);
674 }
675
676 /* Hardware Transactional Memory checkpointed Data Stream Control
677 Register regset store function. */
678
679 static void
680 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
681 {
682 const char *cdscr = (const char *) buf;
683
684 supply_register_by_name (regcache, "cdscr", cdscr);
685 }
686
687 /* Hardware Transactional Memory checkpointed Target Address Register
688 regset store function. */
689
690 static void
691 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
692 {
693 const char *ctar = (const char *) buf;
694
695 supply_register_by_name (regcache, "ctar", ctar);
696 }
697
698 static void
699 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
700 {
701 int i, base;
702 char *regset = (char *) buf;
703
704 base = find_regno (regcache->tdesc, "vs0h");
705 for (i = 0; i < 32; i++)
706 collect_register (regcache, base + i, &regset[i * 8]);
707 }
708
709 static void
710 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
711 {
712 int i, base;
713 const char *regset = (const char *) buf;
714
715 base = find_regno (regcache->tdesc, "vs0h");
716 for (i = 0; i < 32; i++)
717 supply_register (regcache, base + i, &regset[i * 8]);
718 }
719
720 static void
721 ppc_fill_vrregset (struct regcache *regcache, void *buf)
722 {
723 int i, base;
724 char *regset = (char *) buf;
725 int vscr_offset = 0;
726
727 base = find_regno (regcache->tdesc, "vr0");
728 for (i = 0; i < 32; i++)
729 collect_register (regcache, base + i, &regset[i * 16]);
730
731 if (__BYTE_ORDER == __BIG_ENDIAN)
732 vscr_offset = 12;
733
734 collect_register_by_name (regcache, "vscr",
735 &regset[32 * 16 + vscr_offset]);
736
737 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
738 }
739
740 static void
741 ppc_store_vrregset (struct regcache *regcache, const void *buf)
742 {
743 int i, base;
744 const char *regset = (const char *) buf;
745 int vscr_offset = 0;
746
747 base = find_regno (regcache->tdesc, "vr0");
748 for (i = 0; i < 32; i++)
749 supply_register (regcache, base + i, &regset[i * 16]);
750
751 if (__BYTE_ORDER == __BIG_ENDIAN)
752 vscr_offset = 12;
753
754 supply_register_by_name (regcache, "vscr",
755 &regset[32 * 16 + vscr_offset]);
756 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
757 }
758
759 struct gdb_evrregset_t
760 {
761 unsigned long evr[32];
762 unsigned long long acc;
763 unsigned long spefscr;
764 };
765
766 static void
767 ppc_fill_evrregset (struct regcache *regcache, void *buf)
768 {
769 int i, ev0;
770 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
771
772 ev0 = find_regno (regcache->tdesc, "ev0h");
773 for (i = 0; i < 32; i++)
774 collect_register (regcache, ev0 + i, &regset->evr[i]);
775
776 collect_register_by_name (regcache, "acc", &regset->acc);
777 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
778 }
779
780 static void
781 ppc_store_evrregset (struct regcache *regcache, const void *buf)
782 {
783 int i, ev0;
784 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
785
786 ev0 = find_regno (regcache->tdesc, "ev0h");
787 for (i = 0; i < 32; i++)
788 supply_register (regcache, ev0 + i, &regset->evr[i]);
789
790 supply_register_by_name (regcache, "acc", &regset->acc);
791 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
792 }
793
794 /* Support for hardware single step. */
795
796 static int
797 ppc_supports_hardware_single_step (void)
798 {
799 return 1;
800 }
801
802 static struct regset_info ppc_regsets[] = {
803 /* List the extra register sets before GENERAL_REGS. That way we will
804 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
805 general registers. Some kernels support these, but not the newer
806 PPC_PTRACE_GETREGS. */
807 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
808 NULL, ppc_store_tm_ctarregset },
809 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
810 NULL, ppc_store_tm_cdscrregset },
811 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
812 NULL, ppc_store_tm_cpprregset },
813 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
814 NULL, ppc_store_tm_cvsxregset },
815 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
816 NULL, ppc_store_tm_cvrregset },
817 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
818 NULL, ppc_store_tm_cfprregset },
819 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
820 NULL, ppc_store_tm_cgprregset },
821 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
822 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
823 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
824 NULL, ppc_store_ebbregset },
825 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
826 ppc_fill_pmuregset, ppc_store_pmuregset },
827 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
828 ppc_fill_tarregset, ppc_store_tarregset },
829 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
830 ppc_fill_pprregset, ppc_store_pprregset },
831 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
832 ppc_fill_dscrregset, ppc_store_dscrregset },
833 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
834 ppc_fill_vsxregset, ppc_store_vsxregset },
835 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
836 ppc_fill_vrregset, ppc_store_vrregset },
837 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
838 ppc_fill_evrregset, ppc_store_evrregset },
839 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
840 NULL_REGSET
841 };
842
843 static struct usrregs_info ppc_usrregs_info =
844 {
845 ppc_num_regs,
846 ppc_regmap,
847 };
848
849 static struct regsets_info ppc_regsets_info =
850 {
851 ppc_regsets, /* regsets */
852 0, /* num_regsets */
853 NULL, /* disabled_regsets */
854 };
855
856 static struct regs_info myregs_info =
857 {
858 NULL, /* regset_bitmap */
859 &ppc_usrregs_info,
860 &ppc_regsets_info
861 };
862
863 const regs_info *
864 ppc_target::get_regs_info ()
865 {
866 return &myregs_info;
867 }
868
869 void
870 ppc_target::low_arch_setup ()
871 {
872 const struct target_desc *tdesc;
873 struct regset_info *regset;
874 struct ppc_linux_features features = ppc_linux_no_features;
875
876 int tid = lwpid_of (current_thread);
877
878 features.wordsize = ppc_linux_target_wordsize (tid);
879
880 if (features.wordsize == 4)
881 tdesc = tdesc_powerpc_32l;
882 else
883 tdesc = tdesc_powerpc_64l;
884
885 current_process ()->tdesc = tdesc;
886
887 /* The value of current_process ()->tdesc needs to be set for this
888 call. */
889 ppc_hwcap = linux_get_hwcap (features.wordsize);
890 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
891
892 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
893
894 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
895 features.vsx = true;
896
897 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
898 features.altivec = true;
899
900 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
901 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
902 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
903 {
904 features.ppr_dscr = true;
905 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
906 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
907 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
908 && ppc_check_regset (tid, NT_PPC_TAR,
909 PPC_LINUX_SIZEOF_TARREGSET)
910 && ppc_check_regset (tid, NT_PPC_EBB,
911 PPC_LINUX_SIZEOF_EBBREGSET)
912 && ppc_check_regset (tid, NT_PPC_PMU,
913 PPC_LINUX_SIZEOF_PMUREGSET))
914 {
915 features.isa207 = true;
916 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
917 && ppc_check_regset (tid, NT_PPC_TM_SPR,
918 PPC_LINUX_SIZEOF_TM_SPRREGSET))
919 features.htm = true;
920 }
921 }
922
923 tdesc = ppc_linux_match_description (features);
924
925 /* On 32-bit machines, check for SPE registers.
926 Set the low target's regmap field appropriately. */
927 #ifndef __powerpc64__
928 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
929 tdesc = tdesc_powerpc_e500l;
930
931 if (!ppc_regmap_adjusted)
932 {
933 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
934 ppc_usrregs_info.regmap = ppc_regmap_e500;
935
936 /* If the FPSCR is 64-bit wide, we need to fetch the whole
937 64-bit slot and not just its second word. The PT_FPSCR
938 supplied in a 32-bit GDB compilation doesn't reflect
939 this. */
940 if (register_size (tdesc, 70) == 8)
941 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
942
943 ppc_regmap_adjusted = 1;
944 }
945 #endif
946
947 current_process ()->tdesc = tdesc;
948
949 for (regset = ppc_regsets; regset->size >= 0; regset++)
950 switch (regset->get_request)
951 {
952 case PTRACE_GETVRREGS:
953 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
954 break;
955 case PTRACE_GETVSXREGS:
956 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
957 break;
958 case PTRACE_GETEVRREGS:
959 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
960 regset->size = 32 * 4 + 8 + 4;
961 else
962 regset->size = 0;
963 break;
964 case PTRACE_GETREGSET:
965 switch (regset->nt_type)
966 {
967 case NT_PPC_PPR:
968 regset->size = (features.ppr_dscr ?
969 PPC_LINUX_SIZEOF_PPRREGSET : 0);
970 break;
971 case NT_PPC_DSCR:
972 regset->size = (features.ppr_dscr ?
973 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
974 break;
975 case NT_PPC_TAR:
976 regset->size = (features.isa207 ?
977 PPC_LINUX_SIZEOF_TARREGSET : 0);
978 break;
979 case NT_PPC_EBB:
980 regset->size = (features.isa207 ?
981 PPC_LINUX_SIZEOF_EBBREGSET : 0);
982 break;
983 case NT_PPC_PMU:
984 regset->size = (features.isa207 ?
985 PPC_LINUX_SIZEOF_PMUREGSET : 0);
986 break;
987 case NT_PPC_TM_SPR:
988 regset->size = (features.htm ?
989 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
990 break;
991 case NT_PPC_TM_CGPR:
992 if (features.wordsize == 4)
993 regset->size = (features.htm ?
994 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
995 else
996 regset->size = (features.htm ?
997 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
998 break;
999 case NT_PPC_TM_CFPR:
1000 regset->size = (features.htm ?
1001 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
1002 break;
1003 case NT_PPC_TM_CVMX:
1004 regset->size = (features.htm ?
1005 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
1006 break;
1007 case NT_PPC_TM_CVSX:
1008 regset->size = (features.htm ?
1009 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
1010 break;
1011 case NT_PPC_TM_CPPR:
1012 regset->size = (features.htm ?
1013 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
1014 break;
1015 case NT_PPC_TM_CDSCR:
1016 regset->size = (features.htm ?
1017 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
1018 break;
1019 case NT_PPC_TM_CTAR:
1020 regset->size = (features.htm ?
1021 PPC_LINUX_SIZEOF_CTARREGSET : 0);
1022 break;
1023 default:
1024 break;
1025 }
1026 break;
1027 default:
1028 break;
1029 }
1030 }
1031
1032 /* Implementation of target ops method "supports_tracepoints". */
1033
1034 bool
1035 ppc_target::supports_tracepoints ()
1036 {
1037 return true;
1038 }
1039
1040 /* Get the thread area address. This is used to recognize which
1041 thread is which when tracing with the in-process agent library. We
1042 don't read anything from the address, and treat it as opaque; it's
1043 the address itself that we assume is unique per-thread. */
1044
1045 int
1046 ppc_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
1047 {
1048 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1049 struct thread_info *thr = get_lwp_thread (lwp);
1050 struct regcache *regcache = get_thread_regcache (thr, 1);
1051 ULONGEST tp = 0;
1052
1053 #ifdef __powerpc64__
1054 if (register_size (regcache->tdesc, 0) == 8)
1055 collect_register_by_name (regcache, "r13", &tp);
1056 else
1057 #endif
1058 collect_register_by_name (regcache, "r2", &tp);
1059
1060 *addr = tp;
1061
1062 return 0;
1063 }
1064
1065 #ifdef __powerpc64__
1066
1067 /* Older glibc doesn't provide this. */
1068
1069 #ifndef EF_PPC64_ABI
1070 #define EF_PPC64_ABI 3
1071 #endif
1072
1073 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1074 inferiors. */
1075
1076 static int
1077 is_elfv2_inferior (void)
1078 {
1079 /* To be used as fallback if we're unable to determine the right result -
1080 assume inferior uses the same ABI as gdbserver. */
1081 #if _CALL_ELF == 2
1082 const int def_res = 1;
1083 #else
1084 const int def_res = 0;
1085 #endif
1086 CORE_ADDR phdr;
1087 Elf64_Ehdr ehdr;
1088
1089 const struct target_desc *tdesc = current_process ()->tdesc;
1090 int wordsize = register_size (tdesc, 0);
1091
1092 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1093 return def_res;
1094
1095 /* Assume ELF header is at the beginning of the page where program headers
1096 are located. If it doesn't look like one, bail. */
1097
1098 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1099 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1100 return def_res;
1101
1102 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1103 }
1104
1105 #endif
1106
1107 /* Generate a ds-form instruction in BUF and return the number of 32-bit instructions written.
1108
1109 0 6 11 16 30 32
1110 | OPCD | RST | RA | DS |XO| */
1111
1112 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1113 static int
1114 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1115 {
1116 uint32_t insn;
1117
1118 gdb_assert ((opcd & ~0x3f) == 0);
1119 gdb_assert ((rst & ~0x1f) == 0);
1120 gdb_assert ((ra & ~0x1f) == 0);
1121 gdb_assert ((xo & ~0x3) == 0);
1122
1123 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1124 *buf = (opcd << 26) | insn;
1125 return 1;
1126 }
1127
1128 /* The following are frequently used ds-form instructions. */
1129
1130 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1131 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1132 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1133 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
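
/* For example, GEN_STDU (buf, 1, 1, -112) encodes "stdu r1, -112(r1)",
   i.e. the word 0xf821ff91; the jump pad below uses GEN_STDU in the
   same way to allocate its stack frame.  */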
1134
1135 /* Generate a d-form instruction in BUF.
1136
1137 0 6 11 16 32
1138 | OPCD | RST | RA | D | */
1139
1140 static int
1141 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1142 {
1143 uint32_t insn;
1144
1145 gdb_assert ((opcd & ~0x3f) == 0);
1146 gdb_assert ((rst & ~0x1f) == 0);
1147 gdb_assert ((ra & ~0x1f) == 0);
1148
1149 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1150 *buf = (opcd << 26) | insn;
1151 return 1;
1152 }
1153
1154 /* The following are frequently used d-form instructions. */
1155
1156 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1157 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1158 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1159 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1160 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1161 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1162 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1163 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1164 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
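
/* For example, GEN_LI (buf, 3, 0) encodes "li r3, 0" (0x38600000), and
   GEN_ADDI (buf, 4, 1, 16) encodes "addi r4, r1, 16", which the 32-bit
   jump pad below uses to point r4 at the collected registers.  */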
1165
1166 /* Generate an xfx-form instruction in BUF and return the number of 32-bit
1167 instructions written.
1168
1169 0 6 11 21 31 32
1170 | OPCD | RST | RI | XO |/| */
1171
1172 static int
1173 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1174 {
1175 uint32_t insn;
1176 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1177
1178 gdb_assert ((opcd & ~0x3f) == 0);
1179 gdb_assert ((rst & ~0x1f) == 0);
1180 gdb_assert ((xo & ~0x3ff) == 0);
1181
1182 insn = (rst << 21) | (n << 11) | (xo << 1);
1183 *buf = (opcd << 26) | insn;
1184 return 1;
1185 }
1186
1187 /* The following are frequently used xfx-form instructions. */
1188
1189 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1190 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1191 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1192 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1193 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1194 E & 0xf, 598)
1195 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
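
/* Note that the two 5-bit halves of the SPR number are swapped in the
   encoding, which gen_xfx_form handles via N above.  For example,
   GEN_MFSPR (buf, 5, 8) encodes "mflr r5" (0x7ca802a6), and
   GEN_MTSPR (buf, 12, 9) encodes the "mtctr r12" emitted by gen_call
   below.  */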
1196
1197
1198 /* Generate an x-form instruction in BUF and return the number of 32-bit instructions written.
1199
1200 0 6 11 16 21 31 32
1201 | OPCD | RST | RA | RB | XO |RC| */
1202
1203 static int
1204 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1205 {
1206 uint32_t insn;
1207
1208 gdb_assert ((opcd & ~0x3f) == 0);
1209 gdb_assert ((rst & ~0x1f) == 0);
1210 gdb_assert ((ra & ~0x1f) == 0);
1211 gdb_assert ((rb & ~0x1f) == 0);
1212 gdb_assert ((xo & ~0x3ff) == 0);
1213 gdb_assert ((rc & ~1) == 0);
1214
1215 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1216 *buf = (opcd << 26) | insn;
1217 return 1;
1218 }
1219
1220 /* The following are frequently used x-form instructions. */
1221
1222 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1223 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1224 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1225 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1226 /* Assume bf = cr7. */
1227 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
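
/* For example, GEN_MR (buf, 3, 4) encodes "mr r3, r4" (0x7c832378),
   and GEN_STWCX (buf, 5, 0, 6) encodes "stwcx. r5, 0, r6" (0x7ca0312d),
   the store-conditional that gen_atomic_xchg below emits when called
   with R_NEW = 5.  */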
1228
1229
1230 /* Generate an md-form instruction in BUF and return the number of 32-bit instructions written.
1231
1232 0 6 11 16 21 27 30 31 32
1233 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1234
1235 static int
1236 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1237 int xo, int rc)
1238 {
1239 uint32_t insn;
1240 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1241 unsigned int sh0_4 = sh & 0x1f;
1242 unsigned int sh5 = (sh >> 5) & 1;
1243
1244 gdb_assert ((opcd & ~0x3f) == 0);
1245 gdb_assert ((rs & ~0x1f) == 0);
1246 gdb_assert ((ra & ~0x1f) == 0);
1247 gdb_assert ((sh & ~0x3f) == 0);
1248 gdb_assert ((mb & ~0x3f) == 0);
1249 gdb_assert ((xo & ~0x7) == 0);
1250 gdb_assert ((rc & ~0x1) == 0);
1251
1252 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1253 | (sh5 << 1) | (xo << 2) | (rc & 1);
1254 *buf = (opcd << 26) | insn;
1255 return 1;
1256 }
1257
1258 /* The following are frequently used md-form instructions. */
1259
1260 #define GEN_RLDICL(buf, ra, rs, sh, mb) \
1261 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1262 #define GEN_RLDICR(buf, ra, rs, sh, mb) \
1263 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1264
1265 /* Generate an i-form instruction in BUF and return the number of 32-bit instructions written.
1266
1267 0 6 30 31 32
1268 | OPCD | LI |AA|LK| */
1269
1270 static int
1271 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1272 {
1273 uint32_t insn;
1274
1275 gdb_assert ((opcd & ~0x3f) == 0);
1276
1277 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1278 *buf = (opcd << 26) | insn;
1279 return 1;
1280 }
1281
1282 /* The following are frequently used i-form instructions. */
1283
1284 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1285 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
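
/* For example, GEN_B (buf, 16) encodes "b .+16" (0x48000010); the jump
   pad below uses GEN_B with a computed offset to branch back to the
   instruction following the tracepoint.  */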
1286
1287 /* Generate a b-form instruction in BUF and return the number of 32-bit instructions written.
1288
1289 0 6 11 16 30 31 32
1290 | OPCD | BO | BI | BD |AA|LK| */
1291
1292 static int
1293 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1294 int aa, int lk)
1295 {
1296 uint32_t insn;
1297
1298 gdb_assert ((opcd & ~0x3f) == 0);
1299 gdb_assert ((bo & ~0x1f) == 0);
1300 gdb_assert ((bi & ~0x1f) == 0);
1301
1302 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1303 *buf = (opcd << 26) | insn;
1304 return 1;
1305 }
1306
1307 /* The following are frequently used b-form instructions. */
1308 /* Assume bi = cr7. */
1309 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0, 0)
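
/* For example, GEN_BNE (buf, -8) encodes "bne cr7, .-8" (0x409efff8),
   the backward branch used in the lwarx/stwcx. retry loop of
   gen_atomic_xchg below.  */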
1310
1311 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1312 respectively. They are primarily used for saving/restoring GPRs in the
1313 jump pad, not for bytecode compilation. */
1314
1315 #ifdef __powerpc64__
1316 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1317 GEN_LD (buf, rt, ra, si) : \
1318 GEN_LWZ (buf, rt, ra, si))
1319 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1320 GEN_STD (buf, rt, ra, si) : \
1321 GEN_STW (buf, rt, ra, si))
1322 #else
1323 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1324 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1325 #endif
1326
1327 /* Generate a sequence of instructions to load IMM into register REG.
1328 Write the instructions in BUF and return the number of 32-bit instructions written. */
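
/* For example, the 64-bit path would load 0x1234567890abcdef into
   register 5 as:

       lis    5, 0x1234
       ori    5, 5, 0x5678
       rldicr 5, 5, 32, 31
       oris   5, 5, 0x90ab
       ori    5, 5, 0xcdef  */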
1329
1330 static int
1331 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1332 {
1333 uint32_t *p = buf;
1334
1335 if ((imm + 32768) < 65536)
1336 {
1337 /* li reg, imm[15:0] */
1338 p += GEN_LI (p, reg, imm);
1339 }
1340 else if ((imm >> 32) == 0)
1341 {
1342 /* lis reg, imm[31:16]
1343 ori reg, reg, imm[15:0]
1344 rldicl reg, reg, 0, 32 */
1345 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1346 if ((imm & 0xffff) != 0)
1347 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1348 /* Clear upper 32-bit if sign-bit is set. */
1349 if (imm & (1u << 31) && is_64)
1350 p += GEN_RLDICL (p, reg, reg, 0, 32);
1351 }
1352 else
1353 {
1354 gdb_assert (is_64);
1355 /* lis reg, <imm[63:48]>
1356 ori reg, reg, <imm[48:32]>
1357 rldicr reg, reg, 32, 31
1358 oris reg, reg, <imm[31:16]>
1359 ori reg, reg, <imm[15:0]> */
1360 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1361 if (((imm >> 32) & 0xffff) != 0)
1362 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1363 p += GEN_RLDICR (p, reg, reg, 32, 31);
1364 if (((imm >> 16) & 0xffff) != 0)
1365 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1366 if ((imm & 0xffff) != 0)
1367 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1368 }
1369
1370 return p - buf;
1371 }
1372
1373 /* Generate a sequence for an atomic exchange at location LOCK.
1374 This code sequence clobbers r6, r7, r8. LOCK is the location for
1375 the atomic exchange, OLD_VALUE is the expected old value stored at the
1376 location, and R_NEW is a register holding the new value. */
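
/* The jump pad below calls this with OLD_VALUE 0 and R_NEW holding the
   address of its collecting_t object, so the generated loop spins until
   the collector lock at LOCK reads 0 and then claims it.  */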
1377
1378 static int
1379 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1380 int is_64)
1381 {
1382 const int r_lock = 6;
1383 const int r_old = 7;
1384 const int r_tmp = 8;
1385 uint32_t *p = buf;
1386
1387 /*
1388 1: lwarx TMP, 0, LOCK
1389 cmpwi TMP, OLD
1390 bne 1b
1391 stwcx. NEW, 0, LOCK
1392 bne 1b */
1393
1394 p += gen_limm (p, r_lock, lock, is_64);
1395 p += gen_limm (p, r_old, old_value, is_64);
1396
1397 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1398 p += GEN_CMPW (p, r_tmp, r_old);
1399 p += GEN_BNE (p, -8);
1400 p += GEN_STWCX (p, r_new, 0, r_lock);
1401 p += GEN_BNE (p, -16);
1402
1403 return p - buf;
1404 }
1405
1406 /* Generate a sequence of instructions for calling a function
1407 at address FN. Return the number of 32-bit instructions written in BUF. */
1408
1409 static int
1410 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1411 {
1412 uint32_t *p = buf;
1413
1414 /* Must be called via r12 so the callee can calculate its TOC address. */
1415 p += gen_limm (p, 12, fn, is_64);
1416 if (is_opd)
1417 {
1418 p += GEN_LOAD (p, 11, 12, 16, is_64);
1419 p += GEN_LOAD (p, 2, 12, 8, is_64);
1420 p += GEN_LOAD (p, 12, 12, 0, is_64);
1421 }
1422 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1423 *p++ = 0x4e800421; /* bctrl */
1424
1425 return p - buf;
1426 }
1427
1428 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1429 of instruction. This function is used to adjust pc-relative instructions
1430 when copying. */
1431
1432 static void
1433 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1434 {
1435 uint32_t insn, op6;
1436 long rel, newrel;
1437
1438 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1439 op6 = PPC_OP6 (insn);
1440
1441 if (op6 == 18 && (insn & 2) == 0)
1442 {
1443 /* branch && AA = 0 */
1444 rel = PPC_LI (insn);
1445 newrel = (oldloc - *to) + rel;
1446
1447 /* Out of range. Cannot relocate instruction. */
1448 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1449 return;
1450
1451 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1452 }
1453 else if (op6 == 16 && (insn & 2) == 0)
1454 {
1455 /* conditional branch && AA = 0 */
1456
1457 /* If the new relocation is too big for even a 26-bit unconditional
1458 branch, there is nothing we can do. Just abort.
1459
1460 Otherwise, if it fits in a 16-bit conditional branch, just
1461 copy the instruction and relocate the address.
1462
1463 If it is too big for a 16-bit conditional branch, try to invert the
1464 condition and jump with a 26-bit branch. For example,
1465
1466 beq .Lgoto
1467 INSN1
1468
1469 =>
1470
1471 bne 1f (+8)
1472 b .Lgoto
1473 1:INSN1
1474
1475 After this transform, we actually jump from *TO + 4 instead of *TO,
1476 so check the relocation again, because it will be one instruction
1477 farther than before if *TO is after OLDLOC.
1478
1479
1480 A BDNZT (or similar) is transformed from
1481
1482 bdnzt eq, .Lgoto
1483 INSN1
1484
1485 =>
1486
1487 bdz 1f (+12)
1488 bf eq, 1f (+8)
1489 b .Lgoto
1490 1:INSN1
1491
1492 See also "BO field encodings". */
1493
1494 rel = PPC_BD (insn);
1495 newrel = (oldloc - *to) + rel;
1496
1497 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1498 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1499 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1500 {
1501 newrel -= 4;
1502
1503 /* Out of range. Cannot relocate instruction. */
1504 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1505 return;
1506
1507 if ((PPC_BO (insn) & 0x14) == 0x4)
1508 insn ^= (1 << 24);
1509 else if ((PPC_BO (insn) & 0x14) == 0x10)
1510 insn ^= (1 << 22);
1511
1512 /* Jump over the unconditional branch. */
1513 insn = (insn & ~0xfffc) | 0x8;
1514 target_write_memory (*to, (unsigned char *) &insn, 4);
1515 *to += 4;
1516
1517 /* Build an unconditional branch and copy the LK bit. */
1518 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1519 target_write_memory (*to, (unsigned char *) &insn, 4);
1520 *to += 4;
1521
1522 return;
1523 }
1524 else if ((PPC_BO (insn) & 0x14) == 0)
1525 {
1526 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1527 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1528
1529 newrel -= 8;
1530
1531 /* Out of range. Cannot relocate instruction. */
1532 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1533 return;
1534
1535 /* Copy BI field. */
1536 bf_insn |= (insn & 0x1f0000);
1537
1538 /* Invert condition. */
1539 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1540 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1541
1542 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1543 *to += 4;
1544 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1545 *to += 4;
1546
1547 /* Build an unconditional branch and copy the LK bit. */
1548 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1549 target_write_memory (*to, (unsigned char *) &insn, 4);
1550 *to += 4;
1551
1552 return;
1553 }
1554 else /* (BO & 0x14) == 0x14, branch always. */
1555 {
1556 /* Out of range. Cannot relocate instruction. */
1557 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1558 return;
1559
1560 /* Build an unconditional branch and copy the LK bit. */
1561 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1562 target_write_memory (*to, (unsigned char *) &insn, 4);
1563 *to += 4;
1564
1565 return;
1566 }
1567 }
1568
1569 target_write_memory (*to, (unsigned char *) &insn, 4);
1570 *to += 4;
1571 }
1572
1573 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1574 See target.h for details. */
1575
1576 static int
1577 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1578 CORE_ADDR collector,
1579 CORE_ADDR lockaddr,
1580 ULONGEST orig_size,
1581 CORE_ADDR *jump_entry,
1582 CORE_ADDR *trampoline,
1583 ULONGEST *trampoline_size,
1584 unsigned char *jjump_pad_insn,
1585 ULONGEST *jjump_pad_insn_size,
1586 CORE_ADDR *adjusted_insn_addr,
1587 CORE_ADDR *adjusted_insn_addr_end,
1588 char *err)
1589 {
1590 uint32_t buf[256];
1591 uint32_t *p = buf;
1592 int j, offset;
1593 CORE_ADDR buildaddr = *jump_entry;
1594 const CORE_ADDR entryaddr = *jump_entry;
1595 int rsz, min_frame, frame_size, tp_reg;
1596 #ifdef __powerpc64__
1597 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1598 int is_64 = register_size (regcache->tdesc, 0) == 8;
1599 int is_opd = is_64 && !is_elfv2_inferior ();
1600 #else
1601 int is_64 = 0, is_opd = 0;
1602 #endif
1603
1604 #ifdef __powerpc64__
1605 if (is_64)
1606 {
1607 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1608 rsz = 8;
1609 min_frame = 112;
1610 frame_size = (40 * rsz) + min_frame;
1611 tp_reg = 13;
1612 }
1613 else
1614 {
1615 #endif
1616 rsz = 4;
1617 min_frame = 16;
1618 frame_size = (40 * rsz) + min_frame;
1619 tp_reg = 2;
1620 #ifdef __powerpc64__
1621 }
1622 #endif
1623
1624 /* Stack frame layout for this jump pad,
1625
1626 High thread_area (r13/r2) |
1627 tpoint - collecting_t obj
1628 PC/<tpaddr> | +36
1629 CTR | +35
1630 LR | +34
1631 XER | +33
1632 CR | +32
1633 R31 |
1634 R29 |
1635 ... |
1636 R1 | +1
1637 R0 - collected registers
1638 ... |
1639 ... |
1640 Low Back-chain -
1641
1642
1643 The code flow of this jump pad,
1644
1645 1. Adjust SP
1646 2. Save GPR and SPR
1647 3. Prepare argument
1648 4. Call gdb_collector
1649 5. Restore GPR and SPR
1650 6. Restore SP
1651 7. Build a jump back to the program
1652 8. Copy/relocate the original instruction
1653 9. Build a jump to replace the original instruction. */
1654
1655 /* Adjust stack pointer. */
1656 if (is_64)
1657 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1658 else
1659 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1660
1661 /* Store GPRs. Save R1 later, because it had just been modified, but
1662 we want the original value. */
1663 for (j = 2; j < 32; j++)
1664 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1665 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1666 /* Set r0 to the original value of r1 before adjusting stack frame,
1667 and then save it. */
1668 p += GEN_ADDI (p, 0, 1, frame_size);
1669 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1670
1671 /* Save CR, XER, LR, and CTR. */
1672 p += GEN_MFCR (p, 3); /* mfcr r3 */
1673 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1674 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1675 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1676 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1677 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1678 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1679 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1680
1681 /* Save PC<tpaddr> */
1682 p += gen_limm (p, 3, tpaddr, is_64);
1683 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1684
1685
1686 /* Setup arguments to collector. */
1687 /* Set r4 to collected registers. */
1688 p += GEN_ADDI (p, 4, 1, min_frame);
1689 /* Set r3 to TPOINT. */
1690 p += gen_limm (p, 3, tpoint, is_64);
1691
1692 /* Prepare collecting_t object for lock. */
1693 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1694 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1695 /* Set R5 to collecting object. */
1696 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1697
1698 p += GEN_LWSYNC (p);
1699 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1700 p += GEN_LWSYNC (p);
1701
1702 /* Call to collector. */
1703 p += gen_call (p, collector, is_64, is_opd);
1704
1705 /* Simply write 0 to release the lock. */
1706 p += gen_limm (p, 3, lockaddr, is_64);
1707 p += gen_limm (p, 4, 0, is_64);
1708 p += GEN_LWSYNC (p);
1709 p += GEN_STORE (p, 4, 3, 0, is_64);
1710
1711 /* Restore stack and registers. */
1712 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1713 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1714 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1715 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1716 p += GEN_MTCR (p, 3); /* mtcr r3 */
1717 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1718 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1719 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1720
1721 /* Restore GPRs. */
1722 for (j = 2; j < 32; j++)
1723 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1724 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1725 /* Restore SP. */
1726 p += GEN_ADDI (p, 1, 1, frame_size);
1727
1728 /* Flush instructions to inferior memory. */
1729 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1730
1731 /* Now, insert the original instruction to execute in the jump pad. */
1732 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1733 *adjusted_insn_addr_end = *adjusted_insn_addr;
1734 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1735
1736 /* Verify the relocation size. It should be 4 for a normal copy,
1737 or 8 or 12 for some conditional branches. */
1738 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1739 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1740 {
1741 sprintf (err, "E.Unexpected instruction length = %d"
1742 "when relocate instruction.",
1743 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1744 return 1;
1745 }
1746
1747 buildaddr = *adjusted_insn_addr_end;
1748 p = buf;
1749 /* Finally, write a jump back to the program. */
1750 offset = (tpaddr + 4) - buildaddr;
1751 if (offset >= (1 << 25) || offset < -(1 << 25))
1752 {
1753 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1754 "(offset 0x%x > 26-bit).", offset);
1755 return 1;
1756 }
1757 /* b <tpaddr+4> */
1758 p += GEN_B (p, offset);
1759 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1760 *jump_entry = buildaddr + (p - buf) * 4;
1761
1762 /* The jump pad is now built. Wire in a jump to our jump pad. This
1763 is always done last (by our caller actually), so that we can
1764 install fast tracepoints with threads running. This relies on
1765 the agent's atomic write support. */
1766 offset = entryaddr - tpaddr;
1767 if (offset >= (1 << 25) || offset < -(1 << 25))
1768 {
1769 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1770 "(offset 0x%x > 26-bit).", offset);
1771 return 1;
1772 }
1773 /* b <jentry> */
1774 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1775 *jjump_pad_insn_size = 4;
1776
1777 return 0;
1778 }
1779
1780 /* Returns the minimum instruction length for installing a tracepoint. */
1781
1782 static int
1783 ppc_get_min_fast_tracepoint_insn_len (void)
1784 {
1785 return 4;
1786 }
1787
1788 /* Emits a given buffer into the target at current_insn_ptr. Length
1789 is in units of 32-bit words. */
1790
1791 static void
1792 emit_insns (uint32_t *buf, int n)
1793 {
1794 n = n * sizeof (uint32_t);
1795 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1796 current_insn_ptr += n;
1797 }
1798
1799 #define __EMIT_ASM(NAME, INSNS) \
1800 do \
1801 { \
1802 extern uint32_t start_bcax_ ## NAME []; \
1803 extern uint32_t end_bcax_ ## NAME []; \
1804 emit_insns (start_bcax_ ## NAME, \
1805 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1806 __asm__ (".section .text.__ppcbcax\n\t" \
1807 "start_bcax_" #NAME ":\n\t" \
1808 INSNS "\n\t" \
1809 "end_bcax_" #NAME ":\n\t" \
1810 ".previous\n\t"); \
1811 } while (0)
1812
1813 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1814 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
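
/* EMIT_ASM assembles INSNS into gdbserver itself, in the
   .text.__ppcbcax section bracketed by start/end labels, and then
   copies the resulting 32-bit words into the inferior at
   current_insn_ptr via emit_insns.  */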
1815
1816 /*
1817
1818 Bytecode execution stack frame - 32-bit
1819
1820 | LR save area (SP + 4)
1821 SP' -> +- Back chain (SP + 0)
1822 | Save r31 for accessing saved arguments
1823 | Save r30 for bytecode stack pointer
1824 | Save r4 for incoming argument *value
1825 | Save r3 for incoming argument regs
1826 r30 -> +- Bytecode execution stack
1827 |
1828 | 64 bytes (8 doublewords) initially.
1829 | Expand stack as needed.
1830 |
1831 +-
1832 | Some padding for minimum stack frame and 16-byte alignment.
1833 | 16 bytes.
1834 SP +- Back-chain (SP')
1835
1836 initial frame size
1837 = 16 + (4 * 4) + 64
1838 = 96
1839
1840 r30 is the stack pointer for the bytecode machine.
1841 It should point to the next empty slot, so we can use LWZU for pop.
1842 r3 is used to cache the high part of the TOP value.
1843 It was the first argument, the pointer to regs.
1844 r4 is used to cache the low part of the TOP value.
1845 It was the second argument, the pointer to the result.
1846 We should set *result = TOP before leaving this function.
1847
1848 Note:
1849 * To restore stack at epilogue
1850 => sp = r31
1851 * To check stack is big enough for bytecode execution.
1852 => r30 - 8 > SP + 8
1853 * To return execution result.
1854 => 0(r4) = TOP
1855
1856 */
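
/* For concreteness, with the 32-bit prologue below the saved values sit at
   fixed offsets from r31 (which is made to point at the pre-prologue SP):
     -4(r31)   saved r31        ("stw 31, 96-4(1)")
     -8(r31)   saved r30        ("stw 30, 96-8(1)")
     -12(r31)  incoming *value  ("stw 4, 96-12(1)")
     -16(r31)  incoming regs    ("stw 3, 96-16(1)")
   and r30 = SP + 64 points at the next empty bytecode-stack slot; the
   epilogue reloads the *value pointer from -12(31).  */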
1857
1858 /* Regardless of endianness, register 3 is always the high part and 4 the
1859 low part. These defines are used when the register pair is stored/loaded.
1860 Likewise, to simplify the code, there is a similar define for the 5:6 pair. */
1861
1862 #if __BYTE_ORDER == __LITTLE_ENDIAN
1863 #define TOP_FIRST "4"
1864 #define TOP_SECOND "3"
1865 #define TMP_FIRST "6"
1866 #define TMP_SECOND "5"
1867 #else
1868 #define TOP_FIRST "3"
1869 #define TOP_SECOND "4"
1870 #define TMP_FIRST "5"
1871 #define TMP_SECOND "6"
1872 #endif
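
/* For example, on little-endian
     "stw " TOP_FIRST ", 0(30)"  expands to "stw 4, 0(30)"  (low word)
     "stw " TOP_SECOND ", 4(30)" expands to "stw 3, 4(30)"  (high word)
   so the low word of TOP lands at the lower address, keeping the 64-bit
   value in target byte order on the bytecode stack.  */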
1873
1874 /* Emit prologue in inferior memory. See above comments. */
1875
1876 static void
1877 ppc_emit_prologue (void)
1878 {
1879 EMIT_ASM (/* Save return address. */
1880 "mflr 0 \n"
1881 "stw 0, 4(1) \n"
1882 /* Adjust SP. 96 is the initial frame size. */
1883 "stwu 1, -96(1) \n"
1884 /* Save r31, r30 and the incoming arguments. */
1885 "stw 31, 96-4(1) \n"
1886 "stw 30, 96-8(1) \n"
1887 "stw 4, 96-12(1) \n"
1888 "stw 3, 96-16(1) \n"
1889 /* Point r31 to the original r1 for accessing arguments. */
1890 "addi 31, 1, 96 \n"
1891 /* Set r30 to point to the stack top. */
1892 "addi 30, 1, 64 \n"
1893 /* Initialize r3/TOP to 0. */
1894 "li 3, 0 \n"
1895 "li 4, 0 \n");
1896 }
1897
1898 /* Emit epilogue in inferior memory. See above comments. */
1899
1900 static void
1901 ppc_emit_epilogue (void)
1902 {
1903 EMIT_ASM (/* *result = TOP */
1904 "lwz 5, -12(31) \n"
1905 "stw " TOP_FIRST ", 0(5) \n"
1906 "stw " TOP_SECOND ", 4(5) \n"
1907 /* Restore registers. */
1908 "lwz 31, -4(31) \n"
1909 "lwz 30, -8(31) \n"
1910 /* Restore SP. */
1911 "lwz 1, 0(1) \n"
1912 /* Restore LR. */
1913 "lwz 0, 4(1) \n"
1914 /* Return 0 for no-error. */
1915 "li 3, 0 \n"
1916 "mtlr 0 \n"
1917 "blr \n");
1918 }
1919
1920 /* TOP = stack[--sp] + TOP */
1921
1922 static void
1923 ppc_emit_add (void)
1924 {
1925 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1926 "lwz " TMP_SECOND ", 4(30)\n"
1927 "addc 4, 6, 4 \n"
1928 "adde 3, 5, 3 \n");
1929 }
1930
1931 /* TOP = stack[--sp] - TOP */
1932
1933 static void
1934 ppc_emit_sub (void)
1935 {
1936 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1937 "lwz " TMP_SECOND ", 4(30) \n"
1938 "subfc 4, 4, 6 \n"
1939 "subfe 3, 3, 5 \n");
1940 }
1941
1942 /* TOP = stack[--sp] * TOP */
1943
1944 static void
1945 ppc_emit_mul (void)
1946 {
1947 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1948 "lwz " TMP_SECOND ", 4(30) \n"
1949 "mulhwu 7, 6, 4 \n"
1950 "mullw 3, 6, 3 \n"
1951 "mullw 5, 4, 5 \n"
1952 "mullw 4, 6, 4 \n"
1953 "add 3, 5, 3 \n"
1954 "add 3, 7, 3 \n");
1955 }
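
/* The sequence above computes a 64-bit product from 32-bit halves:
   with the popped value a:b in r5:r6 and TOP c:d in r3:r4,
     (a*2^32 + b) * (c*2^32 + d) mod 2^64
       = (b*c + a*d + high32 (b*d)) * 2^32 + low32 (b*d),
   where mulhwu 7,6,4 is high32 (b*d), mullw 4,6,4 is the new low word,
   and the remaining mullw/add instructions accumulate the high word.  */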
1956
1957 /* TOP = stack[--sp] << TOP */
1958
1959 static void
1960 ppc_emit_lsh (void)
1961 {
1962 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1963 "lwz " TMP_SECOND ", 4(30) \n"
1964 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1965 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1966 "slw 5, 5, 4\n" /* Shift high part left */
1967 "slw 4, 6, 4\n" /* Shift low part left */
1968 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1969 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1970 "or 3, 5, 3\n"
1971 "or 3, 7, 3\n"); /* Assemble high part */
1972 }
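
/* This composes a 64-bit left shift from 32-bit ones.  slw/srw yield
   zero whenever the 6-bit shift amount in the register is 32 or more,
   so for a shift amount s < 32 only "srw 3, 6, 3" (by 32 - s) carries
   low bits into the high word, while for s >= 32 only "slw 7, 6, 7"
   (by s - 32) does; the final two or's merge the surviving term into
   the high word.  */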
1973
1974 /* TOP = stack[--sp] >> TOP
1975 (Arithmetic shift right) */
1976
1977 static void
1978 ppc_emit_rsh_signed (void)
1979 {
1980 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1981 "lwz " TMP_SECOND ", 4(30) \n"
1982 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1983 "sraw 3, 5, 4\n" /* Shift high part right */
1984 "cmpwi 7, 1\n"
1985 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1986 "sraw 4, 5, 7\n" /* Shift high to low */
1987 "b 2f\n"
1988 "1:\n"
1989 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1990 "srw 4, 6, 4\n" /* Shift low part right */
1991 "slw 5, 5, 7\n" /* Shift high to low */
1992 "or 4, 4, 5\n" /* Assemble low part */
1993 "2:\n");
1994 }
1995
1996 /* TOP = stack[--sp] >> TOP
1997 (Logical shift right) */
1998
1999 static void
2000 ppc_emit_rsh_unsigned (void)
2001 {
2002 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2003 "lwz " TMP_SECOND ", 4(30) \n"
2004 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2005 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2006 "srw 6, 6, 4\n" /* Shift low part right */
2007 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
2008 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
2009 "or 6, 6, 3\n"
2010 "srw 3, 5, 4\n" /* Shift high part right */
2011 "or 4, 6, 7\n"); /* Assemble low part */
2012 }
2013
2014 /* Emit code for the sign extension specified by ARG. */
2015
2016 static void
2017 ppc_emit_ext (int arg)
2018 {
2019 switch (arg)
2020 {
2021 case 8:
2022 EMIT_ASM ("extsb 4, 4\n"
2023 "srawi 3, 4, 31");
2024 break;
2025 case 16:
2026 EMIT_ASM ("extsh 4, 4\n"
2027 "srawi 3, 4, 31");
2028 break;
2029 case 32:
2030 EMIT_ASM ("srawi 3, 4, 31");
2031 break;
2032 default:
2033 emit_error = 1;
2034 }
2035 }
2036
2037 /* Emit code for zero-extension specified by ARG. */
2038
2039 static void
2040 ppc_emit_zero_ext (int arg)
2041 {
2042 switch (arg)
2043 {
2044 case 8:
2045 EMIT_ASM ("clrlwi 4,4,24\n"
2046 "li 3, 0\n");
2047 break;
2048 case 16:
2049 EMIT_ASM ("clrlwi 4,4,16\n"
2050 "li 3, 0\n");
2051 break;
2052 case 32:
2053 EMIT_ASM ("li 3, 0");
2054 break;
2055 default:
2056 emit_error = 1;
2057 }
2058 }
2059
2060 /* TOP = !TOP
2061 i.e., TOP = (TOP == 0) ? 1 : 0; */
2062
2063 static void
2064 ppc_emit_log_not (void)
2065 {
2066 EMIT_ASM ("or 4, 3, 4 \n"
2067 "cntlzw 4, 4 \n"
2068 "srwi 4, 4, 5 \n"
2069 "li 3, 0 \n");
2070 }
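
/* cntlzw returns 32 exactly when its operand is zero, so after or'ing
   the two halves of TOP together, shifting right by 5 (dividing by 32)
   leaves 1 if the 64-bit TOP was zero and 0 otherwise.  The same trick
   is used below by ppc_emit_equal on the xor of the two operands.  */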
2071
2072 /* TOP = stack[--sp] & TOP */
2073
2074 static void
2075 ppc_emit_bit_and (void)
2076 {
2077 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2078 "lwz " TMP_SECOND ", 4(30) \n"
2079 "and 4, 6, 4 \n"
2080 "and 3, 5, 3 \n");
2081 }
2082
2083 /* TOP = stack[--sp] | TOP */
2084
2085 static void
2086 ppc_emit_bit_or (void)
2087 {
2088 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2089 "lwz " TMP_SECOND ", 4(30) \n"
2090 "or 4, 6, 4 \n"
2091 "or 3, 5, 3 \n");
2092 }
2093
2094 /* TOP = stack[--sp] ^ TOP */
2095
2096 static void
2097 ppc_emit_bit_xor (void)
2098 {
2099 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2100 "lwz " TMP_SECOND ", 4(30) \n"
2101 "xor 4, 6, 4 \n"
2102 "xor 3, 5, 3 \n");
2103 }
2104
2105 /* TOP = ~TOP
2106 i.e., TOP = ~(TOP | TOP) */
2107
2108 static void
2109 ppc_emit_bit_not (void)
2110 {
2111 EMIT_ASM ("nor 3, 3, 3 \n"
2112 "nor 4, 4, 4 \n");
2113 }
2114
2115 /* TOP = stack[--sp] == TOP */
2116
2117 static void
2118 ppc_emit_equal (void)
2119 {
2120 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2121 "lwz " TMP_SECOND ", 4(30) \n"
2122 "xor 4, 6, 4 \n"
2123 "xor 3, 5, 3 \n"
2124 "or 4, 3, 4 \n"
2125 "cntlzw 4, 4 \n"
2126 "srwi 4, 4, 5 \n"
2127 "li 3, 0 \n");
2128 }
2129
2130 /* TOP = stack[--sp] < TOP
2131 (Signed comparison) */
2132
2133 static void
2134 ppc_emit_less_signed (void)
2135 {
2136 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2137 "lwz " TMP_SECOND ", 4(30) \n"
2138 "cmplw 6, 6, 4 \n"
2139 "cmpw 7, 5, 3 \n"
2140 /* CR6 bit 0 = low less and high equal */
2141 "crand 6*4+0, 6*4+0, 7*4+2\n"
2142 /* CR7 bit 0 = (low less and high equal) or high less */
2143 "cror 7*4+0, 7*4+0, 6*4+0\n"
2144 "mfcr 4 \n"
2145 "rlwinm 4, 4, 29, 31, 31 \n"
2146 "li 3, 0 \n");
2147 }
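
/* After mfcr, CR field 7 occupies the four least-significant bits of r4
   (LT, GT, EQ, SO from bit 3 down to bit 0), so "rlwinm 4, 4, 29, 31, 31"
   rotates right by 3 and masks all but that LT bit, leaving r4 = 1 when
   the 64-bit signed comparison was "less than".  The unsigned variant
   below differs only in using cmplw for the high-word comparison.  */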
2148
2149 /* TOP = stack[--sp] < TOP
2150 (Unsigned comparison) */
2151
2152 static void
2153 ppc_emit_less_unsigned (void)
2154 {
2155 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2156 "lwz " TMP_SECOND ", 4(30) \n"
2157 "cmplw 6, 6, 4 \n"
2158 "cmplw 7, 5, 3 \n"
2159 /* CR6 bit 0 = low less and high equal */
2160 "crand 6*4+0, 6*4+0, 7*4+2\n"
2161 /* CR7 bit 0 = (low less and high equal) or high less */
2162 "cror 7*4+0, 7*4+0, 6*4+0\n"
2163 "mfcr 4 \n"
2164 "rlwinm 4, 4, 29, 31, 31 \n"
2165 "li 3, 0 \n");
2166 }
2167
2168 /* Read SIZE bytes from the memory address in TOP.
2169 Zero-extend the value read. */
2170
2171 static void
2172 ppc_emit_ref (int size)
2173 {
2174 switch (size)
2175 {
2176 case 1:
2177 EMIT_ASM ("lbz 4, 0(4)\n"
2178 "li 3, 0");
2179 break;
2180 case 2:
2181 EMIT_ASM ("lhz 4, 0(4)\n"
2182 "li 3, 0");
2183 break;
2184 case 4:
2185 EMIT_ASM ("lwz 4, 0(4)\n"
2186 "li 3, 0");
2187 break;
2188 case 8:
2189 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2190 EMIT_ASM ("lwz 3, 4(4)\n"
2191 "lwz 4, 0(4)");
2192 else
2193 EMIT_ASM ("lwz 3, 0(4)\n"
2194 "lwz 4, 4(4)");
2195 break;
2196 }
2197 }
2198
2199 /* TOP = NUM */
2200
2201 static void
2202 ppc_emit_const (LONGEST num)
2203 {
2204 uint32_t buf[10];
2205 uint32_t *p = buf;
2206
2207 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2208 p += gen_limm (p, 4, num & 0xffffffff, 0);
2209
2210 emit_insns (buf, p - buf);
2211 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2212 }
2213
2214 /* Set TOP to the value of register REG by calling the get_raw_reg function
2215 with two arguments, the collected register buffer and the register number. */
2216
2217 static void
2218 ppc_emit_reg (int reg)
2219 {
2220 uint32_t buf[13];
2221 uint32_t *p = buf;
2222
2223 /* fctx->regs is passed in r3 and then saved in -16(31). */
2224 p += GEN_LWZ (p, 3, 31, -16);
2225 p += GEN_LI (p, 4, reg); /* li r4, reg */
2226 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2227
2228 emit_insns (buf, p - buf);
2229 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2230
2231 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2232 {
2233 EMIT_ASM ("mr 5, 4\n"
2234 "mr 4, 3\n"
2235 "mr 3, 5\n");
2236 }
2237 }
2238
2239 /* TOP = stack[--sp] */
2240
2241 static void
2242 ppc_emit_pop (void)
2243 {
2244 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2245 "lwz " TOP_SECOND ", 4(30) \n");
2246 }
2247
2248 /* stack[sp++] = TOP
2249
2250 Because we may use up the bytecode stack, expand it by 8 more
2251 doublewords if needed. */
2252
2253 static void
2254 ppc_emit_stack_flush (void)
2255 {
2256 /* Make sure the bytecode stack is big enough before the push.
2257 Otherwise, expand it by another 64 bytes. */
2258
2259 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2260 " stw " TOP_SECOND ", 4(30)\n"
2261 " addi 5, 30, -(8 + 8) \n"
2262 " cmpw 7, 5, 1 \n"
2263 " bgt 7, 1f \n"
2264 " stwu 31, -64(1) \n"
2265 "1:addi 30, 30, -8 \n");
2266 }
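
/* The guard above keeps the bytecode stack from running into the real
   stack: r5 = r30 - 16, and unless r5 > SP the frame is grown with
   "stwu 31, -64(1)", which both lowers SP by 64 bytes and rewrites the
   back chain (r31 still holds the pre-prologue SP).  This matches the
   "r30 - 8 > SP + 8" note in the frame diagram above.  */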
2267
2268 /* Swap TOP and stack[sp-1] */
2269
2270 static void
2271 ppc_emit_swap (void)
2272 {
2273 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2274 "lwz " TMP_SECOND ", 12(30) \n"
2275 "stw " TOP_FIRST ", 8(30) \n"
2276 "stw " TOP_SECOND ", 12(30) \n"
2277 "mr 3, 5 \n"
2278 "mr 4, 6 \n");
2279 }
2280
2281 /* Discard N elements in the stack. Also used for ppc64. */
2282
2283 static void
2284 ppc_emit_stack_adjust (int n)
2285 {
2286 uint32_t buf[6];
2287 uint32_t *p = buf;
2288
2289 n = n << 3;
2290 if ((n >> 15) != 0)
2291 {
2292 emit_error = 1;
2293 return;
2294 }
2295
2296 p += GEN_ADDI (p, 30, 30, n);
2297
2298 emit_insns (buf, p - buf);
2299 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2300 }
2301
2302 /* Call function FN. */
2303
2304 static void
2305 ppc_emit_call (CORE_ADDR fn)
2306 {
2307 uint32_t buf[11];
2308 uint32_t *p = buf;
2309
2310 p += gen_call (p, fn, 0, 0);
2311
2312 emit_insns (buf, p - buf);
2313 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2314 }
2315
2316 /* FN's prototype is `LONGEST(*fn)(int)'.
2317 TOP = fn (arg1)
2318 */
2319
2320 static void
2321 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2322 {
2323 uint32_t buf[15];
2324 uint32_t *p = buf;
2325
2326 /* Setup argument. arg1 is a 16-bit value. */
2327 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2328 p += gen_call (p, fn, 0, 0);
2329
2330 emit_insns (buf, p - buf);
2331 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2332
2333 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2334 {
2335 EMIT_ASM ("mr 5, 4\n"
2336 "mr 4, 3\n"
2337 "mr 3, 5\n");
2338 }
2339 }
2340
2341 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2342 fn (arg1, TOP)
2343
2344 TOP should be preserved/restored before/after the call. */
2345
2346 static void
2347 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2348 {
2349 uint32_t buf[21];
2350 uint32_t *p = buf;
2351
2352 /* Save TOP. 0(30) is next-empty. */
2353 p += GEN_STW (p, 3, 30, 0);
2354 p += GEN_STW (p, 4, 30, 4);
2355
2356 /* Setup argument. arg1 is a 16-bit value. */
2357 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2358 {
2359 p += GEN_MR (p, 5, 4);
2360 p += GEN_MR (p, 6, 3);
2361 }
2362 else
2363 {
2364 p += GEN_MR (p, 5, 3);
2365 p += GEN_MR (p, 6, 4);
2366 }
2367 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2368 p += gen_call (p, fn, 0, 0);
2369
2370 /* Restore TOP */
2371 p += GEN_LWZ (p, 3, 30, 0);
2372 p += GEN_LWZ (p, 4, 30, 4);
2373
2374 emit_insns (buf, p - buf);
2375 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2376 }
2377
2378 /* Note in the following goto ops:
2379
2380 When emitting a goto, the target address is later relocated by
2381 write_goto_address. OFFSET_P is the offset of the branch instruction
2382 in the code sequence, and SIZE_P says how to relocate the instruction,
2383 as recognized by ppc_write_goto_address. In the current implementation,
2384 SIZE can be either 24 or 14, for a branch or a conditional-branch instruction.
2385 */
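
/* For example, ppc_emit_if_goto below emits "1:bne 0, 1b", a conditional
   branch to itself, as a placeholder.  Its *OFFSET_P of 12 is the byte
   offset of that bne within the emitted sequence (it is the fourth 4-byte
   instruction), and *SIZE_P of 14 tells ppc_write_goto_address to patch
   the 14-bit displacement field of a conditional branch once the real
   target is known.  */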
2386
2387 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2388
2389 static void
2390 ppc_emit_if_goto (int *offset_p, int *size_p)
2391 {
2392 EMIT_ASM ("or. 3, 3, 4 \n"
2393 "lwzu " TOP_FIRST ", 8(30) \n"
2394 "lwz " TOP_SECOND ", 4(30) \n"
2395 "1:bne 0, 1b \n");
2396
2397 if (offset_p)
2398 *offset_p = 12;
2399 if (size_p)
2400 *size_p = 14;
2401 }
2402
2403 /* Unconditional goto. Also used for ppc64. */
2404
2405 static void
2406 ppc_emit_goto (int *offset_p, int *size_p)
2407 {
2408 EMIT_ASM ("1:b 1b");
2409
2410 if (offset_p)
2411 *offset_p = 0;
2412 if (size_p)
2413 *size_p = 24;
2414 }
2415
2416 /* Goto if stack[--sp] == TOP */
2417
2418 static void
2419 ppc_emit_eq_goto (int *offset_p, int *size_p)
2420 {
2421 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2422 "lwz " TMP_SECOND ", 4(30) \n"
2423 "xor 4, 6, 4 \n"
2424 "xor 3, 5, 3 \n"
2425 "or. 3, 3, 4 \n"
2426 "lwzu " TOP_FIRST ", 8(30) \n"
2427 "lwz " TOP_SECOND ", 4(30) \n"
2428 "1:beq 0, 1b \n");
2429
2430 if (offset_p)
2431 *offset_p = 28;
2432 if (size_p)
2433 *size_p = 14;
2434 }
2435
2436 /* Goto if stack[--sp] != TOP */
2437
2438 static void
2439 ppc_emit_ne_goto (int *offset_p, int *size_p)
2440 {
2441 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2442 "lwz " TMP_SECOND ", 4(30) \n"
2443 "xor 4, 6, 4 \n"
2444 "xor 3, 5, 3 \n"
2445 "or. 3, 3, 4 \n"
2446 "lwzu " TOP_FIRST ", 8(30) \n"
2447 "lwz " TOP_SECOND ", 4(30) \n"
2448 "1:bne 0, 1b \n");
2449
2450 if (offset_p)
2451 *offset_p = 28;
2452 if (size_p)
2453 *size_p = 14;
2454 }
2455
2456 /* Goto if stack[--sp] < TOP */
2457
2458 static void
2459 ppc_emit_lt_goto (int *offset_p, int *size_p)
2460 {
2461 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2462 "lwz " TMP_SECOND ", 4(30) \n"
2463 "cmplw 6, 6, 4 \n"
2464 "cmpw 7, 5, 3 \n"
2465 /* CR6 bit 0 = low less and high equal */
2466 "crand 6*4+0, 6*4+0, 7*4+2\n"
2467 /* CR7 bit 0 = (low less and high equal) or high less */
2468 "cror 7*4+0, 7*4+0, 6*4+0\n"
2469 "lwzu " TOP_FIRST ", 8(30) \n"
2470 "lwz " TOP_SECOND ", 4(30)\n"
2471 "1:blt 7, 1b \n");
2472
2473 if (offset_p)
2474 *offset_p = 32;
2475 if (size_p)
2476 *size_p = 14;
2477 }
2478
2479 /* Goto if stack[--sp] <= TOP */
2480
2481 static void
2482 ppc_emit_le_goto (int *offset_p, int *size_p)
2483 {
2484 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2485 "lwz " TMP_SECOND ", 4(30) \n"
2486 "cmplw 6, 6, 4 \n"
2487 "cmpw 7, 5, 3 \n"
2488 /* CR6 bit 0 = low less/equal and high equal */
2489 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2490 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2491 "cror 7*4+0, 7*4+0, 6*4+0\n"
2492 "lwzu " TOP_FIRST ", 8(30) \n"
2493 "lwz " TOP_SECOND ", 4(30)\n"
2494 "1:blt 7, 1b \n");
2495
2496 if (offset_p)
2497 *offset_p = 32;
2498 if (size_p)
2499 *size_p = 14;
2500 }
2501
2502 /* Goto if stack[--sp] > TOP */
2503
2504 static void
2505 ppc_emit_gt_goto (int *offset_p, int *size_p)
2506 {
2507 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2508 "lwz " TMP_SECOND ", 4(30) \n"
2509 "cmplw 6, 6, 4 \n"
2510 "cmpw 7, 5, 3 \n"
2511 /* CR6 bit 0 = low greater and high equal */
2512 "crand 6*4+0, 6*4+1, 7*4+2\n"
2513 /* CR7 bit 0 = (low greater and high equal) or high greater */
2514 "cror 7*4+0, 7*4+1, 6*4+0\n"
2515 "lwzu " TOP_FIRST ", 8(30) \n"
2516 "lwz " TOP_SECOND ", 4(30)\n"
2517 "1:blt 7, 1b \n");
2518
2519 if (offset_p)
2520 *offset_p = 32;
2521 if (size_p)
2522 *size_p = 14;
2523 }
2524
2525 /* Goto if stack[--sp] >= TOP */
2526
2527 static void
2528 ppc_emit_ge_goto (int *offset_p, int *size_p)
2529 {
2530 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2531 "lwz " TMP_SECOND ", 4(30) \n"
2532 "cmplw 6, 6, 4 \n"
2533 "cmpw 7, 5, 3 \n"
2534 /* CR6 bit 0 = low ge and high equal */
2535 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2536 /* CR7 bit 0 = (low ge and high equal) or high greater */
2537 "cror 7*4+0, 7*4+1, 6*4+0\n"
2538 "lwzu " TOP_FIRST ", 8(30)\n"
2539 "lwz " TOP_SECOND ", 4(30)\n"
2540 "1:blt 7, 1b \n");
2541
2542 if (offset_p)
2543 *offset_p = 32;
2544 if (size_p)
2545 *size_p = 14;
2546 }
2547
2548 /* Relocate a previously emitted branch instruction. FROM is the address
2549 of the branch instruction, TO is the goto target address, and SIZE
2550 is the value we set via *SIZE_P before. Currently, it is either
2551 24 or 14, for a branch or a conditional-branch instruction.
2552 Also used for ppc64. */
2553
2554 static void
2555 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2556 {
2557 long rel = to - from;
2558 uint32_t insn;
2559 int opcd;
2560
2561 read_inferior_memory (from, (unsigned char *) &insn, 4);
2562 opcd = (insn >> 26) & 0x3f;
2563
2564 switch (size)
2565 {
2566 case 14:
2567 if (opcd != 16
2568 || (rel >= (1 << 15) || rel < -(1 << 15)))
2569 emit_error = 1;
2570 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2571 break;
2572 case 24:
2573 if (opcd != 18
2574 || (rel >= (1 << 25) || rel < -(1 << 25)))
2575 emit_error = 1;
2576 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2577 break;
2578 default:
2579 emit_error = 1;
2580 }
2581
2582 if (!emit_error)
2583 target_write_memory (from, (unsigned char *) &insn, 4);
2584 }
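
/* In the two cases above, opcode 16 is the B-form conditional branch (bc),
   whose 14-bit displacement occupies bits 16-29 (mask 0xfffc), and opcode
   18 is the I-form unconditional branch (b), whose 24-bit displacement
   occupies bits 6-29 (mask 0x3fffffc).  Both fields hold a sign-extended
   byte offset whose low two bits are zero.  */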
2585
2586 /* Table of emit ops for 32-bit. */
2587
2588 static struct emit_ops ppc_emit_ops_impl =
2589 {
2590 ppc_emit_prologue,
2591 ppc_emit_epilogue,
2592 ppc_emit_add,
2593 ppc_emit_sub,
2594 ppc_emit_mul,
2595 ppc_emit_lsh,
2596 ppc_emit_rsh_signed,
2597 ppc_emit_rsh_unsigned,
2598 ppc_emit_ext,
2599 ppc_emit_log_not,
2600 ppc_emit_bit_and,
2601 ppc_emit_bit_or,
2602 ppc_emit_bit_xor,
2603 ppc_emit_bit_not,
2604 ppc_emit_equal,
2605 ppc_emit_less_signed,
2606 ppc_emit_less_unsigned,
2607 ppc_emit_ref,
2608 ppc_emit_if_goto,
2609 ppc_emit_goto,
2610 ppc_write_goto_address,
2611 ppc_emit_const,
2612 ppc_emit_call,
2613 ppc_emit_reg,
2614 ppc_emit_pop,
2615 ppc_emit_stack_flush,
2616 ppc_emit_zero_ext,
2617 ppc_emit_swap,
2618 ppc_emit_stack_adjust,
2619 ppc_emit_int_call_1,
2620 ppc_emit_void_call_2,
2621 ppc_emit_eq_goto,
2622 ppc_emit_ne_goto,
2623 ppc_emit_lt_goto,
2624 ppc_emit_le_goto,
2625 ppc_emit_gt_goto,
2626 ppc_emit_ge_goto
2627 };
2628
2629 #ifdef __powerpc64__
2630
2631 /*
2632
2633 Bytecode execution stack frame - 64-bit
2634
2635 | LR save area (SP + 16)
2636 | CR save area (SP + 8)
2637 SP' -> +- Back chain (SP + 0)
2638 | Save r31 for accessing saved arguments
2639 | Save r30 for bytecode stack pointer
2640 | Save r4 for incoming argument *value
2641 | Save r3 for incoming argument regs
2642 r30 -> +- Bytecode execution stack
2643 |
2644 | 64 bytes (8 doublewords) initially.
2645 | Expand the stack as needed.
2646 |
2647 +-
2648 | Some padding for minimum stack frame.
2649 | 112 for ELFv1.
2650 SP +- Back-chain (SP')
2651
2652 initial frame size
2653 = 112 + (4 * 8) + 64
2654 = 208
2655
2656 r30 is the stack pointer for the bytecode machine.
2657 It should point to the next empty slot, so we can use LDU for pop.
2658 r3 is used to cache the TOP value.
2659 It was the first argument, the pointer to regs.
2660 r4 is the second argument, the pointer to the result.
2661 We should set *result = TOP before leaving this function.
2662
2663 Note:
2664 * To restore stack at epilogue
2665 => sp = r31
2666 * To check stack is big enough for bytecode execution.
2667 => r30 - 8 > SP + 112
2668 * To return execution result.
2669 => 0(r4) = TOP
2670
2671 */
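
/* With the 64-bit prologues below, r31 preserves the pre-prologue SP and
   the saves land at -8(r31) (r31), -16(r31) (r30), -24(r31) (*value) and
   -32(r31) (regs); after "stdu 1, -208(1)" those same slots are 200(1),
   192(1), 184(1) and 176(1), and r30 = SP + 168 points at the next empty
   bytecode-stack slot.  */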
2672
2673 /* Emit prologue in inferior memory. See above comments. */
2674
2675 static void
2676 ppc64v1_emit_prologue (void)
2677 {
2678 /* On ELFv1, function pointers really point to a function descriptor,
2679 so emit one here. We don't care about the contents of descriptor
2680 doublewords 1 and 2 (TOC and environment), so let them just overlap our code. */
2681 uint64_t opd = current_insn_ptr + 8;
2682 uint32_t buf[2];
2683
2684 /* Mind the strict aliasing rules. */
2685 memcpy (buf, &opd, sizeof buf);
2686 emit_insns (buf, 2);
2687 EMIT_ASM (/* Save return address. */
2688 "mflr 0 \n"
2689 "std 0, 16(1) \n"
2690 /* Save r31, r30 and the incoming arguments. */
2691 "std 31, -8(1) \n"
2692 "std 30, -16(1) \n"
2693 "std 4, -24(1) \n"
2694 "std 3, -32(1) \n"
2695 /* Point r31 to the current r1 for accessing arguments. */
2696 "mr 31, 1 \n"
2697 /* Adjust SP. 208 is the initial frame size. */
2698 "stdu 1, -208(1) \n"
2699 /* Set r30 to point to the stack top. */
2700 "addi 30, 1, 168 \n"
2701 /* Initialize r3/TOP to 0. */
2702 "li 3, 0 \n");
2703 }
2704
2705 /* Emit prologue in inferior memory. See above comments. */
2706
2707 static void
2708 ppc64v2_emit_prologue (void)
2709 {
2710 EMIT_ASM (/* Save return address. */
2711 "mflr 0 \n"
2712 "std 0, 16(1) \n"
2713 /* Save r31, r30 and the incoming arguments. */
2714 "std 31, -8(1) \n"
2715 "std 30, -16(1) \n"
2716 "std 4, -24(1) \n"
2717 "std 3, -32(1) \n"
2718 /* Point r31 to the current r1 for accessing arguments. */
2719 "mr 31, 1 \n"
2720 /* Adjust SP. 208 is the initial frame size. */
2721 "stdu 1, -208(1) \n"
2722 /* Set r30 to point to the stack top. */
2723 "addi 30, 1, 168 \n"
2724 /* Initialize r3/TOP to 0. */
2725 "li 3, 0 \n");
2726 }
2727
2728 /* Emit epilogue in inferior memory. See above comments. */
2729
2730 static void
2731 ppc64_emit_epilogue (void)
2732 {
2733 EMIT_ASM (/* Restore SP. */
2734 "ld 1, 0(1) \n"
2735 /* *result = TOP */
2736 "ld 4, -24(1) \n"
2737 "std 3, 0(4) \n"
2738 /* Restore registers. */
2739 "ld 31, -8(1) \n"
2740 "ld 30, -16(1) \n"
2741 /* Restore LR. */
2742 "ld 0, 16(1) \n"
2743 /* Return 0 for no-error. */
2744 "li 3, 0 \n"
2745 "mtlr 0 \n"
2746 "blr \n");
2747 }
2748
2749 /* TOP = stack[--sp] + TOP */
2750
2751 static void
2752 ppc64_emit_add (void)
2753 {
2754 EMIT_ASM ("ldu 4, 8(30) \n"
2755 "add 3, 4, 3 \n");
2756 }
2757
2758 /* TOP = stack[--sp] - TOP */
2759
2760 static void
2761 ppc64_emit_sub (void)
2762 {
2763 EMIT_ASM ("ldu 4, 8(30) \n"
2764 "sub 3, 4, 3 \n");
2765 }
2766
2767 /* TOP = stack[--sp] * TOP */
2768
2769 static void
2770 ppc64_emit_mul (void)
2771 {
2772 EMIT_ASM ("ldu 4, 8(30) \n"
2773 "mulld 3, 4, 3 \n");
2774 }
2775
2776 /* TOP = stack[--sp] << TOP */
2777
2778 static void
2779 ppc64_emit_lsh (void)
2780 {
2781 EMIT_ASM ("ldu 4, 8(30) \n"
2782 "sld 3, 4, 3 \n");
2783 }
2784
2785 /* TOP = stack[--sp] >> TOP
2786 (Arithmetic shift right) */
2787
2788 static void
2789 ppc64_emit_rsh_signed (void)
2790 {
2791 EMIT_ASM ("ldu 4, 8(30) \n"
2792 "srad 3, 4, 3 \n");
2793 }
2794
2795 /* TOP = stack[--sp] >> TOP
2796 (Logical shift right) */
2797
2798 static void
2799 ppc64_emit_rsh_unsigned (void)
2800 {
2801 EMIT_ASM ("ldu 4, 8(30) \n"
2802 "srd 3, 4, 3 \n");
2803 }
2804
2805 /* Emit code for the sign extension specified by ARG. */
2806
2807 static void
2808 ppc64_emit_ext (int arg)
2809 {
2810 switch (arg)
2811 {
2812 case 8:
2813 EMIT_ASM ("extsb 3, 3");
2814 break;
2815 case 16:
2816 EMIT_ASM ("extsh 3, 3");
2817 break;
2818 case 32:
2819 EMIT_ASM ("extsw 3, 3");
2820 break;
2821 default:
2822 emit_error = 1;
2823 }
2824 }
2825
2826 /* Emit code for zero-extension specified by ARG. */
2827
2828 static void
2829 ppc64_emit_zero_ext (int arg)
2830 {
2831 switch (arg)
2832 {
2833 case 8:
2834 EMIT_ASM ("rldicl 3,3,0,56");
2835 break;
2836 case 16:
2837 EMIT_ASM ("rldicl 3,3,0,48");
2838 break;
2839 case 32:
2840 EMIT_ASM ("rldicl 3,3,0,32");
2841 break;
2842 default:
2843 emit_error = 1;
2844 }
2845 }
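
/* rldicl rA, rS, 0, MB rotates by zero and clears the MB high-order bits,
   so MB values of 56, 48 and 32 above keep only the low 8, 16 and 32 bits
   of r3 respectively, which is exactly the required zero extension.  */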
2846
2847 /* TOP = !TOP
2848 i.e., TOP = (TOP == 0) ? 1 : 0; */
2849
2850 static void
2851 ppc64_emit_log_not (void)
2852 {
2853 EMIT_ASM ("cntlzd 3, 3 \n"
2854 "srdi 3, 3, 6 \n");
2855 }
2856
2857 /* TOP = stack[--sp] & TOP */
2858
2859 static void
2860 ppc64_emit_bit_and (void)
2861 {
2862 EMIT_ASM ("ldu 4, 8(30) \n"
2863 "and 3, 4, 3 \n");
2864 }
2865
2866 /* TOP = stack[--sp] | TOP */
2867
2868 static void
2869 ppc64_emit_bit_or (void)
2870 {
2871 EMIT_ASM ("ldu 4, 8(30) \n"
2872 "or 3, 4, 3 \n");
2873 }
2874
2875 /* TOP = stack[--sp] ^ TOP */
2876
2877 static void
2878 ppc64_emit_bit_xor (void)
2879 {
2880 EMIT_ASM ("ldu 4, 8(30) \n"
2881 "xor 3, 4, 3 \n");
2882 }
2883
2884 /* TOP = ~TOP
2885 i.e., TOP = ~(TOP | TOP) */
2886
2887 static void
2888 ppc64_emit_bit_not (void)
2889 {
2890 EMIT_ASM ("nor 3, 3, 3 \n");
2891 }
2892
2893 /* TOP = stack[--sp] == TOP */
2894
2895 static void
2896 ppc64_emit_equal (void)
2897 {
2898 EMIT_ASM ("ldu 4, 8(30) \n"
2899 "xor 3, 3, 4 \n"
2900 "cntlzd 3, 3 \n"
2901 "srdi 3, 3, 6 \n");
2902 }
2903
2904 /* TOP = stack[--sp] < TOP
2905 (Signed comparison) */
2906
2907 static void
2908 ppc64_emit_less_signed (void)
2909 {
2910 EMIT_ASM ("ldu 4, 8(30) \n"
2911 "cmpd 7, 4, 3 \n"
2912 "mfcr 3 \n"
2913 "rlwinm 3, 3, 29, 31, 31 \n");
2914 }
2915
2916 /* TOP = stack[--sp] < TOP
2917 (Unsigned comparison) */
2918
2919 static void
2920 ppc64_emit_less_unsigned (void)
2921 {
2922 EMIT_ASM ("ldu 4, 8(30) \n"
2923 "cmpld 7, 4, 3 \n"
2924 "mfcr 3 \n"
2925 "rlwinm 3, 3, 29, 31, 31 \n");
2926 }
2927
2928 /* Read SIZE bytes from the memory address in TOP.
2929 Zero-extend the value read. */
2930
2931 static void
2932 ppc64_emit_ref (int size)
2933 {
2934 switch (size)
2935 {
2936 case 1:
2937 EMIT_ASM ("lbz 3, 0(3)");
2938 break;
2939 case 2:
2940 EMIT_ASM ("lhz 3, 0(3)");
2941 break;
2942 case 4:
2943 EMIT_ASM ("lwz 3, 0(3)");
2944 break;
2945 case 8:
2946 EMIT_ASM ("ld 3, 0(3)");
2947 break;
2948 }
2949 }
2950
2951 /* TOP = NUM */
2952
2953 static void
2954 ppc64_emit_const (LONGEST num)
2955 {
2956 uint32_t buf[5];
2957 uint32_t *p = buf;
2958
2959 p += gen_limm (p, 3, num, 1);
2960
2961 emit_insns (buf, p - buf);
2962 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2963 }
2964
2965 /* Set TOP to the value of register REG by calling the get_raw_reg function
2966 with two arguments, the collected register buffer and the register number. */
2967
2968 static void
2969 ppc64v1_emit_reg (int reg)
2970 {
2971 uint32_t buf[15];
2972 uint32_t *p = buf;
2973
2974 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1) after the prologue adjusts SP. */
2975 p += GEN_LD (p, 3, 31, -32);
2976 p += GEN_LI (p, 4, reg);
2977 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2978 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2979 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2980
2981 emit_insns (buf, p - buf);
2982 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2983 }
2984
2985 /* Likewise, for ELFv2. */
2986
2987 static void
2988 ppc64v2_emit_reg (int reg)
2989 {
2990 uint32_t buf[12];
2991 uint32_t *p = buf;
2992
2993 /* fctx->regs is passed in r3 and then saved at -32(31), i.e. 176(1) after the prologue adjusts SP. */
2994 p += GEN_LD (p, 3, 31, -32);
2995 p += GEN_LI (p, 4, reg);
2996 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2997 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2998 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2999
3000 emit_insns (buf, p - buf);
3001 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3002 }
3003
3004 /* TOP = stack[--sp] */
3005
3006 static void
3007 ppc64_emit_pop (void)
3008 {
3009 EMIT_ASM ("ldu 3, 8(30)");
3010 }
3011
3012 /* stack[sp++] = TOP
3013
3014 Because we may use up the bytecode stack, expand it by 8 more
3015 doublewords if needed. */
3016
3017 static void
3018 ppc64_emit_stack_flush (void)
3019 {
3020 /* Make sure the bytecode stack is big enough before the push.
3021 Otherwise, expand it by another 64 bytes. */
3022
3023 EMIT_ASM (" std 3, 0(30) \n"
3024 " addi 4, 30, -(112 + 8) \n"
3025 " cmpd 7, 4, 1 \n"
3026 " bgt 7, 1f \n"
3027 " stdu 31, -64(1) \n"
3028 "1:addi 30, 30, -8 \n");
3029 }
3030
3031 /* Swap TOP and stack[sp-1] */
3032
3033 static void
3034 ppc64_emit_swap (void)
3035 {
3036 EMIT_ASM ("ld 4, 8(30) \n"
3037 "std 3, 8(30) \n"
3038 "mr 3, 4 \n");
3039 }
3040
3041 /* Call function FN - ELFv1. */
3042
3043 static void
3044 ppc64v1_emit_call (CORE_ADDR fn)
3045 {
3046 uint32_t buf[13];
3047 uint32_t *p = buf;
3048
3049 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3050 p += gen_call (p, fn, 1, 1);
3051 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3052
3053 emit_insns (buf, p - buf);
3054 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3055 }
3056
3057 /* Call function FN - ELFv2. */
3058
3059 static void
3060 ppc64v2_emit_call (CORE_ADDR fn)
3061 {
3062 uint32_t buf[10];
3063 uint32_t *p = buf;
3064
3065 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3066 p += gen_call (p, fn, 1, 0);
3067 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3068
3069 emit_insns (buf, p - buf);
3070 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3071 }
3072
3073 /* FN's prototype is `LONGEST(*fn)(int)'.
3074 TOP = fn (arg1)
3075 */
3076
3077 static void
3078 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3079 {
3080 uint32_t buf[13];
3081 uint32_t *p = buf;
3082
3083 /* Setup argument. arg1 is a 16-bit value. */
3084 p += gen_limm (p, 3, arg1, 1);
3085 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3086 p += gen_call (p, fn, 1, 1);
3087 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3088
3089 emit_insns (buf, p - buf);
3090 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3091 }
3092
3093 /* Likewise for ELFv2. */
3094
3095 static void
3096 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3097 {
3098 uint32_t buf[10];
3099 uint32_t *p = buf;
3100
3101 /* Setup argument. arg1 is a 16-bit value. */
3102 p += gen_limm (p, 3, arg1, 1);
3103 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3104 p += gen_call (p, fn, 1, 0);
3105 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3106
3107 emit_insns (buf, p - buf);
3108 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3109 }
3110
3111 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3112 fn (arg1, TOP)
3113
3114 TOP should be preserved/restored before/after the call. */
3115
3116 static void
3117 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3118 {
3119 uint32_t buf[17];
3120 uint32_t *p = buf;
3121
3122 /* Save TOP. 0(30) is next-empty. */
3123 p += GEN_STD (p, 3, 30, 0);
3124
3125 /* Setup argument. arg1 is a 16-bit value. */
3126 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3127 p += gen_limm (p, 3, arg1, 1);
3128 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3129 p += gen_call (p, fn, 1, 1);
3130 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3131
3132 /* Restore TOP */
3133 p += GEN_LD (p, 3, 30, 0);
3134
3135 emit_insns (buf, p - buf);
3136 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3137 }
3138
3139 /* Likewise for ELFv2. */
3140
3141 static void
3142 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3143 {
3144 uint32_t buf[14];
3145 uint32_t *p = buf;
3146
3147 /* Save TOP. 0(30) is next-empty. */
3148 p += GEN_STD (p, 3, 30, 0);
3149
3150 /* Setup argument. arg1 is a 16-bit value. */
3151 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3152 p += gen_limm (p, 3, arg1, 1);
3153 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3154 p += gen_call (p, fn, 1, 0);
3155 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3156
3157 /* Restore TOP */
3158 p += GEN_LD (p, 3, 30, 0);
3159
3160 emit_insns (buf, p - buf);
3161 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3162 }
3163
3164 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3165
3166 static void
3167 ppc64_emit_if_goto (int *offset_p, int *size_p)
3168 {
3169 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3170 "ldu 3, 8(30) \n"
3171 "1:bne 7, 1b \n");
3172
3173 if (offset_p)
3174 *offset_p = 8;
3175 if (size_p)
3176 *size_p = 14;
3177 }
3178
3179 /* Goto if stack[--sp] == TOP */
3180
3181 static void
3182 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3183 {
3184 EMIT_ASM ("ldu 4, 8(30) \n"
3185 "cmpd 7, 4, 3 \n"
3186 "ldu 3, 8(30) \n"
3187 "1:beq 7, 1b \n");
3188
3189 if (offset_p)
3190 *offset_p = 12;
3191 if (size_p)
3192 *size_p = 14;
3193 }
3194
3195 /* Goto if stack[--sp] != TOP */
3196
3197 static void
3198 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3199 {
3200 EMIT_ASM ("ldu 4, 8(30) \n"
3201 "cmpd 7, 4, 3 \n"
3202 "ldu 3, 8(30) \n"
3203 "1:bne 7, 1b \n");
3204
3205 if (offset_p)
3206 *offset_p = 12;
3207 if (size_p)
3208 *size_p = 14;
3209 }
3210
3211 /* Goto if stack[--sp] < TOP */
3212
3213 static void
3214 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3215 {
3216 EMIT_ASM ("ldu 4, 8(30) \n"
3217 "cmpd 7, 4, 3 \n"
3218 "ldu 3, 8(30) \n"
3219 "1:blt 7, 1b \n");
3220
3221 if (offset_p)
3222 *offset_p = 12;
3223 if (size_p)
3224 *size_p = 14;
3225 }
3226
3227 /* Goto if stack[--sp] <= TOP */
3228
3229 static void
3230 ppc64_emit_le_goto (int *offset_p, int *size_p)
3231 {
3232 EMIT_ASM ("ldu 4, 8(30) \n"
3233 "cmpd 7, 4, 3 \n"
3234 "ldu 3, 8(30) \n"
3235 "1:ble 7, 1b \n");
3236
3237 if (offset_p)
3238 *offset_p = 12;
3239 if (size_p)
3240 *size_p = 14;
3241 }
3242
3243 /* Goto if stack[--sp] > TOP */
3244
3245 static void
3246 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3247 {
3248 EMIT_ASM ("ldu 4, 8(30) \n"
3249 "cmpd 7, 4, 3 \n"
3250 "ldu 3, 8(30) \n"
3251 "1:bgt 7, 1b \n");
3252
3253 if (offset_p)
3254 *offset_p = 12;
3255 if (size_p)
3256 *size_p = 14;
3257 }
3258
3259 /* Goto if stack[--sp] >= TOP */
3260
3261 static void
3262 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3263 {
3264 EMIT_ASM ("ldu 4, 8(30) \n"
3265 "cmpd 7, 4, 3 \n"
3266 "ldu 3, 8(30) \n"
3267 "1:bge 7, 1b \n");
3268
3269 if (offset_p)
3270 *offset_p = 12;
3271 if (size_p)
3272 *size_p = 14;
3273 }
3274
3275 /* Table of emit ops for 64-bit ELFv1. */
3276
3277 static struct emit_ops ppc64v1_emit_ops_impl =
3278 {
3279 ppc64v1_emit_prologue,
3280 ppc64_emit_epilogue,
3281 ppc64_emit_add,
3282 ppc64_emit_sub,
3283 ppc64_emit_mul,
3284 ppc64_emit_lsh,
3285 ppc64_emit_rsh_signed,
3286 ppc64_emit_rsh_unsigned,
3287 ppc64_emit_ext,
3288 ppc64_emit_log_not,
3289 ppc64_emit_bit_and,
3290 ppc64_emit_bit_or,
3291 ppc64_emit_bit_xor,
3292 ppc64_emit_bit_not,
3293 ppc64_emit_equal,
3294 ppc64_emit_less_signed,
3295 ppc64_emit_less_unsigned,
3296 ppc64_emit_ref,
3297 ppc64_emit_if_goto,
3298 ppc_emit_goto,
3299 ppc_write_goto_address,
3300 ppc64_emit_const,
3301 ppc64v1_emit_call,
3302 ppc64v1_emit_reg,
3303 ppc64_emit_pop,
3304 ppc64_emit_stack_flush,
3305 ppc64_emit_zero_ext,
3306 ppc64_emit_swap,
3307 ppc_emit_stack_adjust,
3308 ppc64v1_emit_int_call_1,
3309 ppc64v1_emit_void_call_2,
3310 ppc64_emit_eq_goto,
3311 ppc64_emit_ne_goto,
3312 ppc64_emit_lt_goto,
3313 ppc64_emit_le_goto,
3314 ppc64_emit_gt_goto,
3315 ppc64_emit_ge_goto
3316 };
3317
3318 /* Table of emit ops for 64-bit ELFv2. */
3319
3320 static struct emit_ops ppc64v2_emit_ops_impl =
3321 {
3322 ppc64v2_emit_prologue,
3323 ppc64_emit_epilogue,
3324 ppc64_emit_add,
3325 ppc64_emit_sub,
3326 ppc64_emit_mul,
3327 ppc64_emit_lsh,
3328 ppc64_emit_rsh_signed,
3329 ppc64_emit_rsh_unsigned,
3330 ppc64_emit_ext,
3331 ppc64_emit_log_not,
3332 ppc64_emit_bit_and,
3333 ppc64_emit_bit_or,
3334 ppc64_emit_bit_xor,
3335 ppc64_emit_bit_not,
3336 ppc64_emit_equal,
3337 ppc64_emit_less_signed,
3338 ppc64_emit_less_unsigned,
3339 ppc64_emit_ref,
3340 ppc64_emit_if_goto,
3341 ppc_emit_goto,
3342 ppc_write_goto_address,
3343 ppc64_emit_const,
3344 ppc64v2_emit_call,
3345 ppc64v2_emit_reg,
3346 ppc64_emit_pop,
3347 ppc64_emit_stack_flush,
3348 ppc64_emit_zero_ext,
3349 ppc64_emit_swap,
3350 ppc_emit_stack_adjust,
3351 ppc64v2_emit_int_call_1,
3352 ppc64v2_emit_void_call_2,
3353 ppc64_emit_eq_goto,
3354 ppc64_emit_ne_goto,
3355 ppc64_emit_lt_goto,
3356 ppc64_emit_le_goto,
3357 ppc64_emit_gt_goto,
3358 ppc64_emit_ge_goto
3359 };
3360
3361 #endif
3362
3363 /* Implementation of linux_target_ops method "emit_ops". */
3364
3365 static struct emit_ops *
3366 ppc_emit_ops (void)
3367 {
3368 #ifdef __powerpc64__
3369 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3370
3371 if (register_size (regcache->tdesc, 0) == 8)
3372 {
3373 if (is_elfv2_inferior ())
3374 return &ppc64v2_emit_ops_impl;
3375 else
3376 return &ppc64v1_emit_ops_impl;
3377 }
3378 #endif
3379 return &ppc_emit_ops_impl;
3380 }
3381
3382 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3383
3384 static int
3385 ppc_get_ipa_tdesc_idx (void)
3386 {
3387 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3388 const struct target_desc *tdesc = regcache->tdesc;
3389
3390 #ifdef __powerpc64__
3391 if (tdesc == tdesc_powerpc_64l)
3392 return PPC_TDESC_BASE;
3393 if (tdesc == tdesc_powerpc_altivec64l)
3394 return PPC_TDESC_ALTIVEC;
3395 if (tdesc == tdesc_powerpc_vsx64l)
3396 return PPC_TDESC_VSX;
3397 if (tdesc == tdesc_powerpc_isa205_64l)
3398 return PPC_TDESC_ISA205;
3399 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3400 return PPC_TDESC_ISA205_ALTIVEC;
3401 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3402 return PPC_TDESC_ISA205_VSX;
3403 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3404 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3405 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3406 return PPC_TDESC_ISA207_VSX;
3407 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3408 return PPC_TDESC_ISA207_HTM_VSX;
3409 #endif
3410
3411 if (tdesc == tdesc_powerpc_32l)
3412 return PPC_TDESC_BASE;
3413 if (tdesc == tdesc_powerpc_altivec32l)
3414 return PPC_TDESC_ALTIVEC;
3415 if (tdesc == tdesc_powerpc_vsx32l)
3416 return PPC_TDESC_VSX;
3417 if (tdesc == tdesc_powerpc_isa205_32l)
3418 return PPC_TDESC_ISA205;
3419 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3420 return PPC_TDESC_ISA205_ALTIVEC;
3421 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3422 return PPC_TDESC_ISA205_VSX;
3423 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3424 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3425 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3426 return PPC_TDESC_ISA207_VSX;
3427 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3428 return PPC_TDESC_ISA207_HTM_VSX;
3429 if (tdesc == tdesc_powerpc_e500l)
3430 return PPC_TDESC_E500;
3431
3432 return 0;
3433 }
3434
3435 struct linux_target_ops the_low_target = {
3436 ppc_install_fast_tracepoint_jump_pad,
3437 ppc_emit_ops,
3438 ppc_get_min_fast_tracepoint_insn_len,
3439 NULL, /* supports_range_stepping */
3440 ppc_supports_hardware_single_step,
3441 NULL, /* get_syscall_trapinfo */
3442 ppc_get_ipa_tdesc_idx,
3443 };
3444
3445 /* The linux target ops object. */
3446
3447 linux_process_target *the_linux_target = &the_ppc_target;
3448
3449 void
3450 initialize_low_arch (void)
3451 {
3452 /* Initialize the Linux target descriptions. */
3453
3454 init_registers_powerpc_32l ();
3455 init_registers_powerpc_altivec32l ();
3456 init_registers_powerpc_vsx32l ();
3457 init_registers_powerpc_isa205_32l ();
3458 init_registers_powerpc_isa205_altivec32l ();
3459 init_registers_powerpc_isa205_vsx32l ();
3460 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3461 init_registers_powerpc_isa207_vsx32l ();
3462 init_registers_powerpc_isa207_htm_vsx32l ();
3463 init_registers_powerpc_e500l ();
3464 #if __powerpc64__
3465 init_registers_powerpc_64l ();
3466 init_registers_powerpc_altivec64l ();
3467 init_registers_powerpc_vsx64l ();
3468 init_registers_powerpc_isa205_64l ();
3469 init_registers_powerpc_isa205_altivec64l ();
3470 init_registers_powerpc_isa205_vsx64l ();
3471 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3472 init_registers_powerpc_isa207_vsx64l ();
3473 init_registers_powerpc_isa207_htm_vsx64l ();
3474 #endif
3475
3476 initialize_regsets_info (&ppc_regsets_info);
3477 }