1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
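
/* As a worked example of these macros: for the unconditional branch
   "b .+8", encoded as 0x48000008, PPC_OP6 yields 18 and PPC_LI yields 8;
   for "b .-4" (0x4bfffffc), PPC_LI yields -4, because PPC_SEXT
   sign-extends the 24-bit LI field before it is shifted left by 2.  */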
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
56
57 bool supports_z_point_type (char z_type) override;
58
59
60 void low_collect_ptrace_register (regcache *regcache, int regno,
61 char *buf) override;
62
63 void low_supply_ptrace_register (regcache *regcache, int regno,
64 const char *buf) override;
65
66 bool supports_tracepoints () override;
67
68 bool supports_fast_tracepoints () override;
69
70 int install_fast_tracepoint_jump_pad
71 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
72 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
73 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
74 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
75 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
76 char *err) override;
77
78 int get_min_fast_tracepoint_insn_len () override;
79
80 protected:
81
82 void low_arch_setup () override;
83
84 bool low_cannot_fetch_register (int regno) override;
85
86 bool low_cannot_store_register (int regno) override;
87
88 bool low_supports_breakpoints () override;
89
90 CORE_ADDR low_get_pc (regcache *regcache) override;
91
92 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
93
94 bool low_breakpoint_at (CORE_ADDR pc) override;
95
96 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
97 int size, raw_breakpoint *bp) override;
98
99 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
100 int size, raw_breakpoint *bp) override;
101
102 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
103 };
104
105 /* The singleton target ops object. */
106
107 static ppc_target the_ppc_target;
108
109 /* Holds the AT_HWCAP auxv entry. */
110
111 static unsigned long ppc_hwcap;
112
113 /* Holds the AT_HWCAP2 auxv entry. */
114
115 static unsigned long ppc_hwcap2;
116
117
118 #define ppc_num_regs 73
119
120 #ifdef __powerpc64__
121 /* We use a constant for FPSCR instead of PT_FPSCR, because
122 many shipped PPC64 kernels had the wrong value in ptrace.h. */
123 static int ppc_regmap[] =
124 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
125 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
126 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
127 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
128 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
129 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
130 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
131 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
132 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
133 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
134 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
135 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
136 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
137 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
138 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
139 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
140 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
141 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
142 PT_ORIG_R3 * 8, PT_TRAP * 8 };
143 #else
144 /* Currently, don't check/send MQ. */
145 static int ppc_regmap[] =
146 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
147 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
148 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
149 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
150 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
151 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
152 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
153 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
154 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
155 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
156 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
157 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
158 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
159 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
160 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
161 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
162 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
163 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
164 PT_ORIG_R3 * 4, PT_TRAP * 4
165 };
166
167 static int ppc_regmap_e500[] =
168 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
169 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
170 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
171 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
172 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
173 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
174 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
175 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
176 -1, -1, -1, -1,
177 -1, -1, -1, -1,
178 -1, -1, -1, -1,
179 -1, -1, -1, -1,
180 -1, -1, -1, -1,
181 -1, -1, -1, -1,
182 -1, -1, -1, -1,
183 -1, -1, -1, -1,
184 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
185 PT_CTR * 4, PT_XER * 4, -1,
186 PT_ORIG_R3 * 4, PT_TRAP * 4
187 };
188 #endif
189
190 /* Check whether the kernel provides a register set with number
191 REGSET_ID of size REGSETSIZE for process/thread TID. */
192
193 static int
194 ppc_check_regset (int tid, int regset_id, int regsetsize)
195 {
196 void *buf = alloca (regsetsize);
197 struct iovec iov;
198
199 iov.iov_base = buf;
200 iov.iov_len = regsetsize;
201
202 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
203 || errno == ENODATA)
204 return 1;
205 return 0;
206 }
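
/* Note that ENODATA counts as success here: it means the kernel
   recognizes the regset but has no data for it at the moment (for
   instance, the HTM checkpointed regsets while no transaction is
   active), which is enough to conclude the regset is supported.  */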
207
208 bool
209 ppc_target::low_cannot_store_register (int regno)
210 {
211 const struct target_desc *tdesc = current_process ()->tdesc;
212
213 #ifndef __powerpc64__
214 /* Some kernels do not allow us to store fpscr. */
215 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
216 && regno == find_regno (tdesc, "fpscr"))
217 return true;
218 #endif
219
220 /* Some kernels do not allow us to store orig_r3 or trap. */
221 if (regno == find_regno (tdesc, "orig_r3")
222 || regno == find_regno (tdesc, "trap"))
223 return true;
224
225 return false;
226 }
227
228 bool
229 ppc_target::low_cannot_fetch_register (int regno)
230 {
231 return false;
232 }
233
234 void
235 ppc_target::low_collect_ptrace_register (regcache *regcache, int regno,
236 char *buf)
237 {
238 memset (buf, 0, sizeof (long));
239
240 if (__BYTE_ORDER == __LITTLE_ENDIAN)
241 {
242 /* Little-endian values always sit at the left end of the buffer. */
243 collect_register (regcache, regno, buf);
244 }
245 else if (__BYTE_ORDER == __BIG_ENDIAN)
246 {
247 /* Big-endian values sit at the right end of the buffer. For registers
248 whose size is smaller than sizeof (long), we must apply an offset
249 to access them correctly. */
250 int size = register_size (regcache->tdesc, regno);
251
252 if (size < sizeof (long))
253 collect_register (regcache, regno, buf + sizeof (long) - size);
254 else
255 collect_register (regcache, regno, buf);
256 }
257 else
258 perror_with_name ("Unexpected byte order");
259 }
260
261 void
262 ppc_target::low_supply_ptrace_register (regcache *regcache, int regno,
263 const char *buf)
264 {
265 if (__BYTE_ORDER == __LITTLE_ENDIAN)
266 {
267 /* Little-endian values always sit at the left end of the buffer. */
268 supply_register (regcache, regno, buf);
269 }
270 else if (__BYTE_ORDER == __BIG_ENDIAN)
271 {
272 /* Big-endian values sit at the right end of the buffer. For registers
273 whose size is smaller than sizeof (long), we must apply an offset
274 to access them correctly. */
275 int size = register_size (regcache->tdesc, regno);
276
277 if (size < sizeof (long))
278 supply_register (regcache, regno, buf + sizeof (long) - size);
279 else
280 supply_register (regcache, regno, buf);
281 }
282 else
283 perror_with_name ("Unexpected byte order");
284 }
285
286 bool
287 ppc_target::low_supports_breakpoints ()
288 {
289 return true;
290 }
291
292 CORE_ADDR
293 ppc_target::low_get_pc (regcache *regcache)
294 {
295 if (register_size (regcache->tdesc, 0) == 4)
296 {
297 unsigned int pc;
298 collect_register_by_name (regcache, "pc", &pc);
299 return (CORE_ADDR) pc;
300 }
301 else
302 {
303 unsigned long pc;
304 collect_register_by_name (regcache, "pc", &pc);
305 return (CORE_ADDR) pc;
306 }
307 }
308
309 void
310 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
311 {
312 if (register_size (regcache->tdesc, 0) == 4)
313 {
314 unsigned int newpc = pc;
315 supply_register_by_name (regcache, "pc", &newpc);
316 }
317 else
318 {
319 unsigned long newpc = pc;
320 supply_register_by_name (regcache, "pc", &newpc);
321 }
322 }
323
324 #ifndef __powerpc64__
325 static int ppc_regmap_adjusted;
326 #endif
327
328
329 /* Correct in either endianness.
330 This instruction is "twge r2, r2", which GDB uses as a software
331 breakpoint. */
332 static const unsigned int ppc_breakpoint = 0x7d821008;
333 #define ppc_breakpoint_len 4
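
/* Decoded, 0x7d821008 is primary opcode 31 with TO = 12 (trap if
   greater than or equal), RA = RB = 2 and extended opcode 4, i.e.
   "twge r2, r2", which traps unconditionally since r2 >= r2.  */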
334
335 /* Implementation of target ops method "sw_breakpoint_from_kind". */
336
337 const gdb_byte *
338 ppc_target::sw_breakpoint_from_kind (int kind, int *size)
339 {
340 *size = ppc_breakpoint_len;
341 return (const gdb_byte *) &ppc_breakpoint;
342 }
343
344 bool
345 ppc_target::low_breakpoint_at (CORE_ADDR where)
346 {
347 unsigned int insn;
348
349 read_memory (where, (unsigned char *) &insn, 4);
350 if (insn == ppc_breakpoint)
351 return true;
352 /* If necessary, recognize more trap instructions here. GDB only uses
353 the one. */
354
355 return false;
356 }
357
358 /* Implement the supports_z_point_type target op.
359 Returns true if breakpoints of type Z_TYPE are supported.
360 
361 Software breakpoints are handled on the server side, so tracepoints
362 and breakpoints can be inserted at the same location. */
363
364 bool
365 ppc_target::supports_z_point_type (char z_type)
366 {
367 switch (z_type)
368 {
369 case Z_PACKET_SW_BP:
370 return true;
371 case Z_PACKET_HW_BP:
372 case Z_PACKET_WRITE_WP:
373 case Z_PACKET_ACCESS_WP:
374 default:
375 return false;
376 }
377 }
378
379 /* Implement the low_insert_point linux target op.
380 Returns 0 on success, -1 on failure and 1 on unsupported. */
381
382 int
383 ppc_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
384 int size, raw_breakpoint *bp)
385 {
386 switch (type)
387 {
388 case raw_bkpt_type_sw:
389 return insert_memory_breakpoint (bp);
390
391 case raw_bkpt_type_hw:
392 case raw_bkpt_type_write_wp:
393 case raw_bkpt_type_access_wp:
394 default:
395 /* Unsupported. */
396 return 1;
397 }
398 }
399
400 /* Implement the low_remove_point linux target op.
401 Returns 0 on success, -1 on failure and 1 on unsupported. */
402
403 int
404 ppc_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
405 int size, raw_breakpoint *bp)
406 {
407 switch (type)
408 {
409 case raw_bkpt_type_sw:
410 return remove_memory_breakpoint (bp);
411
412 case raw_bkpt_type_hw:
413 case raw_bkpt_type_write_wp:
414 case raw_bkpt_type_access_wp:
415 default:
416 /* Unsupported. */
417 return 1;
418 }
419 }
420
421 /* Provide only a fill function for the general register set. ps_lgetregs
422 will use this for NPTL support. */
423
424 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
425 {
426 int i;
427
428 ppc_target *my_ppc_target = (ppc_target *) the_linux_target;
429
430 for (i = 0; i < 32; i++)
431 my_ppc_target->low_collect_ptrace_register (regcache, i,
432 (char *) buf + ppc_regmap[i]);
433
434 for (i = 64; i < 70; i++)
435 my_ppc_target->low_collect_ptrace_register (regcache, i,
436 (char *) buf + ppc_regmap[i]);
437
438 for (i = 71; i < 73; i++)
439 my_ppc_target->low_collect_ptrace_register (regcache, i,
440 (char *) buf + ppc_regmap[i]);
441 }
442
443 /* Program Priority Register regset fill function. */
444
445 static void
446 ppc_fill_pprregset (struct regcache *regcache, void *buf)
447 {
448 char *ppr = (char *) buf;
449
450 collect_register_by_name (regcache, "ppr", ppr);
451 }
452
453 /* Program Priority Register regset store function. */
454
455 static void
456 ppc_store_pprregset (struct regcache *regcache, const void *buf)
457 {
458 const char *ppr = (const char *) buf;
459
460 supply_register_by_name (regcache, "ppr", ppr);
461 }
462
463 /* Data Stream Control Register regset fill function. */
464
465 static void
466 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
467 {
468 char *dscr = (char *) buf;
469
470 collect_register_by_name (regcache, "dscr", dscr);
471 }
472
473 /* Data Stream Control Register regset store function. */
474
475 static void
476 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
477 {
478 const char *dscr = (const char *) buf;
479
480 supply_register_by_name (regcache, "dscr", dscr);
481 }
482
483 /* Target Address Register regset fill function. */
484
485 static void
486 ppc_fill_tarregset (struct regcache *regcache, void *buf)
487 {
488 char *tar = (char *) buf;
489
490 collect_register_by_name (regcache, "tar", tar);
491 }
492
493 /* Target Address Register regset store function. */
494
495 static void
496 ppc_store_tarregset (struct regcache *regcache, const void *buf)
497 {
498 const char *tar = (const char *) buf;
499
500 supply_register_by_name (regcache, "tar", tar);
501 }
502
503 /* Event-Based Branching regset store function. Unless the inferior
504 has a perf event open, ptrace can fail with ENODATA when reading or
505 writing this regset. For reading, the registers
506 will correctly show as unavailable. For writing, gdbserver
507 currently only caches any register writes from P and G packets and
508 the stub always tries to write all the regsets when resuming the
509 inferior, which would result in frequent warnings. For this
510 reason, we don't define a fill function. This also means that the
511 client-side regcache will be dirty if the user tries to write to
512 the EBB registers. G packets that the client sends to write to
513 unrelated registers will also include data for EBB registers, even
514 if they are unavailable. */
515
516 static void
517 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
518 {
519 const char *regset = (const char *) buf;
520
521 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
522 .dat file it is BESCR, EBBHR, EBBRR. */
523 supply_register_by_name (regcache, "ebbrr", &regset[0]);
524 supply_register_by_name (regcache, "ebbhr", &regset[8]);
525 supply_register_by_name (regcache, "bescr", &regset[16]);
526 }
527
528 /* Performance Monitoring Unit regset fill function. */
529
530 static void
531 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
532 {
533 char *regset = (char *) buf;
534
535 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
536 In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER. */
537 collect_register_by_name (regcache, "siar", &regset[0]);
538 collect_register_by_name (regcache, "sdar", &regset[8]);
539 collect_register_by_name (regcache, "sier", &regset[16]);
540 collect_register_by_name (regcache, "mmcr2", &regset[24]);
541 collect_register_by_name (regcache, "mmcr0", &regset[32]);
542 }
543
544 /* Performance Monitoring Unit regset store function. */
545
546 static void
547 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
548 {
549 const char *regset = (const char *) buf;
550
551 supply_register_by_name (regcache, "siar", &regset[0]);
552 supply_register_by_name (regcache, "sdar", &regset[8]);
553 supply_register_by_name (regcache, "sier", &regset[16]);
554 supply_register_by_name (regcache, "mmcr2", &regset[24]);
555 supply_register_by_name (regcache, "mmcr0", &regset[32]);
556 }
557
558 /* Hardware Transactional Memory special-purpose register regset fill
559 function. */
560
561 static void
562 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
563 {
564 int i, base;
565 char *regset = (char *) buf;
566
567 base = find_regno (regcache->tdesc, "tfhar");
568 for (i = 0; i < 3; i++)
569 collect_register (regcache, base + i, &regset[i * 8]);
570 }
571
572 /* Hardware Transactional Memory special-purpose register regset store
573 function. */
574
575 static void
576 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
577 {
578 int i, base;
579 const char *regset = (const char *) buf;
580
581 base = find_regno (regcache->tdesc, "tfhar");
582 for (i = 0; i < 3; i++)
583 supply_register (regcache, base + i, &regset[i * 8]);
584 }
585
586 /* For the same reasons as the EBB regset, none of the HTM
587 checkpointed regsets have a fill function. These registers are
588 only available if the inferior is in a transaction. */
589
590 /* Hardware Transactional Memory checkpointed general-purpose regset
591 store function. */
592
593 static void
594 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
595 {
596 int i, base, size, endian_offset;
597 const char *regset = (const char *) buf;
598
599 base = find_regno (regcache->tdesc, "cr0");
600 size = register_size (regcache->tdesc, base);
601
602 gdb_assert (size == 4 || size == 8);
603
604 for (i = 0; i < 32; i++)
605 supply_register (regcache, base + i, &regset[i * size]);
606
607 endian_offset = 0;
608
609 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
610 endian_offset = 4;
611
612 supply_register_by_name (regcache, "ccr",
613 &regset[PT_CCR * size + endian_offset]);
614
615 supply_register_by_name (regcache, "cxer",
616 &regset[PT_XER * size + endian_offset]);
617
618 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
619 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
620 }
621
622 /* Hardware Transactional Memory checkpointed floating-point regset
623 store function. */
624
625 static void
626 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
627 {
628 int i, base;
629 const char *regset = (const char *) buf;
630
631 base = find_regno (regcache->tdesc, "cf0");
632
633 for (i = 0; i < 32; i++)
634 supply_register (regcache, base + i, &regset[i * 8]);
635
636 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
637 }
638
639 /* Hardware Transactional Memory checkpointed vector regset store
640 function. */
641
642 static void
643 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
644 {
645 int i, base;
646 const char *regset = (const char *) buf;
647 int vscr_offset = 0;
648
649 base = find_regno (regcache->tdesc, "cvr0");
650
651 for (i = 0; i < 32; i++)
652 supply_register (regcache, base + i, &regset[i * 16]);
653
654 if (__BYTE_ORDER == __BIG_ENDIAN)
655 vscr_offset = 12;
656
657 supply_register_by_name (regcache, "cvscr",
658 &regset[32 * 16 + vscr_offset]);
659
660 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
661 }
662
663 /* Hardware Transactional Memory checkpointed vector-scalar regset
664 store function. */
665
666 static void
667 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
668 {
669 int i, base;
670 const char *regset = (const char *) buf;
671
672 base = find_regno (regcache->tdesc, "cvs0h");
673 for (i = 0; i < 32; i++)
674 supply_register (regcache, base + i, &regset[i * 8]);
675 }
676
677 /* Hardware Transactional Memory checkpointed Program Priority
678 Register regset store function. */
679
680 static void
681 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
682 {
683 const char *cppr = (const char *) buf;
684
685 supply_register_by_name (regcache, "cppr", cppr);
686 }
687
688 /* Hardware Transactional Memory checkpointed Data Stream Control
689 Register regset store function. */
690
691 static void
692 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
693 {
694 const char *cdscr = (const char *) buf;
695
696 supply_register_by_name (regcache, "cdscr", cdscr);
697 }
698
699 /* Hardware Transactional Memory checkpointed Target Address Register
700 regset store function. */
701
702 static void
703 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
704 {
705 const char *ctar = (const char *) buf;
706
707 supply_register_by_name (regcache, "ctar", ctar);
708 }
709
710 static void
711 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
712 {
713 int i, base;
714 char *regset = (char *) buf;
715
716 base = find_regno (regcache->tdesc, "vs0h");
717 for (i = 0; i < 32; i++)
718 collect_register (regcache, base + i, &regset[i * 8]);
719 }
720
721 static void
722 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
723 {
724 int i, base;
725 const char *regset = (const char *) buf;
726
727 base = find_regno (regcache->tdesc, "vs0h");
728 for (i = 0; i < 32; i++)
729 supply_register (regcache, base + i, &regset[i * 8]);
730 }
731
732 static void
733 ppc_fill_vrregset (struct regcache *regcache, void *buf)
734 {
735 int i, base;
736 char *regset = (char *) buf;
737 int vscr_offset = 0;
738
739 base = find_regno (regcache->tdesc, "vr0");
740 for (i = 0; i < 32; i++)
741 collect_register (regcache, base + i, &regset[i * 16]);
742
743 if (__BYTE_ORDER == __BIG_ENDIAN)
744 vscr_offset = 12;
745
746 collect_register_by_name (regcache, "vscr",
747 &regset[32 * 16 + vscr_offset]);
748
749 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
750 }
751
752 static void
753 ppc_store_vrregset (struct regcache *regcache, const void *buf)
754 {
755 int i, base;
756 const char *regset = (const char *) buf;
757 int vscr_offset = 0;
758
759 base = find_regno (regcache->tdesc, "vr0");
760 for (i = 0; i < 32; i++)
761 supply_register (regcache, base + i, &regset[i * 16]);
762
763 if (__BYTE_ORDER == __BIG_ENDIAN)
764 vscr_offset = 12;
765
766 supply_register_by_name (regcache, "vscr",
767 &regset[32 * 16 + vscr_offset]);
768 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
769 }
770
771 struct gdb_evrregset_t
772 {
773 unsigned long evr[32];
774 unsigned long long acc;
775 unsigned long spefscr;
776 };
777
778 static void
779 ppc_fill_evrregset (struct regcache *regcache, void *buf)
780 {
781 int i, ev0;
782 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
783
784 ev0 = find_regno (regcache->tdesc, "ev0h");
785 for (i = 0; i < 32; i++)
786 collect_register (regcache, ev0 + i, &regset->evr[i]);
787
788 collect_register_by_name (regcache, "acc", &regset->acc);
789 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
790 }
791
792 static void
793 ppc_store_evrregset (struct regcache *regcache, const void *buf)
794 {
795 int i, ev0;
796 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
797
798 ev0 = find_regno (regcache->tdesc, "ev0h");
799 for (i = 0; i < 32; i++)
800 supply_register (regcache, ev0 + i, &regset->evr[i]);
801
802 supply_register_by_name (regcache, "acc", &regset->acc);
803 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
804 }
805
806 /* Support for hardware single step. */
807
808 static int
809 ppc_supports_hardware_single_step (void)
810 {
811 return 1;
812 }
813
814 static struct regset_info ppc_regsets[] = {
815 /* List the extra register sets before GENERAL_REGS. That way we will
816 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
817 general registers. Some kernels support these, but not the newer
818 PPC_PTRACE_GETREGS. */
819 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
820 NULL, ppc_store_tm_ctarregset },
821 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
822 NULL, ppc_store_tm_cdscrregset },
823 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
824 NULL, ppc_store_tm_cpprregset },
825 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
826 NULL, ppc_store_tm_cvsxregset },
827 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
828 NULL, ppc_store_tm_cvrregset },
829 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
830 NULL, ppc_store_tm_cfprregset },
831 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
832 NULL, ppc_store_tm_cgprregset },
833 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
834 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
835 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
836 NULL, ppc_store_ebbregset },
837 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
838 ppc_fill_pmuregset, ppc_store_pmuregset },
839 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
840 ppc_fill_tarregset, ppc_store_tarregset },
841 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
842 ppc_fill_pprregset, ppc_store_pprregset },
843 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
844 ppc_fill_dscrregset, ppc_store_dscrregset },
845 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
846 ppc_fill_vsxregset, ppc_store_vsxregset },
847 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
848 ppc_fill_vrregset, ppc_store_vrregset },
849 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
850 ppc_fill_evrregset, ppc_store_evrregset },
851 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
852 NULL_REGSET
853 };
854
855 static struct usrregs_info ppc_usrregs_info =
856 {
857 ppc_num_regs,
858 ppc_regmap,
859 };
860
861 static struct regsets_info ppc_regsets_info =
862 {
863 ppc_regsets, /* regsets */
864 0, /* num_regsets */
865 NULL, /* disabled_regsets */
866 };
867
868 static struct regs_info myregs_info =
869 {
870 NULL, /* regset_bitmap */
871 &ppc_usrregs_info,
872 &ppc_regsets_info
873 };
874
875 const regs_info *
876 ppc_target::get_regs_info ()
877 {
878 return &myregs_info;
879 }
880
881 void
882 ppc_target::low_arch_setup ()
883 {
884 const struct target_desc *tdesc;
885 struct regset_info *regset;
886 struct ppc_linux_features features = ppc_linux_no_features;
887
888 int tid = lwpid_of (current_thread);
889
890 features.wordsize = ppc_linux_target_wordsize (tid);
891
892 if (features.wordsize == 4)
893 tdesc = tdesc_powerpc_32l;
894 else
895 tdesc = tdesc_powerpc_64l;
896
897 current_process ()->tdesc = tdesc;
898
899 /* The value of current_process ()->tdesc needs to be set for this
900 call. */
901 ppc_hwcap = linux_get_hwcap (features.wordsize);
902 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
903
904 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
905
906 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
907 features.vsx = true;
908
909 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
910 features.altivec = true;
911
912 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
913 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
914 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
915 {
916 features.ppr_dscr = true;
917 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
918 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
919 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
920 && ppc_check_regset (tid, NT_PPC_TAR,
921 PPC_LINUX_SIZEOF_TARREGSET)
922 && ppc_check_regset (tid, NT_PPC_EBB,
923 PPC_LINUX_SIZEOF_EBBREGSET)
924 && ppc_check_regset (tid, NT_PPC_PMU,
925 PPC_LINUX_SIZEOF_PMUREGSET))
926 {
927 features.isa207 = true;
928 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
929 && ppc_check_regset (tid, NT_PPC_TM_SPR,
930 PPC_LINUX_SIZEOF_TM_SPRREGSET))
931 features.htm = true;
932 }
933 }
934
935 tdesc = ppc_linux_match_description (features);
936
937 /* On 32-bit machines, check for SPE registers.
938 Set the low target's regmap field appropriately. */
939 #ifndef __powerpc64__
940 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
941 tdesc = tdesc_powerpc_e500l;
942
943 if (!ppc_regmap_adjusted)
944 {
945 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
946 ppc_usrregs_info.regmap = ppc_regmap_e500;
947
948 /* If the FPSCR is 64-bit wide, we need to fetch the whole
949 64-bit slot and not just its second word. The PT_FPSCR
950 supplied in a 32-bit GDB compilation doesn't reflect
951 this. */
952 if (register_size (tdesc, 70) == 8)
953 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
954
955 ppc_regmap_adjusted = 1;
956 }
957 #endif
958
959 current_process ()->tdesc = tdesc;
960
961 for (regset = ppc_regsets; regset->size >= 0; regset++)
962 switch (regset->get_request)
963 {
964 case PTRACE_GETVRREGS:
965 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
966 break;
967 case PTRACE_GETVSXREGS:
968 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
969 break;
970 case PTRACE_GETEVRREGS:
971 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
972 regset->size = 32 * 4 + 8 + 4;
973 else
974 regset->size = 0;
975 break;
976 case PTRACE_GETREGSET:
977 switch (regset->nt_type)
978 {
979 case NT_PPC_PPR:
980 regset->size = (features.ppr_dscr ?
981 PPC_LINUX_SIZEOF_PPRREGSET : 0);
982 break;
983 case NT_PPC_DSCR:
984 regset->size = (features.ppr_dscr ?
985 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
986 break;
987 case NT_PPC_TAR:
988 regset->size = (features.isa207 ?
989 PPC_LINUX_SIZEOF_TARREGSET : 0);
990 break;
991 case NT_PPC_EBB:
992 regset->size = (features.isa207 ?
993 PPC_LINUX_SIZEOF_EBBREGSET : 0);
994 break;
995 case NT_PPC_PMU:
996 regset->size = (features.isa207 ?
997 PPC_LINUX_SIZEOF_PMUREGSET : 0);
998 break;
999 case NT_PPC_TM_SPR:
1000 regset->size = (features.htm ?
1001 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
1002 break;
1003 case NT_PPC_TM_CGPR:
1004 if (features.wordsize == 4)
1005 regset->size = (features.htm ?
1006 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
1007 else
1008 regset->size = (features.htm ?
1009 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
1010 break;
1011 case NT_PPC_TM_CFPR:
1012 regset->size = (features.htm ?
1013 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
1014 break;
1015 case NT_PPC_TM_CVMX:
1016 regset->size = (features.htm ?
1017 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
1018 break;
1019 case NT_PPC_TM_CVSX:
1020 regset->size = (features.htm ?
1021 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
1022 break;
1023 case NT_PPC_TM_CPPR:
1024 regset->size = (features.htm ?
1025 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
1026 break;
1027 case NT_PPC_TM_CDSCR:
1028 regset->size = (features.htm ?
1029 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
1030 break;
1031 case NT_PPC_TM_CTAR:
1032 regset->size = (features.htm ?
1033 PPC_LINUX_SIZEOF_CTARREGSET : 0);
1034 break;
1035 default:
1036 break;
1037 }
1038 break;
1039 default:
1040 break;
1041 }
1042 }
1043
1044 /* Implementation of target ops method "supports_tracepoints". */
1045
1046 bool
1047 ppc_target::supports_tracepoints ()
1048 {
1049 return true;
1050 }
1051
1052 /* Get the thread area address. This is used to recognize which
1053 thread is which when tracing with the in-process agent library. We
1054 don't read anything from the address, and treat it as opaque; it's
1055 the address itself that we assume is unique per-thread. */
1056
1057 int
1058 ppc_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
1059 {
1060 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1061 struct thread_info *thr = get_lwp_thread (lwp);
1062 struct regcache *regcache = get_thread_regcache (thr, 1);
1063 ULONGEST tp = 0;
1064
1065 #ifdef __powerpc64__
1066 if (register_size (regcache->tdesc, 0) == 8)
1067 collect_register_by_name (regcache, "r13", &tp);
1068 else
1069 #endif
1070 collect_register_by_name (regcache, "r2", &tp);
1071
1072 *addr = tp;
1073
1074 return 0;
1075 }
1076
1077 #ifdef __powerpc64__
1078
1079 /* Older glibc doesn't provide this. */
1080
1081 #ifndef EF_PPC64_ABI
1082 #define EF_PPC64_ABI 3
1083 #endif
1084
1085 /* Returns 1 if the inferior is using the ELFv2 ABI. Undefined for 32-bit
1086 inferiors. */
1087
1088 static int
1089 is_elfv2_inferior (void)
1090 {
1091 /* To be used as fallback if we're unable to determine the right result -
1092 assume inferior uses the same ABI as gdbserver. */
1093 #if _CALL_ELF == 2
1094 const int def_res = 1;
1095 #else
1096 const int def_res = 0;
1097 #endif
1098 CORE_ADDR phdr;
1099 Elf64_Ehdr ehdr;
1100
1101 const struct target_desc *tdesc = current_process ()->tdesc;
1102 int wordsize = register_size (tdesc, 0);
1103
1104 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1105 return def_res;
1106
1107 /* Assume ELF header is at the beginning of the page where program headers
1108 are located. If it doesn't look like one, bail. */
1109
1110 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1111 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1112 return def_res;
1113
1114 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1115 }
1116
1117 #endif
1118
1119 /* Generate a ds-form instruction in BUF and return the number of instructions written.
1120
1121 0 6 11 16 30 32
1122 | OPCD | RST | RA | DS |XO| */
1123
1124 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1125 static int
1126 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1127 {
1128 uint32_t insn;
1129
1130 gdb_assert ((opcd & ~0x3f) == 0);
1131 gdb_assert ((rst & ~0x1f) == 0);
1132 gdb_assert ((ra & ~0x1f) == 0);
1133 gdb_assert ((xo & ~0x3) == 0);
1134
1135 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1136 *buf = (opcd << 26) | insn;
1137 return 1;
1138 }
1139
1140 /* The following are frequently used ds-form instructions. */
1141
1142 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1143 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1144 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1145 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
1146
1147 /* Generate a d-form instruction in BUF.
1148
1149 0 6 11 16 32
1150 | OPCD | RST | RA | D | */
1151
1152 static int
1153 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1154 {
1155 uint32_t insn;
1156
1157 gdb_assert ((opcd & ~0x3f) == 0);
1158 gdb_assert ((rst & ~0x1f) == 0);
1159 gdb_assert ((ra & ~0x1f) == 0);
1160
1161 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1162 *buf = (opcd << 26) | insn;
1163 return 1;
1164 }
1165
1166 /* The following are frequently used d-form instructions. */
1167
1168 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1169 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1170 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1171 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1172 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1173 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1174 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1175 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1176 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
1177
1178 /* Generate an xfx-form instruction in BUF and return the number of
1179 instructions written.
1180
1181 0 6 11 21 31 32
1182 | OPCD | RST | RI | XO |/| */
1183
1184 static int
1185 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1186 {
1187 uint32_t insn;
1188 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1189
1190 gdb_assert ((opcd & ~0x3f) == 0);
1191 gdb_assert ((rst & ~0x1f) == 0);
1192 gdb_assert ((xo & ~0x3ff) == 0);
1193
1194 insn = (rst << 21) | (n << 11) | (xo << 1);
1195 *buf = (opcd << 26) | insn;
1196 return 1;
1197 }
1198
1199 /* The following are frequently used xfx-form instructions. */
1200
1201 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1202 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1203 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1204 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1205 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1206 E & 0xf, 598)
1207 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
1208
1209
1210 /* Generate an x-form instruction in BUF and return the number of instructions written.
1211
1212 0 6 11 16 21 31 32
1213 | OPCD | RST | RA | RB | XO |RC| */
1214
1215 static int
1216 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1217 {
1218 uint32_t insn;
1219
1220 gdb_assert ((opcd & ~0x3f) == 0);
1221 gdb_assert ((rst & ~0x1f) == 0);
1222 gdb_assert ((ra & ~0x1f) == 0);
1223 gdb_assert ((rb & ~0x1f) == 0);
1224 gdb_assert ((xo & ~0x3ff) == 0);
1225 gdb_assert ((rc & ~1) == 0);
1226
1227 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1228 *buf = (opcd << 26) | insn;
1229 return 1;
1230 }
1231
1232 /* The following are frequently used x-form instructions. */
1233
1234 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1235 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1236 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1237 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1238 /* Assume bf = cr7. */
1239 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
1240
1241
1242 /* Generate an md-form instruction in BUF and return the number of instructions written.
1243
1244 0 6 11 16 21 27 30 31 32
1245 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1246
1247 static int
1248 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1249 int xo, int rc)
1250 {
1251 uint32_t insn;
1252 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1253 unsigned int sh0_4 = sh & 0x1f;
1254 unsigned int sh5 = (sh >> 5) & 1;
1255
1256 gdb_assert ((opcd & ~0x3f) == 0);
1257 gdb_assert ((rs & ~0x1f) == 0);
1258 gdb_assert ((ra & ~0x1f) == 0);
1259 gdb_assert ((sh & ~0x3f) == 0);
1260 gdb_assert ((mb & ~0x3f) == 0);
1261 gdb_assert ((xo & ~0x7) == 0);
1262 gdb_assert ((rc & ~0x1) == 0);
1263
1264 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1265 | (sh5 << 1) | (xo << 2) | (rc & 1);
1266 *buf = (opcd << 26) | insn;
1267 return 1;
1268 }
1269
1270 /* The following are frequently used md-form instructions. */
1271
1272 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1273 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1274 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1275 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
1276
1277 /* Generate an i-form instruction in BUF and return the number of instructions written.
1278
1279 0 6 30 31 32
1280 | OPCD | LI |AA|LK| */
1281
1282 static int
1283 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1284 {
1285 uint32_t insn;
1286
1287 gdb_assert ((opcd & ~0x3f) == 0);
1288
1289 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1290 *buf = (opcd << 26) | insn;
1291 return 1;
1292 }
1293
1294 /* The following are frequently used i-form instructions. */
1295
1296 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1297 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
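
/* For example, GEN_B (buf, 8) encodes (18 << 26) | 8 = 0x48000008,
   i.e. "b .+8", and GEN_BL (buf, 8) additionally sets the LK bit,
   giving 0x48000009 ("bl .+8").  */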
1298
1299 /* Generate a b-form instruction in BUF and return the number of instructions written.
1300
1301 0 6 11 16 30 31 32
1302 | OPCD | BO | BI | BD |AA|LK| */
1303
1304 static int
1305 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1306 int aa, int lk)
1307 {
1308 uint32_t insn;
1309
1310 gdb_assert ((opcd & ~0x3f) == 0);
1311 gdb_assert ((bo & ~0x1f) == 0);
1312 gdb_assert ((bi & ~0x1f) == 0);
1313
1314 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1315 *buf = (opcd << 26) | insn;
1316 return 1;
1317 }
1318
1319 /* The following are frequently used b-form instructions. */
1320 /* Assume bi = cr7. */
1321 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
1322
1323 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1324 respectively. They are primarily used to save/restore GPRs in the jump pad,
1325 not for bytecode compilation. */
1326
1327 #ifdef __powerpc64__
1328 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1329 GEN_LD (buf, rt, ra, si) : \
1330 GEN_LWZ (buf, rt, ra, si))
1331 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1332 GEN_STD (buf, rt, ra, si) : \
1333 GEN_STW (buf, rt, ra, si))
1334 #else
1335 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1336 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1337 #endif
1338
1339 /* Generate a sequence of instructions to load IMM into register REG.
1340 Write the instructions in BUF and return the number of instructions written. */
1341
1342 static int
1343 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1344 {
1345 uint32_t *p = buf;
1346
1347 if ((imm + 32768) < 65536)
1348 {
1349 /* li reg, imm[15:0] */
1350 p += GEN_LI (p, reg, imm);
1351 }
1352 else if ((imm >> 32) == 0)
1353 {
1354 /* lis reg, imm[31:16]
1355 ori reg, reg, imm[15:0]
1356 rldicl reg, reg, 0, 32 */
1357 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1358 if ((imm & 0xffff) != 0)
1359 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1360 /* Clear upper 32-bit if sign-bit is set. */
1361 if (imm & (1u << 31) && is_64)
1362 p += GEN_RLDICL (p, reg, reg, 0, 32);
1363 }
1364 else
1365 {
1366 gdb_assert (is_64);
1367 /* lis reg, <imm[63:48]>
1368 ori reg, reg, <imm[48:32]>
1369 rldicr reg, reg, 32, 31
1370 oris reg, reg, <imm[31:16]>
1371 ori reg, reg, <imm[15:0]> */
1372 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1373 if (((imm >> 32) & 0xffff) != 0)
1374 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1375 p += GEN_RLDICR (p, reg, reg, 32, 31);
1376 if (((imm >> 16) & 0xffff) != 0)
1377 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1378 if ((imm & 0xffff) != 0)
1379 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1380 }
1381
1382 return p - buf;
1383 }
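
/* A sketch of the sequences gen_limm produces for sample immediates:

     gen_limm (p, 3, 0x1234, is_64)   =>  li   3, 0x1234

     gen_limm (p, 3, 0x12345678, 1)   =>  lis  3, 0x1234
                                          ori  3, 3, 0x5678

     gen_limm (p, 3, 0x123456789abcdef0, 1)
                                      =>  lis  3, 0x1234
                                          ori  3, 3, 0x5678
                                          rldicr 3, 3, 32, 31
                                          oris 3, 3, 0x9abc
                                          ori  3, 3, 0xdef0  */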
1384
1385 /* Generate a sequence for an atomic exchange at location LOCK.
1386 This code sequence clobbers r6, r7, r8. LOCK is the location for
1387 the atomic exchange, OLD_VALUE is the expected old value stored at the
1388 location, and R_NEW is the register holding the new value. */
1389
1390 static int
1391 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1392 int is_64)
1393 {
1394 const int r_lock = 6;
1395 const int r_old = 7;
1396 const int r_tmp = 8;
1397 uint32_t *p = buf;
1398
1399 /*
1400 1: lwarx TMP, 0, LOCK
1401 cmpwi TMP, OLD
1402 bne 1b
1403 stwcx. NEW, 0, LOCK
1404 bne 1b */
1405
1406 p += gen_limm (p, r_lock, lock, is_64);
1407 p += gen_limm (p, r_old, old_value, is_64);
1408
1409 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1410 p += GEN_CMPW (p, r_tmp, r_old);
1411 p += GEN_BNE (p, -8);
1412 p += GEN_STWCX (p, r_new, 0, r_lock);
1413 p += GEN_BNE (p, -16);
1414
1415 return p - buf;
1416 }
1417
1418 /* Generate a sequence of instructions for calling a function
1419 at address FN. Return the number of instructions written to BUF. */
1420
1421 static int
1422 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1423 {
1424 uint32_t *p = buf;
1425
1426 /* Must be called via r12 so the callee can calculate the TOC address. */
1427 p += gen_limm (p, 12, fn, is_64);
1428 if (is_opd)
1429 {
1430 p += GEN_LOAD (p, 11, 12, 16, is_64);
1431 p += GEN_LOAD (p, 2, 12, 8, is_64);
1432 p += GEN_LOAD (p, 12, 12, 0, is_64);
1433 }
1434 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1435 *p++ = 0x4e800421; /* bctrl */
1436
1437 return p - buf;
1438 }
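
/* Note: with IS_OPD set (64-bit ELFv1), FN is the address of a function
   descriptor rather than of code, so the loads above fetch the
   environment pointer (offset 16) into r11, the callee's TOC pointer
   (offset 8) into r2, and the actual entry point (offset 0) into r12
   before the bctrl.  */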
1439
1440 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1441 of instruction. This function is used to adjust pc-relative instructions
1442 when copying. */
1443
1444 static void
1445 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1446 {
1447 uint32_t insn, op6;
1448 long rel, newrel;
1449
1450 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1451 op6 = PPC_OP6 (insn);
1452
1453 if (op6 == 18 && (insn & 2) == 0)
1454 {
1455 /* branch && AA = 0 */
1456 rel = PPC_LI (insn);
1457 newrel = (oldloc - *to) + rel;
1458
1459 /* Out of range. Cannot relocate instruction. */
1460 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1461 return;
1462
1463 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1464 }
1465 else if (op6 == 16 && (insn & 2) == 0)
1466 {
1467 /* conditional branch && AA = 0 */
1468
1469 /* If the new relocation is too big for even a 26-bit unconditional
1470 branch, there is nothing we can do. Just abort.
1471
1472 Otherwise, if it fits in a 16-bit conditional branch, just
1473 copy the instruction and relocate the address.
1474
1475 If it is too big for a 16-bit conditional branch, try to invert the
1476 condition and jump with a 26-bit branch. For example,
1477
1478 beq .Lgoto
1479 INSN1
1480
1481 =>
1482
1483 bne 1f (+8)
1484 b .Lgoto
1485 1:INSN1
1486
1487 After this transform, we actually jump from *TO+4 instead of *TO,
1488 so check the relocation again because it will be one instruction farther
1489 than before if *TO is after OLDLOC.
1490
1491
1492 A BDNZT (or similar) instruction is transformed from
1493
1494 bdnzt eq, .Lgoto
1495 INSN1
1496
1497 =>
1498
1499 bdz 1f (+12)
1500 bf eq, 1f (+8)
1501 b .Lgoto
1502 1:INSN1
1503
1504 See also "BO field encodings". */
1505
1506 rel = PPC_BD (insn);
1507 newrel = (oldloc - *to) + rel;
1508
1509 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1510 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1511 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1512 {
1513 newrel -= 4;
1514
1515 /* Out of range. Cannot relocate instruction. */
1516 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1517 return;
1518
1519 if ((PPC_BO (insn) & 0x14) == 0x4)
1520 insn ^= (1 << 24);
1521 else if ((PPC_BO (insn) & 0x14) == 0x10)
1522 insn ^= (1 << 22);
1523
1524 /* Jump over the unconditional branch. */
1525 insn = (insn & ~0xfffc) | 0x8;
1526 target_write_memory (*to, (unsigned char *) &insn, 4);
1527 *to += 4;
1528
1529 /* Build an unconditional branch and copy the LK bit. */
1530 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1531 target_write_memory (*to, (unsigned char *) &insn, 4);
1532 *to += 4;
1533
1534 return;
1535 }
1536 else if ((PPC_BO (insn) & 0x14) == 0)
1537 {
1538 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1539 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1540
1541 newrel -= 8;
1542
1543 /* Out of range. Cannot relocate instruction. */
1544 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1545 return;
1546
1547 /* Copy BI field. */
1548 bf_insn |= (insn & 0x1f0000);
1549
1550 /* Invert condition. */
1551 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1552 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1553
1554 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1555 *to += 4;
1556 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1557 *to += 4;
1558
1559 /* Build an unconditional branch and copy the LK bit. */
1560 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1561 target_write_memory (*to, (unsigned char *) &insn, 4);
1562 *to += 4;
1563
1564 return;
1565 }
1566 else /* (BO & 0x14) == 0x14, branch always. */
1567 {
1568 /* Out of range. Cannot relocate instruction. */
1569 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1570 return;
1571
1572 /* Build an unconditional branch and copy the LK bit. */
1573 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1574 target_write_memory (*to, (unsigned char *) &insn, 4);
1575 *to += 4;
1576
1577 return;
1578 }
1579 }
1580
1581 target_write_memory (*to, (unsigned char *) &insn, 4);
1582 *to += 4;
1583 }
1584
1585 bool
1586 ppc_target::supports_fast_tracepoints ()
1587 {
1588 return true;
1589 }
1590
1591 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1592 See target.h for details. */
1593
1594 int
1595 ppc_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1596 CORE_ADDR tpaddr,
1597 CORE_ADDR collector,
1598 CORE_ADDR lockaddr,
1599 ULONGEST orig_size,
1600 CORE_ADDR *jump_entry,
1601 CORE_ADDR *trampoline,
1602 ULONGEST *trampoline_size,
1603 unsigned char *jjump_pad_insn,
1604 ULONGEST *jjump_pad_insn_size,
1605 CORE_ADDR *adjusted_insn_addr,
1606 CORE_ADDR *adjusted_insn_addr_end,
1607 char *err)
1608 {
1609 uint32_t buf[256];
1610 uint32_t *p = buf;
1611 int j, offset;
1612 CORE_ADDR buildaddr = *jump_entry;
1613 const CORE_ADDR entryaddr = *jump_entry;
1614 int rsz, min_frame, frame_size, tp_reg;
1615 #ifdef __powerpc64__
1616 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1617 int is_64 = register_size (regcache->tdesc, 0) == 8;
1618 int is_opd = is_64 && !is_elfv2_inferior ();
1619 #else
1620 int is_64 = 0, is_opd = 0;
1621 #endif
1622
1623 #ifdef __powerpc64__
1624 if (is_64)
1625 {
1626 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1627 rsz = 8;
1628 min_frame = 112;
1629 frame_size = (40 * rsz) + min_frame;
1630 tp_reg = 13;
1631 }
1632 else
1633 {
1634 #endif
1635 rsz = 4;
1636 min_frame = 16;
1637 frame_size = (40 * rsz) + min_frame;
1638 tp_reg = 2;
1639 #ifdef __powerpc64__
1640 }
1641 #endif
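
  /* With the values above, the jump pad frame is 40 * 8 + 112 = 432 bytes
     for 64-bit and 40 * 4 + 16 = 176 bytes for 32-bit: 40 register-sized
     save slots on top of the minimum ABI frame.  */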
1642
1643 /* Stack frame layout for this jump pad,
1644
1645 High thread_area (r13/r2) |
1646 tpoint - collecting_t obj
1647 PC/<tpaddr> | +36
1648 CTR | +35
1649 LR | +34
1650 XER | +33
1651 CR | +32
1652 R31 |
1653 R29 |
1654 ... |
1655 R1 | +1
1656 R0 - collected registers
1657 ... |
1658 ... |
1659 Low Back-chain -
1660
1661
1662 The code flow of this jump pad,
1663
1664 1. Adjust SP
1665 2. Save GPR and SPR
1666 3. Prepare argument
1667 4. Call gdb_collector
1668 5. Restore GPR and SPR
1669 6. Restore SP
1670 7. Build a jump back to the program
1671 8. Copy/relocate the original instruction
1672 9. Build a jump to replace the original instruction. */
1673
1674 /* Adjust stack pointer. */
1675 if (is_64)
1676 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1677 else
1678 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1679
1680 /* Store GPRs. Save R1 later, because it had just been modified, but
1681 we want the original value. */
1682 for (j = 2; j < 32; j++)
1683 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1684 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1685 /* Set r0 to the original value of r1 before adjusting stack frame,
1686 and then save it. */
1687 p += GEN_ADDI (p, 0, 1, frame_size);
1688 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1689
1690 /* Save CR, XER, LR, and CTR. */
1691 p += GEN_MFCR (p, 3); /* mfcr r3 */
1692 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1693 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1694 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1695 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1696 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1697 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1698 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1699
1700 /* Save PC<tpaddr> */
1701 p += gen_limm (p, 3, tpaddr, is_64);
1702 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1703
1704
1705 /* Setup arguments to collector. */
1706 /* Set r4 to collected registers. */
1707 p += GEN_ADDI (p, 4, 1, min_frame);
1708 /* Set r3 to TPOINT. */
1709 p += gen_limm (p, 3, tpoint, is_64);
1710
1711 /* Prepare collecting_t object for lock. */
1712 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1713 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1714 /* Set R5 to collecting object. */
1715 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1716
1717 p += GEN_LWSYNC (p);
1718 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1719 p += GEN_LWSYNC (p);
1720
1721 /* Call to collector. */
1722 p += gen_call (p, collector, is_64, is_opd);
1723
1724 /* Simply write 0 to release the lock. */
1725 p += gen_limm (p, 3, lockaddr, is_64);
1726 p += gen_limm (p, 4, 0, is_64);
1727 p += GEN_LWSYNC (p);
1728 p += GEN_STORE (p, 4, 3, 0, is_64);
1729
1730 /* Restore stack and registers. */
1731 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1732 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1733 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1734 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1735 p += GEN_MTCR (p, 3); /* mtcr r3 */
1736 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1737 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1738 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1739
1740 /* Restore GPRs. */
1741 for (j = 2; j < 32; j++)
1742 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1743 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1744 /* Restore SP. */
1745 p += GEN_ADDI (p, 1, 1, frame_size);
1746
1747 /* Flush instructions to inferior memory. */
1748 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1749
1750 /* Now, insert the original instruction to execute in the jump pad. */
1751 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1752 *adjusted_insn_addr_end = *adjusted_insn_addr;
1753 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1754
1755 /* Verify the relocation size. It should be 4 for a normal copy,
1756 or 8 or 12 for some conditional branches. */
1757 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1758 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1759 {
1760 sprintf (err, "E.Unexpected instruction length = %d "
1761 "when relocating instruction.",
1762 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1763 return 1;
1764 }
1765
1766 buildaddr = *adjusted_insn_addr_end;
1767 p = buf;
1768 /* Finally, write a jump back to the program. */
1769 offset = (tpaddr + 4) - buildaddr;
1770 if (offset >= (1 << 25) || offset < -(1 << 25))
1771 {
1772 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1773 "(offset 0x%x > 26-bit).", offset);
1774 return 1;
1775 }
1776 /* b <tpaddr+4> */
1777 p += GEN_B (p, offset);
1778 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1779 *jump_entry = buildaddr + (p - buf) * 4;
1780
1781 /* The jump pad is now built. Wire in a jump to our jump pad. This
1782 is always done last (by our caller actually), so that we can
1783 install fast tracepoints with threads running. This relies on
1784 the agent's atomic write support. */
1785 offset = entryaddr - tpaddr;
1786 if (offset >= (1 << 25) || offset < -(1 << 25))
1787 {
1788 sprintf (err, "E.Jump to jump pad too far from tracepoint "
1789 "(offset 0x%x > 26-bit).", offset);
1790 return 1;
1791 }
1792 /* b <jentry> */
1793 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1794 *jjump_pad_insn_size = 4;
1795
1796 return 0;
1797 }
1798
1799 /* Returns the minimum instruction length for installing a tracepoint. */
1800
1801 int
1802 ppc_target::get_min_fast_tracepoint_insn_len ()
1803 {
1804 return 4;
1805 }
1806
1807 /* Emits a given buffer into the target at current_insn_ptr. Length
1808 is in units of 32-bit words. */
1809
1810 static void
1811 emit_insns (uint32_t *buf, int n)
1812 {
1813 n = n * sizeof (uint32_t);
1814 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1815 current_insn_ptr += n;
1816 }
1817
1818 #define __EMIT_ASM(NAME, INSNS) \
1819 do \
1820 { \
1821 extern uint32_t start_bcax_ ## NAME []; \
1822 extern uint32_t end_bcax_ ## NAME []; \
1823 emit_insns (start_bcax_ ## NAME, \
1824 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1825 __asm__ (".section .text.__ppcbcax\n\t" \
1826 "start_bcax_" #NAME ":\n\t" \
1827 INSNS "\n\t" \
1828 "end_bcax_" #NAME ":\n\t" \
1829 ".previous\n\t"); \
1830 } while (0)
1831
1832 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1833 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
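
/* In effect, each EMIT_ASM use assembles INSNS into gdbserver's own
   .text.__ppcbcax section, bracketed by start_bcax_<line> and
   end_bcax_<line> labels, and emit_insns then copies those pre-assembled
   instruction words into the inferior at current_insn_ptr.  */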
1834
1835 /*
1836
1837 Bytecode execution stack frame - 32-bit
1838
1839 | LR save area (SP + 4)
1840 SP' -> +- Back chain (SP + 0)
1841 | Save r31 for accessing saved arguments
1842 | Save r30 for bytecode stack pointer
1843 | Save r4 for incoming argument *value
1844 | Save r3 for incoming argument regs
1845 r30 -> +- Bytecode execution stack
1846 |
1847 | 64 bytes (8 doublewords) initially.
1848 | Expand stack as needed.
1849 |
1850 +-
1851 | Some padding for minimum stack frame and 16-byte alignment.
1852 | 16 bytes.
1853 SP +- Back-chain (SP')
1854
1855 initial frame size
1856 = 16 + (4 * 4) + 64
1857 = 96
1858
1859 r30 is the stack pointer for the bytecode machine.
1860 It should point to the next empty slot, so we can use a load-with-update (lwzu) for pop.
1861 r3 is used as a cache of the high part of the TOP value.
1862 It was the first argument, the pointer to regs.
1863 r4 is used as a cache of the low part of the TOP value.
1864 It was the second argument, the pointer to the result.
1865 We should set *result = TOP before returning from this function.
1866
1867 Note:
1868 * To restore stack at epilogue
1869 => sp = r31
1870 * To check stack is big enough for bytecode execution.
1871 => r30 - 8 > SP + 8
1872 * To return execution result.
1873 => 0(r4) = TOP
1874
1875 */
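
/* The prologue and epilogue below hard-code the offsets from the diagram
   above.  For readability, here is the same layout expressed as constants;
   this is only an illustrative sketch and the names are invented here.  */
#if 0
enum
{
  PPC32_BC_FRAME_SIZE   = 96,		/* 16 pad + 4 * 4 saves + 64 stack.  */
  PPC32_BC_SAVE_R31     = 96 - 4,	/* "stw 31, 96-4(1)"  */
  PPC32_BC_SAVE_R30     = 96 - 8,	/* "stw 30, 96-8(1)"  */
  PPC32_BC_SAVE_VALUE   = 96 - 12,	/* Incoming *value argument (r4).  */
  PPC32_BC_SAVE_REGS    = 96 - 16,	/* Incoming regs argument (r3).  */
  PPC32_BC_STACK_TOP    = 64		/* "addi 30, 1, 64": initial r30.  */
};
#endif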
1876
1877 /* Regardless of endianness, register 3 is always the high part, 4 the low part.
1878 These defines are used when the register pair is stored/loaded.
1879 Likewise, to simplify code, have a similar define for 5:6. */
1880
1881 #if __BYTE_ORDER == __LITTLE_ENDIAN
1882 #define TOP_FIRST "4"
1883 #define TOP_SECOND "3"
1884 #define TMP_FIRST "6"
1885 #define TMP_SECOND "5"
1886 #else
1887 #define TOP_FIRST "3"
1888 #define TOP_SECOND "4"
1889 #define TMP_FIRST "5"
1890 #define TMP_SECOND "6"
1891 #endif
1892
1893 /* Emit prologue in inferior memory. See above comments. */
1894
1895 static void
1896 ppc_emit_prologue (void)
1897 {
1898 EMIT_ASM (/* Save return address. */
1899 "mflr 0 \n"
1900 "stw 0, 4(1) \n"
1901 /* Adjust SP. 96 is the initial frame size. */
1902 "stwu 1, -96(1) \n"
1903 /* Save r31, r30 and the incoming arguments. */
1904 "stw 31, 96-4(1) \n"
1905 "stw 30, 96-8(1) \n"
1906 "stw 4, 96-12(1) \n"
1907 "stw 3, 96-16(1) \n"
1908 /* Point r31 to original r1 for accessing arguments. */
1909 "addi 31, 1, 96 \n"
1910 /* Set r30 to point at the stack top. */
1911 "addi 30, 1, 64 \n"
1912 /* Initialize r3/TOP to 0. */
1913 "li 3, 0 \n"
1914 "li 4, 0 \n");
1915 }
1916
1917 /* Emit epilogue in inferior memory. See above comments. */
1918
1919 static void
1920 ppc_emit_epilogue (void)
1921 {
1922 EMIT_ASM (/* *result = TOP */
1923 "lwz 5, -12(31) \n"
1924 "stw " TOP_FIRST ", 0(5) \n"
1925 "stw " TOP_SECOND ", 4(5) \n"
1926 /* Restore registers. */
1927 "lwz 31, -4(31) \n"
1928 "lwz 30, -8(31) \n"
1929 /* Restore SP. */
1930 "lwz 1, 0(1) \n"
1931 /* Restore LR. */
1932 "lwz 0, 4(1) \n"
1933 /* Return 0 for no-error. */
1934 "li 3, 0 \n"
1935 "mtlr 0 \n"
1936 "blr \n");
1937 }
1938
1939 /* TOP = stack[--sp] + TOP */
1940
1941 static void
1942 ppc_emit_add (void)
1943 {
1944 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1945 "lwz " TMP_SECOND ", 4(30)\n"
1946 "addc 4, 6, 4 \n"
1947 "adde 3, 5, 3 \n");
1948 }
1949
1950 /* TOP = stack[--sp] - TOP */
1951
1952 static void
1953 ppc_emit_sub (void)
1954 {
1955 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1956 "lwz " TMP_SECOND ", 4(30) \n"
1957 "subfc 4, 4, 6 \n"
1958 "subfe 3, 3, 5 \n");
1959 }
1960
1961 /* TOP = stack[--sp] * TOP */
1962
1963 static void
1964 ppc_emit_mul (void)
1965 {
1966 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1967 "lwz " TMP_SECOND ", 4(30) \n"
1968 "mulhwu 7, 6, 4 \n"
1969 "mullw 3, 6, 3 \n"
1970 "mullw 5, 4, 5 \n"
1971 "mullw 4, 6, 4 \n"
1972 "add 3, 5, 3 \n"
1973 "add 3, 7, 3 \n");
1974 }
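
/* The sequence above is schoolbook multiplication on 32-bit halves: the
   low product supplies the new low word plus a carry into the high word,
   the two cross products land in the high word, and the hi * hi term is
   dropped because it only affects bits 64 and up.  The same computation
   in C, as an illustration (the helper name is made up):  */
#if 0
static void
mul_64 (uint32_t *hi, uint32_t *lo, uint32_t hi_b, uint32_t lo_b)
{
  uint32_t hi_a = *hi, lo_a = *lo;
  uint32_t carry = (uint32_t) (((uint64_t) lo_a * lo_b) >> 32); /* mulhwu */

  *lo = lo_a * lo_b;				/* mullw 4, 6, 4 */
  *hi = lo_a * hi_b + lo_b * hi_a + carry;	/* mullw, mullw, add, add */
}
#endif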
1975
1976 /* TOP = stack[--sp] << TOP */
1977
1978 static void
1979 ppc_emit_lsh (void)
1980 {
1981 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1982 "lwz " TMP_SECOND ", 4(30) \n"
1983 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1984 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1985 "slw 5, 5, 4\n" /* Shift high part left */
1986 "slw 4, 6, 4\n" /* Shift low part left */
1987 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1988 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1989 "or 3, 5, 3\n"
1990 "or 3, 7, 3\n"); /* Assemble high part */
1991 }
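
/* slw/srw look at the low six bits of the shift amount and yield zero
   whenever that value is 32 or more, so the sequence above builds a
   64-bit shift from 32-bit pieces: one cross term covers shifts below 32
   and the other covers shifts of 32 and up.  A C rendering of the same
   algorithm, illustrative only (helper names are made up):  */
#if 0
static uint32_t
shl_32 (uint32_t v, unsigned int amt)
{
  amt &= 63;				/* Mimic slw's six-bit amount.  */
  return amt < 32 ? v << amt : 0;
}

static uint32_t
shr_32 (uint32_t v, unsigned int amt)
{
  amt &= 63;				/* Mimic srw's six-bit amount.  */
  return amt < 32 ? v >> amt : 0;
}

static void
lsh_64 (uint32_t *hi, uint32_t *lo, unsigned int sh)
{
  uint32_t new_hi = shl_32 (*hi, sh)	/* slw 5, 5, 4 */
    | shr_32 (*lo, 32u - sh)		/* srw 3, 6, 3 (sh < 32 case) */
    | shl_32 (*lo, sh - 32u);		/* slw 7, 6, 7 (sh >= 32 case) */

  *lo = shl_32 (*lo, sh);		/* slw 4, 6, 4 */
  *hi = new_hi;
}
#endif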
1992
1993 /* TOP = stack[--sp] >> TOP
1994 (Arithmetic shift right) */
1995
1996 static void
1997 ppc_emit_rsh_signed (void)
1998 {
1999 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2000 "lwz " TMP_SECOND ", 4(30) \n"
2001 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2002 "sraw 3, 5, 4\n" /* Shift high part right */
2003 "cmpwi 7, 1\n"
2004 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
2005 "sraw 4, 5, 7\n" /* Shift high to low */
2006 "b 2f\n"
2007 "1:\n"
2008 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
2009 "srw 4, 6, 4\n" /* Shift low part right */
2010 "slw 5, 5, 7\n" /* Shift high to low */
2011 "or 4, 4, 5\n" /* Assemble low part */
2012 "2:\n");
2013 }
2014
2015 /* TOP = stack[--sp] >> TOP
2016 (Logical shift right) */
2017
2018 static void
2019 ppc_emit_rsh_unsigned (void)
2020 {
2021 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2022 "lwz " TMP_SECOND ", 4(30) \n"
2023 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2024 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2025 "srw 6, 6, 4\n" /* Shift low part right */
2026 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
2027 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
2028 "or 6, 6, 3\n"
2029 "srw 3, 5, 4\n" /* Shift high part right */
2030 "or 4, 6, 7\n"); /* Assemble low part */
2031 }
2032
2033 /* Emit code for the sign extension specified by ARG. */
2034
2035 static void
2036 ppc_emit_ext (int arg)
2037 {
2038 switch (arg)
2039 {
2040 case 8:
2041 EMIT_ASM ("extsb 4, 4\n"
2042 "srawi 3, 4, 31");
2043 break;
2044 case 16:
2045 EMIT_ASM ("extsh 4, 4\n"
2046 "srawi 3, 4, 31");
2047 break;
2048 case 32:
2049 EMIT_ASM ("srawi 3, 4, 31");
2050 break;
2051 default:
2052 emit_error = 1;
2053 }
2054 }
2055
2056 /* Emit code for zero-extension specified by ARG. */
2057
2058 static void
2059 ppc_emit_zero_ext (int arg)
2060 {
2061 switch (arg)
2062 {
2063 case 8:
2064 EMIT_ASM ("clrlwi 4,4,24\n"
2065 "li 3, 0\n");
2066 break;
2067 case 16:
2068 EMIT_ASM ("clrlwi 4,4,16\n"
2069 "li 3, 0\n");
2070 break;
2071 case 32:
2072 EMIT_ASM ("li 3, 0");
2073 break;
2074 default:
2075 emit_error = 1;
2076 }
2077 }
2078
2079 /* TOP = !TOP
2080 i.e., TOP = (TOP == 0) ? 1 : 0; */
2081
2082 static void
2083 ppc_emit_log_not (void)
2084 {
2085 EMIT_ASM ("or 4, 3, 4 \n"
2086 "cntlzw 4, 4 \n"
2087 "srwi 4, 4, 5 \n"
2088 "li 3, 0 \n");
2089 }
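
/* The logical-not above is branch-free: cntlzw returns 32 only when its
   operand is zero, so shifting the count right by 5 yields 1 for zero and
   0 for everything else.  The same trick in C, using GCC's __builtin_clz
   to stand in for cntlzw (illustrative only, name made up):  */
#if 0
static uint32_t
log_not_64 (uint32_t hi, uint32_t lo)
{
  uint32_t x = hi | lo;				/* or 4, 3, 4 */
  unsigned int lz = x ? __builtin_clz (x) : 32;	/* cntlzw 4, 4 */
  return lz >> 5;				/* srwi 4, 4, 5 */
}
#endif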
2090
2091 /* TOP = stack[--sp] & TOP */
2092
2093 static void
2094 ppc_emit_bit_and (void)
2095 {
2096 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2097 "lwz " TMP_SECOND ", 4(30) \n"
2098 "and 4, 6, 4 \n"
2099 "and 3, 5, 3 \n");
2100 }
2101
2102 /* TOP = stack[--sp] | TOP */
2103
2104 static void
2105 ppc_emit_bit_or (void)
2106 {
2107 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2108 "lwz " TMP_SECOND ", 4(30) \n"
2109 "or 4, 6, 4 \n"
2110 "or 3, 5, 3 \n");
2111 }
2112
2113 /* TOP = stack[--sp] ^ TOP */
2114
2115 static void
2116 ppc_emit_bit_xor (void)
2117 {
2118 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2119 "lwz " TMP_SECOND ", 4(30) \n"
2120 "xor 4, 6, 4 \n"
2121 "xor 3, 5, 3 \n");
2122 }
2123
2124 /* TOP = ~TOP
2125 i.e., TOP = ~(TOP | TOP) */
2126
2127 static void
2128 ppc_emit_bit_not (void)
2129 {
2130 EMIT_ASM ("nor 3, 3, 3 \n"
2131 "nor 4, 4, 4 \n");
2132 }
2133
2134 /* TOP = stack[--sp] == TOP */
2135
2136 static void
2137 ppc_emit_equal (void)
2138 {
2139 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2140 "lwz " TMP_SECOND ", 4(30) \n"
2141 "xor 4, 6, 4 \n"
2142 "xor 3, 5, 3 \n"
2143 "or 4, 3, 4 \n"
2144 "cntlzw 4, 4 \n"
2145 "srwi 4, 4, 5 \n"
2146 "li 3, 0 \n");
2147 }
2148
2149 /* TOP = stack[--sp] < TOP
2150 (Signed comparison) */
2151
2152 static void
2153 ppc_emit_less_signed (void)
2154 {
2155 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2156 "lwz " TMP_SECOND ", 4(30) \n"
2157 "cmplw 6, 6, 4 \n"
2158 "cmpw 7, 5, 3 \n"
2159 /* CR6 bit 0 = low less and high equal */
2160 "crand 6*4+0, 6*4+0, 7*4+2\n"
2161 /* CR7 bit 0 = (low less and high equal) or high less */
2162 "cror 7*4+0, 7*4+0, 6*4+0\n"
2163 "mfcr 4 \n"
2164 "rlwinm 4, 4, 29, 31, 31 \n"
2165 "li 3, 0 \n");
2166 }
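
/* The CR bit gymnastics above compute an ordinary 64-bit signed
   comparison from one signed compare of the high words and one unsigned
   compare of the low words.  Spelled out in C (illustrative only, the
   helper name is made up):  */
#if 0
static int
less_signed_64 (int32_t hi_a, uint32_t lo_a, int32_t hi_b, uint32_t lo_b)
{
  /* cmpw 7: signed high compare; cmplw 6: unsigned low compare;
     crand/cror: high-less || (high-equal && low-less).  */
  return hi_a < hi_b || (hi_a == hi_b && lo_a < lo_b);
}
#endif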
2167
2168 /* TOP = stack[--sp] < TOP
2169 (Unsigned comparison) */
2170
2171 static void
2172 ppc_emit_less_unsigned (void)
2173 {
2174 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2175 "lwz " TMP_SECOND ", 4(30) \n"
2176 "cmplw 6, 6, 4 \n"
2177 "cmplw 7, 5, 3 \n"
2178 /* CR6 bit 0 = low less and high equal */
2179 "crand 6*4+0, 6*4+0, 7*4+2\n"
2180 /* CR7 bit 0 = (low less and high equal) or high less */
2181 "cror 7*4+0, 7*4+0, 6*4+0\n"
2182 "mfcr 4 \n"
2183 "rlwinm 4, 4, 29, 31, 31 \n"
2184 "li 3, 0 \n");
2185 }
2186
2187 /* Read SIZE bytes from the memory address in TOP.
2188 Zero-extend the value read. */
2189
2190 static void
2191 ppc_emit_ref (int size)
2192 {
2193 switch (size)
2194 {
2195 case 1:
2196 EMIT_ASM ("lbz 4, 0(4)\n"
2197 "li 3, 0");
2198 break;
2199 case 2:
2200 EMIT_ASM ("lhz 4, 0(4)\n"
2201 "li 3, 0");
2202 break;
2203 case 4:
2204 EMIT_ASM ("lwz 4, 0(4)\n"
2205 "li 3, 0");
2206 break;
2207 case 8:
2208 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2209 EMIT_ASM ("lwz 3, 4(4)\n"
2210 "lwz 4, 0(4)");
2211 else
2212 EMIT_ASM ("lwz 3, 0(4)\n"
2213 "lwz 4, 4(4)");
2214 break;
2215 }
2216 }
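
/* For the 8-byte case above, the 64-bit load is split into two word
   loads whose offsets depend on endianness, so that the high half always
   ends up in r3 and the low half in r4.  The equivalent C, illustrative
   only (the helper name is made up):  */
#if 0
static void
ref_8_bytes (const uint32_t *addr, uint32_t *hi, uint32_t *lo)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
  *hi = addr[1];			/* lwz 3, 4(4) */
  *lo = addr[0];			/* lwz 4, 0(4) */
#else
  *hi = addr[0];			/* lwz 3, 0(4) */
  *lo = addr[1];			/* lwz 4, 4(4) */
#endif
}
#endif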
2217
2218 /* TOP = NUM */
2219
2220 static void
2221 ppc_emit_const (LONGEST num)
2222 {
2223 uint32_t buf[10];
2224 uint32_t *p = buf;
2225
2226 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2227 p += gen_limm (p, 4, num & 0xffffffff, 0);
2228
2229 emit_insns (buf, p - buf);
2230 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2231 }
2232
2233 /* Set TOP to the value of register REG by calling the get_raw_reg function
2234 with two arguments, the collected buffer and the register number. */
2235
2236 static void
2237 ppc_emit_reg (int reg)
2238 {
2239 uint32_t buf[13];
2240 uint32_t *p = buf;
2241
2242 /* fctx->regs is passed in r3 and then saved in -16(31). */
2243 p += GEN_LWZ (p, 3, 31, -16);
2244 p += GEN_LI (p, 4, reg); /* li r4, reg */
2245 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2246
2247 emit_insns (buf, p - buf);
2248 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2249
2250 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2251 {
2252 EMIT_ASM ("mr 5, 4\n"
2253 "mr 4, 3\n"
2254 "mr 3, 5\n");
2255 }
2256 }
2257
2258 /* TOP = stack[--sp] */
2259
2260 static void
2261 ppc_emit_pop (void)
2262 {
2263 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2264 "lwz " TOP_SECOND ", 4(30) \n");
2265 }
2266
2267 /* stack[sp++] = TOP
2268
2269 Because we may run out of bytecode stack, expand it by 8 more
2270 doublewords if needed. */
2271
2272 static void
2273 ppc_emit_stack_flush (void)
2274 {
2275 /* Make sure the bytecode stack is big enough before the push.
2276 Otherwise, expand it by 64 more bytes. */
2277
2278 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2279 " stw " TOP_SECOND ", 4(30)\n"
2280 " addi 5, 30, -(8 + 8) \n"
2281 " cmpw 7, 5, 1 \n"
2282 " bgt 7, 1f \n"
2283 " stwu 31, -64(1) \n"
2284 "1:addi 30, 30, -8 \n");
2285 }
2286
2287 /* Swap TOP and stack[sp-1] */
2288
2289 static void
2290 ppc_emit_swap (void)
2291 {
2292 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2293 "lwz " TMP_SECOND ", 12(30) \n"
2294 "stw " TOP_FIRST ", 8(30) \n"
2295 "stw " TOP_SECOND ", 12(30) \n"
2296 "mr 3, 5 \n"
2297 "mr 4, 6 \n");
2298 }
2299
2300 /* Discard N elements in the stack. Also used for ppc64. */
2301
2302 static void
2303 ppc_emit_stack_adjust (int n)
2304 {
2305 uint32_t buf[6];
2306 uint32_t *p = buf;
2307
2308 n = n << 3;
2309 if ((n >> 15) != 0)
2310 {
2311 emit_error = 1;
2312 return;
2313 }
2314
2315 p += GEN_ADDI (p, 30, 30, n);
2316
2317 emit_insns (buf, p - buf);
2318 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2319 }
2320
2321 /* Call function FN. */
2322
2323 static void
2324 ppc_emit_call (CORE_ADDR fn)
2325 {
2326 uint32_t buf[11];
2327 uint32_t *p = buf;
2328
2329 p += gen_call (p, fn, 0, 0);
2330
2331 emit_insns (buf, p - buf);
2332 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2333 }
2334
2335 /* FN's prototype is `LONGEST(*fn)(int)'.
2336 TOP = fn (arg1)
2337 */
2338
2339 static void
2340 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2341 {
2342 uint32_t buf[15];
2343 uint32_t *p = buf;
2344
2345 /* Setup argument. arg1 is a 16-bit value. */
2346 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2347 p += gen_call (p, fn, 0, 0);
2348
2349 emit_insns (buf, p - buf);
2350 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2351
2352 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2353 {
2354 EMIT_ASM ("mr 5, 4\n"
2355 "mr 4, 3\n"
2356 "mr 3, 5\n");
2357 }
2358 }
2359
2360 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2361 fn (arg1, TOP)
2362
2363 TOP should be preserved/restored before/after the call. */
2364
2365 static void
2366 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2367 {
2368 uint32_t buf[21];
2369 uint32_t *p = buf;
2370
2371 /* Save TOP. 0(30) is next-empty. */
2372 p += GEN_STW (p, 3, 30, 0);
2373 p += GEN_STW (p, 4, 30, 4);
2374
2375 /* Setup argument. arg1 is a 16-bit value. */
2376 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2377 {
2378 p += GEN_MR (p, 5, 4);
2379 p += GEN_MR (p, 6, 3);
2380 }
2381 else
2382 {
2383 p += GEN_MR (p, 5, 3);
2384 p += GEN_MR (p, 6, 4);
2385 }
2386 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2387 p += gen_call (p, fn, 0, 0);
2388
2389 /* Restore TOP */
2390 p += GEN_LWZ (p, 3, 30, 0);
2391 p += GEN_LWZ (p, 4, 30, 4);
2392
2393 emit_insns (buf, p - buf);
2394 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2395 }
2396
2397 /* Note in the following goto ops:
2398
2399 When emitting goto, the target address is later relocated by
2400 write_goto_address. OFFSET_P is the offset of the branch instruction
2401 in the code sequence, and *SIZE_P tells ppc_write_goto_address how to
2402 relocate the instruction. In the current implementation,
2403 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2404 */
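
/* A sketch of how a user of these emitters is expected to combine the two
   out-parameters with ppc_write_goto_address: record where the sequence
   starts, emit it, and patch the branch once the target is known.  The
   real bookkeeping lives in gdbserver's generic bytecode compiler; the
   function below is only an illustration and its name is made up.  */
#if 0
static void
example_compile_forward_goto (CORE_ADDR target_addr)
{
  CORE_ADDR start = current_insn_ptr;
  int branch_offset, branch_size;

  ppc_emit_if_goto (&branch_offset, &branch_size);
  /* ... emit the fall-through code, eventually learning TARGET_ADDR ... */
  ppc_write_goto_address (start + branch_offset, target_addr, branch_size);
}
#endif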
2405
2406 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2407
2408 static void
2409 ppc_emit_if_goto (int *offset_p, int *size_p)
2410 {
2411 EMIT_ASM ("or. 3, 3, 4 \n"
2412 "lwzu " TOP_FIRST ", 8(30) \n"
2413 "lwz " TOP_SECOND ", 4(30) \n"
2414 "1:bne 0, 1b \n");
2415
2416 if (offset_p)
2417 *offset_p = 12;
2418 if (size_p)
2419 *size_p = 14;
2420 }
2421
2422 /* Unconditional goto. Also used for ppc64. */
2423
2424 static void
2425 ppc_emit_goto (int *offset_p, int *size_p)
2426 {
2427 EMIT_ASM ("1:b 1b");
2428
2429 if (offset_p)
2430 *offset_p = 0;
2431 if (size_p)
2432 *size_p = 24;
2433 }
2434
2435 /* Goto if stack[--sp] == TOP */
2436
2437 static void
2438 ppc_emit_eq_goto (int *offset_p, int *size_p)
2439 {
2440 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2441 "lwz " TMP_SECOND ", 4(30) \n"
2442 "xor 4, 6, 4 \n"
2443 "xor 3, 5, 3 \n"
2444 "or. 3, 3, 4 \n"
2445 "lwzu " TOP_FIRST ", 8(30) \n"
2446 "lwz " TOP_SECOND ", 4(30) \n"
2447 "1:beq 0, 1b \n");
2448
2449 if (offset_p)
2450 *offset_p = 28;
2451 if (size_p)
2452 *size_p = 14;
2453 }
2454
2455 /* Goto if stack[--sp] != TOP */
2456
2457 static void
2458 ppc_emit_ne_goto (int *offset_p, int *size_p)
2459 {
2460 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2461 "lwz " TMP_SECOND ", 4(30) \n"
2462 "xor 4, 6, 4 \n"
2463 "xor 3, 5, 3 \n"
2464 "or. 3, 3, 4 \n"
2465 "lwzu " TOP_FIRST ", 8(30) \n"
2466 "lwz " TOP_SECOND ", 4(30) \n"
2467 "1:bne 0, 1b \n");
2468
2469 if (offset_p)
2470 *offset_p = 28;
2471 if (size_p)
2472 *size_p = 14;
2473 }
2474
2475 /* Goto if stack[--sp] < TOP */
2476
2477 static void
2478 ppc_emit_lt_goto (int *offset_p, int *size_p)
2479 {
2480 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2481 "lwz " TMP_SECOND ", 4(30) \n"
2482 "cmplw 6, 6, 4 \n"
2483 "cmpw 7, 5, 3 \n"
2484 /* CR6 bit 0 = low less and high equal */
2485 "crand 6*4+0, 6*4+0, 7*4+2\n"
2486 /* CR7 bit 0 = (low less and high equal) or high less */
2487 "cror 7*4+0, 7*4+0, 6*4+0\n"
2488 "lwzu " TOP_FIRST ", 8(30) \n"
2489 "lwz " TOP_SECOND ", 4(30)\n"
2490 "1:blt 7, 1b \n");
2491
2492 if (offset_p)
2493 *offset_p = 32;
2494 if (size_p)
2495 *size_p = 14;
2496 }
2497
2498 /* Goto if stack[--sp] <= TOP */
2499
2500 static void
2501 ppc_emit_le_goto (int *offset_p, int *size_p)
2502 {
2503 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2504 "lwz " TMP_SECOND ", 4(30) \n"
2505 "cmplw 6, 6, 4 \n"
2506 "cmpw 7, 5, 3 \n"
2507 /* CR6 bit 0 = low less/equal and high equal */
2508 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2509 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2510 "cror 7*4+0, 7*4+0, 6*4+0\n"
2511 "lwzu " TOP_FIRST ", 8(30) \n"
2512 "lwz " TOP_SECOND ", 4(30)\n"
2513 "1:blt 7, 1b \n");
2514
2515 if (offset_p)
2516 *offset_p = 32;
2517 if (size_p)
2518 *size_p = 14;
2519 }
2520
2521 /* Goto if stack[--sp] > TOP */
2522
2523 static void
2524 ppc_emit_gt_goto (int *offset_p, int *size_p)
2525 {
2526 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2527 "lwz " TMP_SECOND ", 4(30) \n"
2528 "cmplw 6, 6, 4 \n"
2529 "cmpw 7, 5, 3 \n"
2530 /* CR6 bit 0 = low greater and high equal */
2531 "crand 6*4+0, 6*4+1, 7*4+2\n"
2532 /* CR7 bit 0 = (low greater and high equal) or high greater */
2533 "cror 7*4+0, 7*4+1, 6*4+0\n"
2534 "lwzu " TOP_FIRST ", 8(30) \n"
2535 "lwz " TOP_SECOND ", 4(30)\n"
2536 "1:blt 7, 1b \n");
2537
2538 if (offset_p)
2539 *offset_p = 32;
2540 if (size_p)
2541 *size_p = 14;
2542 }
2543
2544 /* Goto if stack[--sp] >= TOP */
2545
2546 static void
2547 ppc_emit_ge_goto (int *offset_p, int *size_p)
2548 {
2549 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2550 "lwz " TMP_SECOND ", 4(30) \n"
2551 "cmplw 6, 6, 4 \n"
2552 "cmpw 7, 5, 3 \n"
2553 /* CR6 bit 0 = low ge and high equal */
2554 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2555 /* CR7 bit 0 = (low ge and high equal) or high greater */
2556 "cror 7*4+0, 7*4+1, 6*4+0\n"
2557 "lwzu " TOP_FIRST ", 8(30)\n"
2558 "lwz " TOP_SECOND ", 4(30)\n"
2559 "1:blt 7, 1b \n");
2560
2561 if (offset_p)
2562 *offset_p = 32;
2563 if (size_p)
2564 *size_p = 14;
2565 }
2566
2567 /* Relocate a previously emitted branch instruction. FROM is the address
2568 of the branch instruction, TO is the goto target address, and SIZE
2569 is the value we set via *SIZE_P before. Currently, it is either
2570 24 or 14, for branch and conditional-branch instructions.
2571 Also used for ppc64. */
2572
2573 static void
2574 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2575 {
2576 long rel = to - from;
2577 uint32_t insn;
2578 int opcd;
2579
2580 read_inferior_memory (from, (unsigned char *) &insn, 4);
2581 opcd = (insn >> 26) & 0x3f;
2582
2583 switch (size)
2584 {
2585 case 14:
2586 if (opcd != 16
2587 || (rel >= (1 << 15) || rel < -(1 << 15)))
2588 emit_error = 1;
2589 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2590 break;
2591 case 24:
2592 if (opcd != 18
2593 || (rel >= (1 << 25) || rel < -(1 << 25)))
2594 emit_error = 1;
2595 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2596 break;
2597 default:
2598 emit_error = 1;
2599 }
2600
2601 if (!emit_error)
2602 target_write_memory (from, (unsigned char *) &insn, 4);
2603 }
2604
2605 /* Table of emit ops for 32-bit. */
2606
2607 static struct emit_ops ppc_emit_ops_impl =
2608 {
2609 ppc_emit_prologue,
2610 ppc_emit_epilogue,
2611 ppc_emit_add,
2612 ppc_emit_sub,
2613 ppc_emit_mul,
2614 ppc_emit_lsh,
2615 ppc_emit_rsh_signed,
2616 ppc_emit_rsh_unsigned,
2617 ppc_emit_ext,
2618 ppc_emit_log_not,
2619 ppc_emit_bit_and,
2620 ppc_emit_bit_or,
2621 ppc_emit_bit_xor,
2622 ppc_emit_bit_not,
2623 ppc_emit_equal,
2624 ppc_emit_less_signed,
2625 ppc_emit_less_unsigned,
2626 ppc_emit_ref,
2627 ppc_emit_if_goto,
2628 ppc_emit_goto,
2629 ppc_write_goto_address,
2630 ppc_emit_const,
2631 ppc_emit_call,
2632 ppc_emit_reg,
2633 ppc_emit_pop,
2634 ppc_emit_stack_flush,
2635 ppc_emit_zero_ext,
2636 ppc_emit_swap,
2637 ppc_emit_stack_adjust,
2638 ppc_emit_int_call_1,
2639 ppc_emit_void_call_2,
2640 ppc_emit_eq_goto,
2641 ppc_emit_ne_goto,
2642 ppc_emit_lt_goto,
2643 ppc_emit_le_goto,
2644 ppc_emit_gt_goto,
2645 ppc_emit_ge_goto
2646 };
2647
2648 #ifdef __powerpc64__
2649
2650 /*
2651
2652 Bytecode execution stack frame - 64-bit
2653
2654 | LR save area (SP + 16)
2655 | CR save area (SP + 8)
2656 SP' -> +- Back chain (SP + 0)
2657 | Save r31 for accessing saved arguments
2658 | Save r30 for bytecode stack pointer
2659 | Save r4 for incoming argument *value
2660 | Save r3 for incoming argument regs
2661 r30 -> +- Bytecode execution stack
2662 |
2663 | 64 bytes (8 doublewords) initially.
2664 | Expand stack as needed.
2665 |
2666 +-
2667 | Some padding for minimum stack frame.
2668 | 112 for ELFv1.
2669 SP +- Back-chain (SP')
2670
2671 initial frame size
2672 = 112 + (4 * 8) + 64
2673 = 208
2674
2675 r30 is the stack pointer for the bytecode machine.
2676 It should point to the next empty slot, so we can use LDU for pop.
2677 r3 is used as a cache of the TOP value.
2678 It was the first argument, the pointer to regs.
2679 r4 is the second argument, the pointer to the result.
2680 We should set *result = TOP before returning from this function.
2681
2682 Note:
2683 * To restore stack at epilogue
2684 => sp = r31
2685 * To check stack is big enough for bytecode execution.
2686 => r30 - 8 > SP + 112
2687 * To return execution result.
2688 => 0(r4) = TOP
2689
2690 */
2691
2692 /* Emit prologue in inferior memory. See above comments. */
2693
2694 static void
2695 ppc64v1_emit_prologue (void)
2696 {
2697 /* On ELFv1, function pointers really point to a function descriptor,
2698 so emit one here. We don't care about the contents of words 1 and 2,
2699 so let them just overlap our code. */
2700 uint64_t opd = current_insn_ptr + 8;
2701 uint32_t buf[2];
2702
2703 /* Mind the strict aliasing rules. */
2704 memcpy (buf, &opd, sizeof buf);
2705 emit_insns(buf, 2);
2706 EMIT_ASM (/* Save return address. */
2707 "mflr 0 \n"
2708 "std 0, 16(1) \n"
2709 /* Save r31, r30 and the incoming arguments. */
2710 "std 31, -8(1) \n"
2711 "std 30, -16(1) \n"
2712 "std 4, -24(1) \n"
2713 "std 3, -32(1) \n"
2714 /* Point r31 to current r1 for accessing arguments. */
2715 "mr 31, 1 \n"
2716 /* Adjust SP. 208 is the initial frame size. */
2717 "stdu 1, -208(1) \n"
2718 /* Set r30 to point at the stack top. */
2719 "addi 30, 1, 168 \n"
2720 /* Initialize r3/TOP to 0. */
2721 "li 3, 0 \n");
2722 }
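
/* For reference, an ELFv1 function descriptor is three doublewords; only
   the entry-point word is materialized above, and the TOC and environment
   slots are left to overlap the code that follows, as the comment says.
   A sketch of the layout (the struct name is invented here):  */
#if 0
struct elfv1_func_descriptor
{
  uint64_t entry;	/* Address of the first instruction (opd above).  */
  uint64_t toc;		/* r2 value; unused by this entry point.  */
  uint64_t env;		/* Environment pointer; unused here.  */
};
#endif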
2723
2724 /* Emit prologue in inferior memory. See above comments. */
2725
2726 static void
2727 ppc64v2_emit_prologue (void)
2728 {
2729 EMIT_ASM (/* Save return address. */
2730 "mflr 0 \n"
2731 "std 0, 16(1) \n"
2732 /* Save r31, r30 and the incoming arguments. */
2733 "std 31, -8(1) \n"
2734 "std 30, -16(1) \n"
2735 "std 4, -24(1) \n"
2736 "std 3, -32(1) \n"
2737 /* Point r31 to current r1 for accessing arguments. */
2738 "mr 31, 1 \n"
2739 /* Adjust SP. 208 is the initial frame size. */
2740 "stdu 1, -208(1) \n"
2741 /* Set r30 to point at the stack top. */
2742 "addi 30, 1, 168 \n"
2743 /* Initialize r3/TOP to 0. */
2744 "li 3, 0 \n");
2745 }
2746
2747 /* Emit epilogue in inferior memory. See above comments. */
2748
2749 static void
2750 ppc64_emit_epilogue (void)
2751 {
2752 EMIT_ASM (/* Restore SP. */
2753 "ld 1, 0(1) \n"
2754 /* *result = TOP */
2755 "ld 4, -24(1) \n"
2756 "std 3, 0(4) \n"
2757 /* Restore registers. */
2758 "ld 31, -8(1) \n"
2759 "ld 30, -16(1) \n"
2760 /* Restore LR. */
2761 "ld 0, 16(1) \n"
2762 /* Return 0 for no-error. */
2763 "li 3, 0 \n"
2764 "mtlr 0 \n"
2765 "blr \n");
2766 }
2767
2768 /* TOP = stack[--sp] + TOP */
2769
2770 static void
2771 ppc64_emit_add (void)
2772 {
2773 EMIT_ASM ("ldu 4, 8(30) \n"
2774 "add 3, 4, 3 \n");
2775 }
2776
2777 /* TOP = stack[--sp] - TOP */
2778
2779 static void
2780 ppc64_emit_sub (void)
2781 {
2782 EMIT_ASM ("ldu 4, 8(30) \n"
2783 "sub 3, 4, 3 \n");
2784 }
2785
2786 /* TOP = stack[--sp] * TOP */
2787
2788 static void
2789 ppc64_emit_mul (void)
2790 {
2791 EMIT_ASM ("ldu 4, 8(30) \n"
2792 "mulld 3, 4, 3 \n");
2793 }
2794
2795 /* TOP = stack[--sp] << TOP */
2796
2797 static void
2798 ppc64_emit_lsh (void)
2799 {
2800 EMIT_ASM ("ldu 4, 8(30) \n"
2801 "sld 3, 4, 3 \n");
2802 }
2803
2804 /* TOP = stack[--sp] >> TOP
2805 (Arithmetic shift right) */
2806
2807 static void
2808 ppc64_emit_rsh_signed (void)
2809 {
2810 EMIT_ASM ("ldu 4, 8(30) \n"
2811 "srad 3, 4, 3 \n");
2812 }
2813
2814 /* TOP = stack[--sp] >> TOP
2815 (Logical shift right) */
2816
2817 static void
2818 ppc64_emit_rsh_unsigned (void)
2819 {
2820 EMIT_ASM ("ldu 4, 8(30) \n"
2821 "srd 3, 4, 3 \n");
2822 }
2823
2824 /* Emit code for the sign extension specified by ARG. */
2825
2826 static void
2827 ppc64_emit_ext (int arg)
2828 {
2829 switch (arg)
2830 {
2831 case 8:
2832 EMIT_ASM ("extsb 3, 3");
2833 break;
2834 case 16:
2835 EMIT_ASM ("extsh 3, 3");
2836 break;
2837 case 32:
2838 EMIT_ASM ("extsw 3, 3");
2839 break;
2840 default:
2841 emit_error = 1;
2842 }
2843 }
2844
2845 /* Emit code for zero-extension specified by ARG. */
2846
2847 static void
2848 ppc64_emit_zero_ext (int arg)
2849 {
2850 switch (arg)
2851 {
2852 case 8:
2853 EMIT_ASM ("rldicl 3,3,0,56");
2854 break;
2855 case 16:
2856 EMIT_ASM ("rldicl 3,3,0,48");
2857 break;
2858 case 32:
2859 EMIT_ASM ("rldicl 3,3,0,32");
2860 break;
2861 default:
2862 emit_error = 1;
2863 }
2864 }
2865
2866 /* TOP = !TOP
2867 i.e., TOP = (TOP == 0) ? 1 : 0; */
2868
2869 static void
2870 ppc64_emit_log_not (void)
2871 {
2872 EMIT_ASM ("cntlzd 3, 3 \n"
2873 "srdi 3, 3, 6 \n");
2874 }
2875
2876 /* TOP = stack[--sp] & TOP */
2877
2878 static void
2879 ppc64_emit_bit_and (void)
2880 {
2881 EMIT_ASM ("ldu 4, 8(30) \n"
2882 "and 3, 4, 3 \n");
2883 }
2884
2885 /* TOP = stack[--sp] | TOP */
2886
2887 static void
2888 ppc64_emit_bit_or (void)
2889 {
2890 EMIT_ASM ("ldu 4, 8(30) \n"
2891 "or 3, 4, 3 \n");
2892 }
2893
2894 /* TOP = stack[--sp] ^ TOP */
2895
2896 static void
2897 ppc64_emit_bit_xor (void)
2898 {
2899 EMIT_ASM ("ldu 4, 8(30) \n"
2900 "xor 3, 4, 3 \n");
2901 }
2902
2903 /* TOP = ~TOP
2904 i.e., TOP = ~(TOP | TOP) */
2905
2906 static void
2907 ppc64_emit_bit_not (void)
2908 {
2909 EMIT_ASM ("nor 3, 3, 3 \n");
2910 }
2911
2912 /* TOP = stack[--sp] == TOP */
2913
2914 static void
2915 ppc64_emit_equal (void)
2916 {
2917 EMIT_ASM ("ldu 4, 8(30) \n"
2918 "xor 3, 3, 4 \n"
2919 "cntlzd 3, 3 \n"
2920 "srdi 3, 3, 6 \n");
2921 }
2922
2923 /* TOP = stack[--sp] < TOP
2924 (Signed comparison) */
2925
2926 static void
2927 ppc64_emit_less_signed (void)
2928 {
2929 EMIT_ASM ("ldu 4, 8(30) \n"
2930 "cmpd 7, 4, 3 \n"
2931 "mfcr 3 \n"
2932 "rlwinm 3, 3, 29, 31, 31 \n");
2933 }
2934
2935 /* TOP = stack[--sp] < TOP
2936 (Unsigned comparison) */
2937
2938 static void
2939 ppc64_emit_less_unsigned (void)
2940 {
2941 EMIT_ASM ("ldu 4, 8(30) \n"
2942 "cmpld 7, 4, 3 \n"
2943 "mfcr 3 \n"
2944 "rlwinm 3, 3, 29, 31, 31 \n");
2945 }
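
/* In both comparisons above (and in their 32-bit counterparts), the
   mfcr/rlwinm pair extracts CR7's LT bit as a 0/1 value: that bit is CR
   bit 28 in the architecture's big-endian bit numbering (mask 0x8 as an
   ordinary integer), so rotating left by 29 and masking the low bit is
   the same as the C expression below (illustrative only, name made up).  */
#if 0
static uint32_t
extract_cr7_lt (uint32_t cr)
{
  return (cr >> 3) & 1;		/* rlwinm 3, 3, 29, 31, 31 */
}
#endif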
2946
2947 /* Read SIZE bytes from the memory address in TOP.
2948 Zero-extend the value read. */
2949
2950 static void
2951 ppc64_emit_ref (int size)
2952 {
2953 switch (size)
2954 {
2955 case 1:
2956 EMIT_ASM ("lbz 3, 0(3)");
2957 break;
2958 case 2:
2959 EMIT_ASM ("lhz 3, 0(3)");
2960 break;
2961 case 4:
2962 EMIT_ASM ("lwz 3, 0(3)");
2963 break;
2964 case 8:
2965 EMIT_ASM ("ld 3, 0(3)");
2966 break;
2967 }
2968 }
2969
2970 /* TOP = NUM */
2971
2972 static void
2973 ppc64_emit_const (LONGEST num)
2974 {
2975 uint32_t buf[5];
2976 uint32_t *p = buf;
2977
2978 p += gen_limm (p, 3, num, 1);
2979
2980 emit_insns (buf, p - buf);
2981 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2982 }
2983
2984 /* Set TOP to the value of register REG by calling the get_raw_reg function
2985 with two arguments, the collected buffer and the register number. */
2986
2987 static void
2988 ppc64v1_emit_reg (int reg)
2989 {
2990 uint32_t buf[15];
2991 uint32_t *p = buf;
2992
2993 /* fctx->regs is passed in r3 and then saved in 176(1). */
2994 p += GEN_LD (p, 3, 31, -32);
2995 p += GEN_LI (p, 4, reg);
2996 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2997 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2998 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2999
3000 emit_insns (buf, p - buf);
3001 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3002 }
3003
3004 /* Likewise, for ELFv2. */
3005
3006 static void
3007 ppc64v2_emit_reg (int reg)
3008 {
3009 uint32_t buf[12];
3010 uint32_t *p = buf;
3011
3012 /* fctx->regs is passed in r3 and then saved in 176(1). */
3013 p += GEN_LD (p, 3, 31, -32);
3014 p += GEN_LI (p, 4, reg);
3015 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3016 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
3017 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3018
3019 emit_insns (buf, p - buf);
3020 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3021 }
3022
3023 /* TOP = stack[--sp] */
3024
3025 static void
3026 ppc64_emit_pop (void)
3027 {
3028 EMIT_ASM ("ldu 3, 8(30)");
3029 }
3030
3031 /* stack[sp++] = TOP
3032
3033 Because we may run out of bytecode stack, expand it by 8 more
3034 doublewords if needed. */
3035
3036 static void
3037 ppc64_emit_stack_flush (void)
3038 {
3039 /* Make sure the bytecode stack is big enough before the push.
3040 Otherwise, expand it by 64 more bytes. */
3041
3042 EMIT_ASM (" std 3, 0(30) \n"
3043 " addi 4, 30, -(112 + 8) \n"
3044 " cmpd 7, 4, 1 \n"
3045 " bgt 7, 1f \n"
3046 " stdu 31, -64(1) \n"
3047 "1:addi 30, 30, -8 \n");
3048 }
3049
3050 /* Swap TOP and stack[sp-1] */
3051
3052 static void
3053 ppc64_emit_swap (void)
3054 {
3055 EMIT_ASM ("ld 4, 8(30) \n"
3056 "std 3, 8(30) \n"
3057 "mr 3, 4 \n");
3058 }
3059
3060 /* Call function FN - ELFv1. */
3061
3062 static void
3063 ppc64v1_emit_call (CORE_ADDR fn)
3064 {
3065 uint32_t buf[13];
3066 uint32_t *p = buf;
3067
3068 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3069 p += gen_call (p, fn, 1, 1);
3070 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3071
3072 emit_insns (buf, p - buf);
3073 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3074 }
3075
3076 /* Call function FN - ELFv2. */
3077
3078 static void
3079 ppc64v2_emit_call (CORE_ADDR fn)
3080 {
3081 uint32_t buf[10];
3082 uint32_t *p = buf;
3083
3084 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3085 p += gen_call (p, fn, 1, 0);
3086 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3087
3088 emit_insns (buf, p - buf);
3089 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3090 }
3091
3092 /* FN's prototype is `LONGEST(*fn)(int)'.
3093 TOP = fn (arg1)
3094 */
3095
3096 static void
3097 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3098 {
3099 uint32_t buf[13];
3100 uint32_t *p = buf;
3101
3102 /* Setup argument. arg1 is a 16-bit value. */
3103 p += gen_limm (p, 3, arg1, 1);
3104 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3105 p += gen_call (p, fn, 1, 1);
3106 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3107
3108 emit_insns (buf, p - buf);
3109 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3110 }
3111
3112 /* Likewise for ELFv2. */
3113
3114 static void
3115 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3116 {
3117 uint32_t buf[10];
3118 uint32_t *p = buf;
3119
3120 /* Setup argument. arg1 is a 16-bit value. */
3121 p += gen_limm (p, 3, arg1, 1);
3122 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3123 p += gen_call (p, fn, 1, 0);
3124 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3125
3126 emit_insns (buf, p - buf);
3127 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3128 }
3129
3130 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3131 fn (arg1, TOP)
3132
3133 TOP should be preserved/restored before/after the call. */
3134
3135 static void
3136 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3137 {
3138 uint32_t buf[17];
3139 uint32_t *p = buf;
3140
3141 /* Save TOP. 0(30) is next-empty. */
3142 p += GEN_STD (p, 3, 30, 0);
3143
3144 /* Setup argument. arg1 is a 16-bit value. */
3145 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3146 p += gen_limm (p, 3, arg1, 1);
3147 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3148 p += gen_call (p, fn, 1, 1);
3149 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3150
3151 /* Restore TOP */
3152 p += GEN_LD (p, 3, 30, 0);
3153
3154 emit_insns (buf, p - buf);
3155 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3156 }
3157
3158 /* Likewise for ELFv2. */
3159
3160 static void
3161 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3162 {
3163 uint32_t buf[14];
3164 uint32_t *p = buf;
3165
3166 /* Save TOP. 0(30) is next-empty. */
3167 p += GEN_STD (p, 3, 30, 0);
3168
3169 /* Setup argument. arg1 is a 16-bit value. */
3170 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3171 p += gen_limm (p, 3, arg1, 1);
3172 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3173 p += gen_call (p, fn, 1, 0);
3174 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3175
3176 /* Restore TOP */
3177 p += GEN_LD (p, 3, 30, 0);
3178
3179 emit_insns (buf, p - buf);
3180 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3181 }
3182
3183 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3184
3185 static void
3186 ppc64_emit_if_goto (int *offset_p, int *size_p)
3187 {
3188 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3189 "ldu 3, 8(30) \n"
3190 "1:bne 7, 1b \n");
3191
3192 if (offset_p)
3193 *offset_p = 8;
3194 if (size_p)
3195 *size_p = 14;
3196 }
3197
3198 /* Goto if stack[--sp] == TOP */
3199
3200 static void
3201 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3202 {
3203 EMIT_ASM ("ldu 4, 8(30) \n"
3204 "cmpd 7, 4, 3 \n"
3205 "ldu 3, 8(30) \n"
3206 "1:beq 7, 1b \n");
3207
3208 if (offset_p)
3209 *offset_p = 12;
3210 if (size_p)
3211 *size_p = 14;
3212 }
3213
3214 /* Goto if stack[--sp] != TOP */
3215
3216 static void
3217 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3218 {
3219 EMIT_ASM ("ldu 4, 8(30) \n"
3220 "cmpd 7, 4, 3 \n"
3221 "ldu 3, 8(30) \n"
3222 "1:bne 7, 1b \n");
3223
3224 if (offset_p)
3225 *offset_p = 12;
3226 if (size_p)
3227 *size_p = 14;
3228 }
3229
3230 /* Goto if stack[--sp] < TOP */
3231
3232 static void
3233 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3234 {
3235 EMIT_ASM ("ldu 4, 8(30) \n"
3236 "cmpd 7, 4, 3 \n"
3237 "ldu 3, 8(30) \n"
3238 "1:blt 7, 1b \n");
3239
3240 if (offset_p)
3241 *offset_p = 12;
3242 if (size_p)
3243 *size_p = 14;
3244 }
3245
3246 /* Goto if stack[--sp] <= TOP */
3247
3248 static void
3249 ppc64_emit_le_goto (int *offset_p, int *size_p)
3250 {
3251 EMIT_ASM ("ldu 4, 8(30) \n"
3252 "cmpd 7, 4, 3 \n"
3253 "ldu 3, 8(30) \n"
3254 "1:ble 7, 1b \n");
3255
3256 if (offset_p)
3257 *offset_p = 12;
3258 if (size_p)
3259 *size_p = 14;
3260 }
3261
3262 /* Goto if stack[--sp] > TOP */
3263
3264 static void
3265 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3266 {
3267 EMIT_ASM ("ldu 4, 8(30) \n"
3268 "cmpd 7, 4, 3 \n"
3269 "ldu 3, 8(30) \n"
3270 "1:bgt 7, 1b \n");
3271
3272 if (offset_p)
3273 *offset_p = 12;
3274 if (size_p)
3275 *size_p = 14;
3276 }
3277
3278 /* Goto if stack[--sp] >= TOP */
3279
3280 static void
3281 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3282 {
3283 EMIT_ASM ("ldu 4, 8(30) \n"
3284 "cmpd 7, 4, 3 \n"
3285 "ldu 3, 8(30) \n"
3286 "1:bge 7, 1b \n");
3287
3288 if (offset_p)
3289 *offset_p = 12;
3290 if (size_p)
3291 *size_p = 14;
3292 }
3293
3294 /* Table of emit ops for 64-bit ELFv1. */
3295
3296 static struct emit_ops ppc64v1_emit_ops_impl =
3297 {
3298 ppc64v1_emit_prologue,
3299 ppc64_emit_epilogue,
3300 ppc64_emit_add,
3301 ppc64_emit_sub,
3302 ppc64_emit_mul,
3303 ppc64_emit_lsh,
3304 ppc64_emit_rsh_signed,
3305 ppc64_emit_rsh_unsigned,
3306 ppc64_emit_ext,
3307 ppc64_emit_log_not,
3308 ppc64_emit_bit_and,
3309 ppc64_emit_bit_or,
3310 ppc64_emit_bit_xor,
3311 ppc64_emit_bit_not,
3312 ppc64_emit_equal,
3313 ppc64_emit_less_signed,
3314 ppc64_emit_less_unsigned,
3315 ppc64_emit_ref,
3316 ppc64_emit_if_goto,
3317 ppc_emit_goto,
3318 ppc_write_goto_address,
3319 ppc64_emit_const,
3320 ppc64v1_emit_call,
3321 ppc64v1_emit_reg,
3322 ppc64_emit_pop,
3323 ppc64_emit_stack_flush,
3324 ppc64_emit_zero_ext,
3325 ppc64_emit_swap,
3326 ppc_emit_stack_adjust,
3327 ppc64v1_emit_int_call_1,
3328 ppc64v1_emit_void_call_2,
3329 ppc64_emit_eq_goto,
3330 ppc64_emit_ne_goto,
3331 ppc64_emit_lt_goto,
3332 ppc64_emit_le_goto,
3333 ppc64_emit_gt_goto,
3334 ppc64_emit_ge_goto
3335 };
3336
3337 /* Table of emit ops for 64-bit ELFv2. */
3338
3339 static struct emit_ops ppc64v2_emit_ops_impl =
3340 {
3341 ppc64v2_emit_prologue,
3342 ppc64_emit_epilogue,
3343 ppc64_emit_add,
3344 ppc64_emit_sub,
3345 ppc64_emit_mul,
3346 ppc64_emit_lsh,
3347 ppc64_emit_rsh_signed,
3348 ppc64_emit_rsh_unsigned,
3349 ppc64_emit_ext,
3350 ppc64_emit_log_not,
3351 ppc64_emit_bit_and,
3352 ppc64_emit_bit_or,
3353 ppc64_emit_bit_xor,
3354 ppc64_emit_bit_not,
3355 ppc64_emit_equal,
3356 ppc64_emit_less_signed,
3357 ppc64_emit_less_unsigned,
3358 ppc64_emit_ref,
3359 ppc64_emit_if_goto,
3360 ppc_emit_goto,
3361 ppc_write_goto_address,
3362 ppc64_emit_const,
3363 ppc64v2_emit_call,
3364 ppc64v2_emit_reg,
3365 ppc64_emit_pop,
3366 ppc64_emit_stack_flush,
3367 ppc64_emit_zero_ext,
3368 ppc64_emit_swap,
3369 ppc_emit_stack_adjust,
3370 ppc64v2_emit_int_call_1,
3371 ppc64v2_emit_void_call_2,
3372 ppc64_emit_eq_goto,
3373 ppc64_emit_ne_goto,
3374 ppc64_emit_lt_goto,
3375 ppc64_emit_le_goto,
3376 ppc64_emit_gt_goto,
3377 ppc64_emit_ge_goto
3378 };
3379
3380 #endif
3381
3382 /* Implementation of linux_target_ops method "emit_ops". */
3383
3384 static struct emit_ops *
3385 ppc_emit_ops (void)
3386 {
3387 #ifdef __powerpc64__
3388 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3389
3390 if (register_size (regcache->tdesc, 0) == 8)
3391 {
3392 if (is_elfv2_inferior ())
3393 return &ppc64v2_emit_ops_impl;
3394 else
3395 return &ppc64v1_emit_ops_impl;
3396 }
3397 #endif
3398 return &ppc_emit_ops_impl;
3399 }
3400
3401 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3402
3403 static int
3404 ppc_get_ipa_tdesc_idx (void)
3405 {
3406 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3407 const struct target_desc *tdesc = regcache->tdesc;
3408
3409 #ifdef __powerpc64__
3410 if (tdesc == tdesc_powerpc_64l)
3411 return PPC_TDESC_BASE;
3412 if (tdesc == tdesc_powerpc_altivec64l)
3413 return PPC_TDESC_ALTIVEC;
3414 if (tdesc == tdesc_powerpc_vsx64l)
3415 return PPC_TDESC_VSX;
3416 if (tdesc == tdesc_powerpc_isa205_64l)
3417 return PPC_TDESC_ISA205;
3418 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3419 return PPC_TDESC_ISA205_ALTIVEC;
3420 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3421 return PPC_TDESC_ISA205_VSX;
3422 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3423 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3424 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3425 return PPC_TDESC_ISA207_VSX;
3426 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3427 return PPC_TDESC_ISA207_HTM_VSX;
3428 #endif
3429
3430 if (tdesc == tdesc_powerpc_32l)
3431 return PPC_TDESC_BASE;
3432 if (tdesc == tdesc_powerpc_altivec32l)
3433 return PPC_TDESC_ALTIVEC;
3434 if (tdesc == tdesc_powerpc_vsx32l)
3435 return PPC_TDESC_VSX;
3436 if (tdesc == tdesc_powerpc_isa205_32l)
3437 return PPC_TDESC_ISA205;
3438 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3439 return PPC_TDESC_ISA205_ALTIVEC;
3440 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3441 return PPC_TDESC_ISA205_VSX;
3442 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3443 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3444 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3445 return PPC_TDESC_ISA207_VSX;
3446 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3447 return PPC_TDESC_ISA207_HTM_VSX;
3448 if (tdesc == tdesc_powerpc_e500l)
3449 return PPC_TDESC_E500;
3450
3451 return 0;
3452 }
3453
3454 struct linux_target_ops the_low_target = {
3455 ppc_emit_ops,
3456 NULL, /* supports_range_stepping */
3457 ppc_supports_hardware_single_step,
3458 NULL, /* get_syscall_trapinfo */
3459 ppc_get_ipa_tdesc_idx,
3460 };
3461
3462 /* The linux target ops object. */
3463
3464 linux_process_target *the_linux_target = &the_ppc_target;
3465
3466 void
3467 initialize_low_arch (void)
3468 {
3469 /* Initialize the Linux target descriptions. */
3470
3471 init_registers_powerpc_32l ();
3472 init_registers_powerpc_altivec32l ();
3473 init_registers_powerpc_vsx32l ();
3474 init_registers_powerpc_isa205_32l ();
3475 init_registers_powerpc_isa205_altivec32l ();
3476 init_registers_powerpc_isa205_vsx32l ();
3477 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3478 init_registers_powerpc_isa207_vsx32l ();
3479 init_registers_powerpc_isa207_htm_vsx32l ();
3480 init_registers_powerpc_e500l ();
3481 #if __powerpc64__
3482 init_registers_powerpc_64l ();
3483 init_registers_powerpc_altivec64l ();
3484 init_registers_powerpc_vsx64l ();
3485 init_registers_powerpc_isa205_64l ();
3486 init_registers_powerpc_isa205_altivec64l ();
3487 init_registers_powerpc_isa205_vsx64l ();
3488 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3489 init_registers_powerpc_isa207_vsx64l ();
3490 init_registers_powerpc_isa207_htm_vsx64l ();
3491 #endif
3492
3493 initialize_regsets_info (&ppc_regsets_info);
3494 }