gdbserver/linux-ppc-low.cc
1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include "elf/common.h"
24 #include <sys/uio.h>
25 #include <elf.h>
26 #include <asm/ptrace.h>
27
28 #include "arch/ppc-linux-common.h"
29 #include "arch/ppc-linux-tdesc.h"
30 #include "nat/ppc-linux.h"
31 #include "nat/linux-ptrace.h"
32 #include "linux-ppc-tdesc-init.h"
33 #include "ax.h"
34 #include "tracepoint.h"
35
36 #define PPC_FIELD(value, from, len) \
37 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
38 #define PPC_SEXT(v, bs) \
39 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
40 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
41 - ((CORE_ADDR) 1 << ((bs) - 1)))
42 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
43 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
44 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
45 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
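/* As a worked example of the decoding macros above: the unconditional
   branch "b +16" encodes as 0x48000010, so PPC_OP6 yields major opcode 18
   and PPC_LI yields the byte displacement 16; the backward branch "b -16"
   encodes as 0x4bfffff0, whose 24-bit LI field PPC_SEXT sign-extends to -4
   before the shift.  Illustrative compile-time checks (optional, nothing
   else depends on them):  */
static_assert (PPC_OP6 (0x48000010) == 18, "opcode of I-form branch");
static_assert (PPC_LI (0x48000010) == 16, "forward branch displacement");
static_assert (PPC_LI (0x4bfffff0) == (CORE_ADDR) -16,
	       "backward branch displacement");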
46
47 /* Linux target op definitions for the PowerPC architecture. */
48
49 class ppc_target : public linux_process_target
50 {
51 public:
52
53 const regs_info *get_regs_info () override;
54
55 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
56
57 bool supports_z_point_type (char z_type) override;
58
59
60 void low_collect_ptrace_register (regcache *regcache, int regno,
61 char *buf) override;
62
63 void low_supply_ptrace_register (regcache *regcache, int regno,
64 const char *buf) override;
65
66 bool supports_tracepoints () override;
67
68 bool supports_fast_tracepoints () override;
69
70 int install_fast_tracepoint_jump_pad
71 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
72 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
73 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
74 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
75 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
76 char *err) override;
77
78 int get_min_fast_tracepoint_insn_len () override;
79
80 struct emit_ops *emit_ops () override;
81
82 protected:
83
84 void low_arch_setup () override;
85
86 bool low_cannot_fetch_register (int regno) override;
87
88 bool low_cannot_store_register (int regno) override;
89
90 bool low_supports_breakpoints () override;
91
92 CORE_ADDR low_get_pc (regcache *regcache) override;
93
94 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
95
96 bool low_breakpoint_at (CORE_ADDR pc) override;
97
98 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
99 int size, raw_breakpoint *bp) override;
100
101 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
102 int size, raw_breakpoint *bp) override;
103
104 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
105 };
106
107 /* The singleton target ops object. */
108
109 static ppc_target the_ppc_target;
110
111 /* Holds the AT_HWCAP auxv entry. */
112
113 static unsigned long ppc_hwcap;
114
115 /* Holds the AT_HWCAP2 auxv entry. */
116
117 static unsigned long ppc_hwcap2;
118
119
120 #define ppc_num_regs 73
121
122 #ifdef __powerpc64__
123 /* We use a constant for FPSCR instead of PT_FPSCR, because
124 many shipped PPC64 kernels had the wrong value in ptrace.h. */
125 static int ppc_regmap[] =
126 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
127 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
128 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
129 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
130 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
131 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
132 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
133 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
134 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
135 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
136 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
137 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
138 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
139 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
140 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
141 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
142 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
143 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
144 PT_ORIG_R3 * 8, PT_TRAP * 8 };
145 #else
146 /* Currently, don't check/send MQ. */
147 static int ppc_regmap[] =
148 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
149 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
150 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
151 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
152 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
153 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
154 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
155 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
156 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
157 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
158 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
159 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
160 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
161 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
162 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
163 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
164 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
165 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
166 PT_ORIG_R3 * 4, PT_TRAP * 4
167 };
168
169 static int ppc_regmap_e500[] =
170 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
171 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
172 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
173 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
174 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
175 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
176 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
177 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
178 -1, -1, -1, -1,
179 -1, -1, -1, -1,
180 -1, -1, -1, -1,
181 -1, -1, -1, -1,
182 -1, -1, -1, -1,
183 -1, -1, -1, -1,
184 -1, -1, -1, -1,
185 -1, -1, -1, -1,
186 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
187 PT_CTR * 4, PT_XER * 4, -1,
188 PT_ORIG_R3 * 4, PT_TRAP * 4
189 };
190 #endif
191
192 /* Check whether the kernel provides a register set with number
193 REGSET_ID of size REGSETSIZE for process/thread TID. */
194
195 static int
196 ppc_check_regset (int tid, int regset_id, int regsetsize)
197 {
198 void *buf = alloca (regsetsize);
199 struct iovec iov;
200
201 iov.iov_base = buf;
202 iov.iov_len = regsetsize;
203
204 if (ptrace (PTRACE_GETREGSET, tid, regset_id, &iov) >= 0
205 || errno == ENODATA)
206 return 1;
207 return 0;
208 }
209
210 bool
211 ppc_target::low_cannot_store_register (int regno)
212 {
213 const struct target_desc *tdesc = current_process ()->tdesc;
214
215 #ifndef __powerpc64__
216 /* Some kernels do not allow us to store fpscr. */
217 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
218 && regno == find_regno (tdesc, "fpscr"))
219 return true;
220 #endif
221
222 /* Some kernels do not allow us to store orig_r3 or trap. */
223 if (regno == find_regno (tdesc, "orig_r3")
224 || regno == find_regno (tdesc, "trap"))
225 return true;
226
227 return false;
228 }
229
230 bool
231 ppc_target::low_cannot_fetch_register (int regno)
232 {
233 return false;
234 }
235
236 void
237 ppc_target::low_collect_ptrace_register (regcache *regcache, int regno,
238 char *buf)
239 {
240 memset (buf, 0, sizeof (long));
241
242 if (__BYTE_ORDER == __LITTLE_ENDIAN)
243 {
244 /* Little-endian values always sit at the left end of the buffer. */
245 collect_register (regcache, regno, buf);
246 }
247 else if (__BYTE_ORDER == __BIG_ENDIAN)
248 {
249 /* Big-endian values sit at the right end of the buffer.  For
250 registers whose size is smaller than sizeof (long), we must use
251 padding to access them correctly. */
252 int size = register_size (regcache->tdesc, regno);
253
254 if (size < sizeof (long))
255 collect_register (regcache, regno, buf + sizeof (long) - size);
256 else
257 collect_register (regcache, regno, buf);
258 }
259 else
260 perror_with_name ("Unexpected byte order");
261 }
262
263 void
264 ppc_target::low_supply_ptrace_register (regcache *regcache, int regno,
265 const char *buf)
266 {
267 if (__BYTE_ORDER == __LITTLE_ENDIAN)
268 {
269 /* Little-endian values always sit at the left end of the buffer. */
270 supply_register (regcache, regno, buf);
271 }
272 else if (__BYTE_ORDER == __BIG_ENDIAN)
273 {
274 /* Big-endian values sit at the right end of the buffer.  For
275 registers whose size is smaller than sizeof (long), we must use
276 padding to access them correctly. */
277 int size = register_size (regcache->tdesc, regno);
278
279 if (size < sizeof (long))
280 supply_register (regcache, regno, buf + sizeof (long) - size);
281 else
282 supply_register (regcache, regno, buf);
283 }
284 else
285 perror_with_name ("Unexpected byte order");
286 }
287
288 bool
289 ppc_target::low_supports_breakpoints ()
290 {
291 return true;
292 }
293
294 CORE_ADDR
295 ppc_target::low_get_pc (regcache *regcache)
296 {
297 if (register_size (regcache->tdesc, 0) == 4)
298 {
299 unsigned int pc;
300 collect_register_by_name (regcache, "pc", &pc);
301 return (CORE_ADDR) pc;
302 }
303 else
304 {
305 unsigned long pc;
306 collect_register_by_name (regcache, "pc", &pc);
307 return (CORE_ADDR) pc;
308 }
309 }
310
311 void
312 ppc_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
313 {
314 if (register_size (regcache->tdesc, 0) == 4)
315 {
316 unsigned int newpc = pc;
317 supply_register_by_name (regcache, "pc", &newpc);
318 }
319 else
320 {
321 unsigned long newpc = pc;
322 supply_register_by_name (regcache, "pc", &newpc);
323 }
324 }
325
326 #ifndef __powerpc64__
327 static int ppc_regmap_adjusted;
328 #endif
329
330
331 /* Correct in either endianness.
332 This instruction is "twge r2, r2", which GDB uses as a software
333 breakpoint. */
334 static const unsigned int ppc_breakpoint = 0x7d821008;
335 #define ppc_breakpoint_len 4
336
337 /* Implementation of target ops method "sw_breakpoint_from_kind". */
338
339 const gdb_byte *
340 ppc_target::sw_breakpoint_from_kind (int kind, int *size)
341 {
342 *size = ppc_breakpoint_len;
343 return (const gdb_byte *) &ppc_breakpoint;
344 }
345
346 bool
347 ppc_target::low_breakpoint_at (CORE_ADDR where)
348 {
349 unsigned int insn;
350
351 read_memory (where, (unsigned char *) &insn, 4);
352 if (insn == ppc_breakpoint)
353 return true;
354 /* If necessary, recognize more trap instructions here. GDB only uses
355 the one. */
356
357 return false;
358 }
359
360 /* Implement supports_z_point_type target-ops.
361 Returns true if type Z_TYPE breakpoint is supported.
362
363 Software breakpoints are handled on the server side, so tracepoints
364 and breakpoints can be inserted at the same location. */
365
366 bool
367 ppc_target::supports_z_point_type (char z_type)
368 {
369 switch (z_type)
370 {
371 case Z_PACKET_SW_BP:
372 return true;
373 case Z_PACKET_HW_BP:
374 case Z_PACKET_WRITE_WP:
375 case Z_PACKET_ACCESS_WP:
376 default:
377 return false;
378 }
379 }
380
381 /* Implement the low_insert_point linux target op.
382 Returns 0 on success, -1 on failure and 1 on unsupported. */
383
384 int
385 ppc_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
386 int size, raw_breakpoint *bp)
387 {
388 switch (type)
389 {
390 case raw_bkpt_type_sw:
391 return insert_memory_breakpoint (bp);
392
393 case raw_bkpt_type_hw:
394 case raw_bkpt_type_write_wp:
395 case raw_bkpt_type_access_wp:
396 default:
397 /* Unsupported. */
398 return 1;
399 }
400 }
401
402 /* Implement the low_remove_point linux target op.
403 Returns 0 on success, -1 on failure and 1 on unsupported. */
404
405 int
406 ppc_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
407 int size, raw_breakpoint *bp)
408 {
409 switch (type)
410 {
411 case raw_bkpt_type_sw:
412 return remove_memory_breakpoint (bp);
413
414 case raw_bkpt_type_hw:
415 case raw_bkpt_type_write_wp:
416 case raw_bkpt_type_access_wp:
417 default:
418 /* Unsupported. */
419 return 1;
420 }
421 }
422
423 /* Provide only a fill function for the general register set. ps_lgetregs
424 will use this for NPTL support. */
425
426 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
427 {
428 int i;
429
430 ppc_target *my_ppc_target = (ppc_target *) the_linux_target;
431
432 for (i = 0; i < 32; i++)
433 my_ppc_target->low_collect_ptrace_register (regcache, i,
434 (char *) buf + ppc_regmap[i]);
435
436 for (i = 64; i < 70; i++)
437 my_ppc_target->low_collect_ptrace_register (regcache, i,
438 (char *) buf + ppc_regmap[i]);
439
440 for (i = 71; i < 73; i++)
441 my_ppc_target->low_collect_ptrace_register (regcache, i,
442 (char *) buf + ppc_regmap[i]);
443 }
444
445 /* Program Priority Register regset fill function. */
446
447 static void
448 ppc_fill_pprregset (struct regcache *regcache, void *buf)
449 {
450 char *ppr = (char *) buf;
451
452 collect_register_by_name (regcache, "ppr", ppr);
453 }
454
455 /* Program Priority Register regset store function. */
456
457 static void
458 ppc_store_pprregset (struct regcache *regcache, const void *buf)
459 {
460 const char *ppr = (const char *) buf;
461
462 supply_register_by_name (regcache, "ppr", ppr);
463 }
464
465 /* Data Stream Control Register regset fill function. */
466
467 static void
468 ppc_fill_dscrregset (struct regcache *regcache, void *buf)
469 {
470 char *dscr = (char *) buf;
471
472 collect_register_by_name (regcache, "dscr", dscr);
473 }
474
475 /* Data Stream Control Register regset store function. */
476
477 static void
478 ppc_store_dscrregset (struct regcache *regcache, const void *buf)
479 {
480 const char *dscr = (const char *) buf;
481
482 supply_register_by_name (regcache, "dscr", dscr);
483 }
484
485 /* Target Address Register regset fill function. */
486
487 static void
488 ppc_fill_tarregset (struct regcache *regcache, void *buf)
489 {
490 char *tar = (char *) buf;
491
492 collect_register_by_name (regcache, "tar", tar);
493 }
494
495 /* Target Address Register regset store function. */
496
497 static void
498 ppc_store_tarregset (struct regcache *regcache, const void *buf)
499 {
500 const char *tar = (const char *) buf;
501
502 supply_register_by_name (regcache, "tar", tar);
503 }
504
505 /* Event-Based Branching regset store function. Unless the inferior
506 has a perf event open, ptrace can return in error when reading and
507 writing to the regset, with ENODATA. For reading, the registers
508 will correctly show as unavailable. For writing, gdbserver
509 currently only caches any register writes from P and G packets and
510 the stub always tries to write all the regsets when resuming the
511 inferior, which would result in frequent warnings. For this
512 reason, we don't define a fill function. This also means that the
513 client-side regcache will be dirty if the user tries to write to
514 the EBB registers. G packets that the client sends to write to
515 unrelated registers will also include data for EBB registers, even
516 if they are unavailable. */
517
518 static void
519 ppc_store_ebbregset (struct regcache *regcache, const void *buf)
520 {
521 const char *regset = (const char *) buf;
522
523 /* The order in the kernel regset is: EBBRR, EBBHR, BESCR. In the
524 .dat file it is BESCR, EBBHR, EBBRR. */
525 supply_register_by_name (regcache, "ebbrr", &regset[0]);
526 supply_register_by_name (regcache, "ebbhr", &regset[8]);
527 supply_register_by_name (regcache, "bescr", &regset[16]);
528 }
529
530 /* Performance Monitoring Unit regset fill function. */
531
532 static void
533 ppc_fill_pmuregset (struct regcache *regcache, void *buf)
534 {
535 char *regset = (char *) buf;
536
537 /* The order in the kernel regset is SIAR, SDAR, SIER, MMCR2, MMCR0.
538 In the .dat file it is MMCR0, MMCR2, SIAR, SDAR, SIER. */
539 collect_register_by_name (regcache, "siar", &regset[0]);
540 collect_register_by_name (regcache, "sdar", &regset[8]);
541 collect_register_by_name (regcache, "sier", &regset[16]);
542 collect_register_by_name (regcache, "mmcr2", &regset[24]);
543 collect_register_by_name (regcache, "mmcr0", &regset[32]);
544 }
545
546 /* Performance Monitoring Unit regset store function. */
547
548 static void
549 ppc_store_pmuregset (struct regcache *regcache, const void *buf)
550 {
551 const char *regset = (const char *) buf;
552
553 supply_register_by_name (regcache, "siar", &regset[0]);
554 supply_register_by_name (regcache, "sdar", &regset[8]);
555 supply_register_by_name (regcache, "sier", &regset[16]);
556 supply_register_by_name (regcache, "mmcr2", &regset[24]);
557 supply_register_by_name (regcache, "mmcr0", &regset[32]);
558 }
559
560 /* Hardware Transactional Memory special-purpose register regset fill
561 function. */
562
563 static void
564 ppc_fill_tm_sprregset (struct regcache *regcache, void *buf)
565 {
566 int i, base;
567 char *regset = (char *) buf;
568
569 base = find_regno (regcache->tdesc, "tfhar");
570 for (i = 0; i < 3; i++)
571 collect_register (regcache, base + i, &regset[i * 8]);
572 }
573
574 /* Hardware Transactional Memory special-purpose register regset store
575 function. */
576
577 static void
578 ppc_store_tm_sprregset (struct regcache *regcache, const void *buf)
579 {
580 int i, base;
581 const char *regset = (const char *) buf;
582
583 base = find_regno (regcache->tdesc, "tfhar");
584 for (i = 0; i < 3; i++)
585 supply_register (regcache, base + i, &regset[i * 8]);
586 }
587
588 /* For the same reasons as the EBB regset, none of the HTM
589 checkpointed regsets have a fill function. These registers are
590 only available if the inferior is in a transaction. */
591
592 /* Hardware Transactional Memory checkpointed general-purpose regset
593 store function. */
594
595 static void
596 ppc_store_tm_cgprregset (struct regcache *regcache, const void *buf)
597 {
598 int i, base, size, endian_offset;
599 const char *regset = (const char *) buf;
600
601 base = find_regno (regcache->tdesc, "cr0");
602 size = register_size (regcache->tdesc, base);
603
604 gdb_assert (size == 4 || size == 8);
605
606 for (i = 0; i < 32; i++)
607 supply_register (regcache, base + i, &regset[i * size]);
608
609 endian_offset = 0;
610
611 if ((size == 8) && (__BYTE_ORDER == __BIG_ENDIAN))
612 endian_offset = 4;
613
614 supply_register_by_name (regcache, "ccr",
615 &regset[PT_CCR * size + endian_offset]);
616
617 supply_register_by_name (regcache, "cxer",
618 &regset[PT_XER * size + endian_offset]);
619
620 supply_register_by_name (regcache, "clr", &regset[PT_LNK * size]);
621 supply_register_by_name (regcache, "cctr", &regset[PT_CTR * size]);
622 }
623
624 /* Hardware Transactional Memory checkpointed floating-point regset
625 store function. */
626
627 static void
628 ppc_store_tm_cfprregset (struct regcache *regcache, const void *buf)
629 {
630 int i, base;
631 const char *regset = (const char *) buf;
632
633 base = find_regno (regcache->tdesc, "cf0");
634
635 for (i = 0; i < 32; i++)
636 supply_register (regcache, base + i, &regset[i * 8]);
637
638 supply_register_by_name (regcache, "cfpscr", &regset[32 * 8]);
639 }
640
641 /* Hardware Transactional Memory checkpointed vector regset store
642 function. */
643
644 static void
645 ppc_store_tm_cvrregset (struct regcache *regcache, const void *buf)
646 {
647 int i, base;
648 const char *regset = (const char *) buf;
649 int vscr_offset = 0;
650
651 base = find_regno (regcache->tdesc, "cvr0");
652
653 for (i = 0; i < 32; i++)
654 supply_register (regcache, base + i, &regset[i * 16]);
655
656 if (__BYTE_ORDER == __BIG_ENDIAN)
657 vscr_offset = 12;
658
659 supply_register_by_name (regcache, "cvscr",
660 &regset[32 * 16 + vscr_offset]);
661
662 supply_register_by_name (regcache, "cvrsave", &regset[33 * 16]);
663 }
664
665 /* Hardware Transactional Memory checkpointed vector-scalar regset
666 store function. */
667
668 static void
669 ppc_store_tm_cvsxregset (struct regcache *regcache, const void *buf)
670 {
671 int i, base;
672 const char *regset = (const char *) buf;
673
674 base = find_regno (regcache->tdesc, "cvs0h");
675 for (i = 0; i < 32; i++)
676 supply_register (regcache, base + i, &regset[i * 8]);
677 }
678
679 /* Hardware Transactional Memory checkpointed Program Priority
680 Register regset store function. */
681
682 static void
683 ppc_store_tm_cpprregset (struct regcache *regcache, const void *buf)
684 {
685 const char *cppr = (const char *) buf;
686
687 supply_register_by_name (regcache, "cppr", cppr);
688 }
689
690 /* Hardware Transactional Memory checkpointed Data Stream Control
691 Register regset store function. */
692
693 static void
694 ppc_store_tm_cdscrregset (struct regcache *regcache, const void *buf)
695 {
696 const char *cdscr = (const char *) buf;
697
698 supply_register_by_name (regcache, "cdscr", cdscr);
699 }
700
701 /* Hardware Transactional Memory checkpointed Target Address Register
702 regset store function. */
703
704 static void
705 ppc_store_tm_ctarregset (struct regcache *regcache, const void *buf)
706 {
707 const char *ctar = (const char *) buf;
708
709 supply_register_by_name (regcache, "ctar", ctar);
710 }
711
712 static void
713 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
714 {
715 int i, base;
716 char *regset = (char *) buf;
717
718 base = find_regno (regcache->tdesc, "vs0h");
719 for (i = 0; i < 32; i++)
720 collect_register (regcache, base + i, &regset[i * 8]);
721 }
722
723 static void
724 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
725 {
726 int i, base;
727 const char *regset = (const char *) buf;
728
729 base = find_regno (regcache->tdesc, "vs0h");
730 for (i = 0; i < 32; i++)
731 supply_register (regcache, base + i, &regset[i * 8]);
732 }
733
734 static void
735 ppc_fill_vrregset (struct regcache *regcache, void *buf)
736 {
737 int i, base;
738 char *regset = (char *) buf;
739 int vscr_offset = 0;
740
741 base = find_regno (regcache->tdesc, "vr0");
742 for (i = 0; i < 32; i++)
743 collect_register (regcache, base + i, &regset[i * 16]);
744
745 if (__BYTE_ORDER == __BIG_ENDIAN)
746 vscr_offset = 12;
747
748 collect_register_by_name (regcache, "vscr",
749 &regset[32 * 16 + vscr_offset]);
750
751 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
752 }
753
754 static void
755 ppc_store_vrregset (struct regcache *regcache, const void *buf)
756 {
757 int i, base;
758 const char *regset = (const char *) buf;
759 int vscr_offset = 0;
760
761 base = find_regno (regcache->tdesc, "vr0");
762 for (i = 0; i < 32; i++)
763 supply_register (regcache, base + i, &regset[i * 16]);
764
765 if (__BYTE_ORDER == __BIG_ENDIAN)
766 vscr_offset = 12;
767
768 supply_register_by_name (regcache, "vscr",
769 &regset[32 * 16 + vscr_offset]);
770 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
771 }
772
773 struct gdb_evrregset_t
774 {
775 unsigned long evr[32];
776 unsigned long long acc;
777 unsigned long spefscr;
778 };
779
780 static void
781 ppc_fill_evrregset (struct regcache *regcache, void *buf)
782 {
783 int i, ev0;
784 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
785
786 ev0 = find_regno (regcache->tdesc, "ev0h");
787 for (i = 0; i < 32; i++)
788 collect_register (regcache, ev0 + i, &regset->evr[i]);
789
790 collect_register_by_name (regcache, "acc", &regset->acc);
791 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
792 }
793
794 static void
795 ppc_store_evrregset (struct regcache *regcache, const void *buf)
796 {
797 int i, ev0;
798 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
799
800 ev0 = find_regno (regcache->tdesc, "ev0h");
801 for (i = 0; i < 32; i++)
802 supply_register (regcache, ev0 + i, &regset->evr[i]);
803
804 supply_register_by_name (regcache, "acc", &regset->acc);
805 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
806 }
807
808 static struct regset_info ppc_regsets[] = {
809 /* List the extra register sets before GENERAL_REGS. That way we will
810 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
811 general registers. Some kernels support these, but not the newer
812 PPC_PTRACE_GETREGS. */
813 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CTAR, 0, EXTENDED_REGS,
814 NULL, ppc_store_tm_ctarregset },
815 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CDSCR, 0, EXTENDED_REGS,
816 NULL, ppc_store_tm_cdscrregset },
817 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CPPR, 0, EXTENDED_REGS,
818 NULL, ppc_store_tm_cpprregset },
819 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVSX, 0, EXTENDED_REGS,
820 NULL, ppc_store_tm_cvsxregset },
821 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CVMX, 0, EXTENDED_REGS,
822 NULL, ppc_store_tm_cvrregset },
823 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CFPR, 0, EXTENDED_REGS,
824 NULL, ppc_store_tm_cfprregset },
825 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_CGPR, 0, EXTENDED_REGS,
826 NULL, ppc_store_tm_cgprregset },
827 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TM_SPR, 0, EXTENDED_REGS,
828 ppc_fill_tm_sprregset, ppc_store_tm_sprregset },
829 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_EBB, 0, EXTENDED_REGS,
830 NULL, ppc_store_ebbregset },
831 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PMU, 0, EXTENDED_REGS,
832 ppc_fill_pmuregset, ppc_store_pmuregset },
833 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_TAR, 0, EXTENDED_REGS,
834 ppc_fill_tarregset, ppc_store_tarregset },
835 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_PPR, 0, EXTENDED_REGS,
836 ppc_fill_pprregset, ppc_store_pprregset },
837 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PPC_DSCR, 0, EXTENDED_REGS,
838 ppc_fill_dscrregset, ppc_store_dscrregset },
839 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
840 ppc_fill_vsxregset, ppc_store_vsxregset },
841 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
842 ppc_fill_vrregset, ppc_store_vrregset },
843 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
844 ppc_fill_evrregset, ppc_store_evrregset },
845 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
846 NULL_REGSET
847 };
848
849 static struct usrregs_info ppc_usrregs_info =
850 {
851 ppc_num_regs,
852 ppc_regmap,
853 };
854
855 static struct regsets_info ppc_regsets_info =
856 {
857 ppc_regsets, /* regsets */
858 0, /* num_regsets */
859 NULL, /* disabled_regsets */
860 };
861
862 static struct regs_info myregs_info =
863 {
864 NULL, /* regset_bitmap */
865 &ppc_usrregs_info,
866 &ppc_regsets_info
867 };
868
869 const regs_info *
870 ppc_target::get_regs_info ()
871 {
872 return &myregs_info;
873 }
874
875 void
876 ppc_target::low_arch_setup ()
877 {
878 const struct target_desc *tdesc;
879 struct regset_info *regset;
880 struct ppc_linux_features features = ppc_linux_no_features;
881
882 int tid = lwpid_of (current_thread);
883
884 features.wordsize = ppc_linux_target_wordsize (tid);
885
886 if (features.wordsize == 4)
887 tdesc = tdesc_powerpc_32l;
888 else
889 tdesc = tdesc_powerpc_64l;
890
891 current_process ()->tdesc = tdesc;
892
893 /* The value of current_process ()->tdesc needs to be set for this
894 call. */
895 ppc_hwcap = linux_get_hwcap (features.wordsize);
896 ppc_hwcap2 = linux_get_hwcap2 (features.wordsize);
897
898 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
899
900 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
901 features.vsx = true;
902
903 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
904 features.altivec = true;
905
906 if ((ppc_hwcap2 & PPC_FEATURE2_DSCR)
907 && ppc_check_regset (tid, NT_PPC_DSCR, PPC_LINUX_SIZEOF_DSCRREGSET)
908 && ppc_check_regset (tid, NT_PPC_PPR, PPC_LINUX_SIZEOF_PPRREGSET))
909 {
910 features.ppr_dscr = true;
911 if ((ppc_hwcap2 & PPC_FEATURE2_ARCH_2_07)
912 && (ppc_hwcap2 & PPC_FEATURE2_TAR)
913 && (ppc_hwcap2 & PPC_FEATURE2_EBB)
914 && ppc_check_regset (tid, NT_PPC_TAR,
915 PPC_LINUX_SIZEOF_TARREGSET)
916 && ppc_check_regset (tid, NT_PPC_EBB,
917 PPC_LINUX_SIZEOF_EBBREGSET)
918 && ppc_check_regset (tid, NT_PPC_PMU,
919 PPC_LINUX_SIZEOF_PMUREGSET))
920 {
921 features.isa207 = true;
922 if ((ppc_hwcap2 & PPC_FEATURE2_HTM)
923 && ppc_check_regset (tid, NT_PPC_TM_SPR,
924 PPC_LINUX_SIZEOF_TM_SPRREGSET))
925 features.htm = true;
926 }
927 }
928
929 tdesc = ppc_linux_match_description (features);
930
931 /* On 32-bit machines, check for SPE registers.
932 Set the low target's regmap field appropriately. */
933 #ifndef __powerpc64__
934 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
935 tdesc = tdesc_powerpc_e500l;
936
937 if (!ppc_regmap_adjusted)
938 {
939 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
940 ppc_usrregs_info.regmap = ppc_regmap_e500;
941
942 /* If the FPSCR is 64-bit wide, we need to fetch the whole
943 64-bit slot and not just its second word. The PT_FPSCR
944 supplied in a 32-bit GDB compilation doesn't reflect
945 this. */
946 if (register_size (tdesc, 70) == 8)
947 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
948
949 ppc_regmap_adjusted = 1;
950 }
951 #endif
952
953 current_process ()->tdesc = tdesc;
954
955 for (regset = ppc_regsets; regset->size >= 0; regset++)
956 switch (regset->get_request)
957 {
958 case PTRACE_GETVRREGS:
959 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
960 break;
961 case PTRACE_GETVSXREGS:
962 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
963 break;
964 case PTRACE_GETEVRREGS:
965 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
966 regset->size = 32 * 4 + 8 + 4;
967 else
968 regset->size = 0;
969 break;
970 case PTRACE_GETREGSET:
971 switch (regset->nt_type)
972 {
973 case NT_PPC_PPR:
974 regset->size = (features.ppr_dscr ?
975 PPC_LINUX_SIZEOF_PPRREGSET : 0);
976 break;
977 case NT_PPC_DSCR:
978 regset->size = (features.ppr_dscr ?
979 PPC_LINUX_SIZEOF_DSCRREGSET : 0);
980 break;
981 case NT_PPC_TAR:
982 regset->size = (features.isa207 ?
983 PPC_LINUX_SIZEOF_TARREGSET : 0);
984 break;
985 case NT_PPC_EBB:
986 regset->size = (features.isa207 ?
987 PPC_LINUX_SIZEOF_EBBREGSET : 0);
988 break;
989 case NT_PPC_PMU:
990 regset->size = (features.isa207 ?
991 PPC_LINUX_SIZEOF_PMUREGSET : 0);
992 break;
993 case NT_PPC_TM_SPR:
994 regset->size = (features.htm ?
995 PPC_LINUX_SIZEOF_TM_SPRREGSET : 0);
996 break;
997 case NT_PPC_TM_CGPR:
998 if (features.wordsize == 4)
999 regset->size = (features.htm ?
1000 PPC32_LINUX_SIZEOF_CGPRREGSET : 0);
1001 else
1002 regset->size = (features.htm ?
1003 PPC64_LINUX_SIZEOF_CGPRREGSET : 0);
1004 break;
1005 case NT_PPC_TM_CFPR:
1006 regset->size = (features.htm ?
1007 PPC_LINUX_SIZEOF_CFPRREGSET : 0);
1008 break;
1009 case NT_PPC_TM_CVMX:
1010 regset->size = (features.htm ?
1011 PPC_LINUX_SIZEOF_CVMXREGSET : 0);
1012 break;
1013 case NT_PPC_TM_CVSX:
1014 regset->size = (features.htm ?
1015 PPC_LINUX_SIZEOF_CVSXREGSET : 0);
1016 break;
1017 case NT_PPC_TM_CPPR:
1018 regset->size = (features.htm ?
1019 PPC_LINUX_SIZEOF_CPPRREGSET : 0);
1020 break;
1021 case NT_PPC_TM_CDSCR:
1022 regset->size = (features.htm ?
1023 PPC_LINUX_SIZEOF_CDSCRREGSET : 0);
1024 break;
1025 case NT_PPC_TM_CTAR:
1026 regset->size = (features.htm ?
1027 PPC_LINUX_SIZEOF_CTARREGSET : 0);
1028 break;
1029 default:
1030 break;
1031 }
1032 break;
1033 default:
1034 break;
1035 }
1036 }
1037
1038 /* Implementation of target ops method "supports_tracepoints". */
1039
1040 bool
1041 ppc_target::supports_tracepoints ()
1042 {
1043 return true;
1044 }
1045
1046 /* Get the thread area address. This is used to recognize which
1047 thread is which when tracing with the in-process agent library. We
1048 don't read anything from the address, and treat it as opaque; it's
1049 the address itself that we assume is unique per-thread. */
1050
1051 int
1052 ppc_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
1053 {
1054 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
1055 struct thread_info *thr = get_lwp_thread (lwp);
1056 struct regcache *regcache = get_thread_regcache (thr, 1);
1057 ULONGEST tp = 0;
1058
1059 #ifdef __powerpc64__
1060 if (register_size (regcache->tdesc, 0) == 8)
1061 collect_register_by_name (regcache, "r13", &tp);
1062 else
1063 #endif
1064 collect_register_by_name (regcache, "r2", &tp);
1065
1066 *addr = tp;
1067
1068 return 0;
1069 }
1070
1071 #ifdef __powerpc64__
1072
1073 /* Older glibc doesn't provide this. */
1074
1075 #ifndef EF_PPC64_ABI
1076 #define EF_PPC64_ABI 3
1077 #endif
1078
1079 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
1080 inferiors. */
1081
1082 static int
1083 is_elfv2_inferior (void)
1084 {
1085 /* To be used as fallback if we're unable to determine the right result -
1086 assume inferior uses the same ABI as gdbserver. */
1087 #if _CALL_ELF == 2
1088 const int def_res = 1;
1089 #else
1090 const int def_res = 0;
1091 #endif
1092 CORE_ADDR phdr;
1093 Elf64_Ehdr ehdr;
1094
1095 const struct target_desc *tdesc = current_process ()->tdesc;
1096 int wordsize = register_size (tdesc, 0);
1097
1098 if (!linux_get_auxv (wordsize, AT_PHDR, &phdr))
1099 return def_res;
1100
1101 /* Assume ELF header is at the beginning of the page where program headers
1102 are located. If it doesn't look like one, bail. */
1103
1104 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
1105 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
1106 return def_res;
1107
1108 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
1109 }
1110
1111 #endif
1112
1113 /* Generate a ds-form instruction in BUF and return the number of instructions written.
1114
1115 0 6 11 16 30 32
1116 | OPCD | RST | RA | DS |XO| */
1117
1118 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
1119 static int
1120 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
1121 {
1122 uint32_t insn;
1123
1124 gdb_assert ((opcd & ~0x3f) == 0);
1125 gdb_assert ((rst & ~0x1f) == 0);
1126 gdb_assert ((ra & ~0x1f) == 0);
1127 gdb_assert ((xo & ~0x3) == 0);
1128
1129 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
1130 *buf = (opcd << 26) | insn;
1131 return 1;
1132 }
1133
1134 /* The following are frequently used ds-form instructions. */
1135
1136 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
1137 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
1138 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
1139 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
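/* For illustration, GEN_STD (buf, 31, 1, 16) assembles "std r31, 16(r1)",
   i.e. (62 << 26) | (31 << 21) | (1 << 16) | 16 = 0xfbe10010, and
   GEN_STDU (buf, 1, 1, -32) assembles "stdu r1, -32(r1)" (0xf821ffe1).
   The DS offset must be a multiple of 4, since its low two bits hold XO.  */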
1140
1141 /* Generate a d-form instruction in BUF.
1142
1143 0 6 11 16 32
1144 | OPCD | RST | RA | D | */
1145
1146 static int
1147 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
1148 {
1149 uint32_t insn;
1150
1151 gdb_assert ((opcd & ~0x3f) == 0);
1152 gdb_assert ((rst & ~0x1f) == 0);
1153 gdb_assert ((ra & ~0x1f) == 0);
1154
1155 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
1156 *buf = (opcd << 26) | insn;
1157 return 1;
1158 }
1159
1160 /* The following are frequently used d-form instructions. */
1161
1162 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
1163 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
1164 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
1165 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
1166 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
1167 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
1168 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
1169 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
1170 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
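/* For illustration, GEN_ADDI (buf, 1, 1, 32) assembles "addi r1, r1, 32",
   i.e. (14 << 26) | (1 << 21) | (1 << 16) | 32 = 0x38210020, while
   GEN_LI (buf, 5, -1) assembles "li r5, -1" with the sign-extended
   immediate in the 16-bit SI field.  */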
1171
1172 /* Generate an xfx-form instruction in BUF and return the number of
1173 instructions written.
1174
1175 0 6 11 21 31 32
1176 | OPCD | RST | RI | XO |/| */
1177
1178 static int
1179 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
1180 {
1181 uint32_t insn;
1182 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
1183
1184 gdb_assert ((opcd & ~0x3f) == 0);
1185 gdb_assert ((rst & ~0x1f) == 0);
1186 gdb_assert ((xo & ~0x3ff) == 0);
1187
1188 insn = (rst << 21) | (n << 11) | (xo << 1);
1189 *buf = (opcd << 26) | insn;
1190 return 1;
1191 }
1192
1193 /* The following are frequently used xfx-form instructions. */
1194
1195 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
1196 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
1197 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
1198 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
1199 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
1200 E & 0xf, 598)
1201 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
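/* For illustration, GEN_MFSPR (buf, 5, 8) assembles "mflr r5" (SPR 8 is LR,
   SPR 9 is CTR, SPR 1 is XER).  gen_xfx_form swaps the two 5-bit halves of
   the SPR number, giving (31 << 26) | (5 << 21) | (0x100 << 11) | (339 << 1)
   = 0x7ca802a6.  */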
1202
1203
1204 /* Generate an x-form instruction in BUF and return the number of instructions written.
1205
1206 0 6 11 16 21 31 32
1207 | OPCD | RST | RA | RB | XO |RC| */
1208
1209 static int
1210 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
1211 {
1212 uint32_t insn;
1213
1214 gdb_assert ((opcd & ~0x3f) == 0);
1215 gdb_assert ((rst & ~0x1f) == 0);
1216 gdb_assert ((ra & ~0x1f) == 0);
1217 gdb_assert ((rb & ~0x1f) == 0);
1218 gdb_assert ((xo & ~0x3ff) == 0);
1219 gdb_assert ((rc & ~1) == 0);
1220
1221 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
1222 *buf = (opcd << 26) | insn;
1223 return 1;
1224 }
1225
1226 /* The following are frequently used x-form instructions. */
1227
1228 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
1229 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
1230 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
1231 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
1232 /* Assume bf = cr7. */
1233 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
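/* For illustration, GEN_MR (buf, 3, 4) assembles "mr r3, r4", i.e.
   "or r3, r4, r4" = 0x7c832378.  GEN_CMPW compares into cr7 (see the
   "Assume bf = cr7" note above), which is the field the GEN_BNE b-form
   macro below tests.  */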
1234
1235
1236 /* Generate an md-form instruction in BUF and return the number of instructions written.
1237
1238 0 6 11 16 21 27 30 31 32
1239 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
1240
1241 static int
1242 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
1243 int xo, int rc)
1244 {
1245 uint32_t insn;
1246 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
1247 unsigned int sh0_4 = sh & 0x1f;
1248 unsigned int sh5 = (sh >> 5) & 1;
1249
1250 gdb_assert ((opcd & ~0x3f) == 0);
1251 gdb_assert ((rs & ~0x1f) == 0);
1252 gdb_assert ((ra & ~0x1f) == 0);
1253 gdb_assert ((sh & ~0x3f) == 0);
1254 gdb_assert ((mb & ~0x3f) == 0);
1255 gdb_assert ((xo & ~0x7) == 0);
1256 gdb_assert ((rc & ~0x1) == 0);
1257
1258 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
1259 | (sh5 << 1) | (xo << 2) | (rc & 1);
1260 *buf = (opcd << 26) | insn;
1261 return 1;
1262 }
1263
1264 /* The following are frequently used md-form instructions. */
1265
1266 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
1267 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
1268 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
1269 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
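/* For illustration, GEN_RLDICL (buf, 3, 3, 0, 32) assembles
   "rldicl r3, r3, 0, 32" (the "clrldi r3, r3, 32" idiom, 0x78630020), which
   gen_limm below uses to clear the upper 32 bits of a register.  */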
1270
1271 /* Generate an i-form instruction in BUF and return the number of instructions written.
1272
1273 0 6 30 31 32
1274 | OPCD | LI |AA|LK| */
1275
1276 static int
1277 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
1278 {
1279 uint32_t insn;
1280
1281 gdb_assert ((opcd & ~0x3f) == 0);
1282
1283 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
1284 *buf = (opcd << 26) | insn;
1285 return 1;
1286 }
1287
1288 /* The following are frequently used i-form instructions. */
1289
1290 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
1291 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
1292
1293 /* Generate a b-form instruction in BUF and return the number of instructions written.
1294
1295 0 6 11 16 30 31 32
1296 | OPCD | BO | BI | BD |AA|LK| */
1297
1298 static int
1299 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
1300 int aa, int lk)
1301 {
1302 uint32_t insn;
1303
1304 gdb_assert ((opcd & ~0x3f) == 0);
1305 gdb_assert ((bo & ~0x1f) == 0);
1306 gdb_assert ((bi & ~0x1f) == 0);
1307
1308 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
1309 *buf = (opcd << 26) | insn;
1310 return 1;
1311 }
1312
1313 /* The following are frequently used b-form instructions. */
1314 /* Assume bi = cr7. */
1315 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
1316
1317 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
1318 respectively.  They are primarily used to save/restore GPRs in the jump
1319 pad, not for bytecode compilation. */
1320
1321 #ifdef __powerpc64__
1322 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
1323 GEN_LD (buf, rt, ra, si) : \
1324 GEN_LWZ (buf, rt, ra, si))
1325 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
1326 GEN_STD (buf, rt, ra, si) : \
1327 GEN_STW (buf, rt, ra, si))
1328 #else
1329 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
1330 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
1331 #endif
1332
1333 /* Generate a sequence of instructions to load IMM in the register REG.
1334 Write the instructions in BUF and return the number of bytes written. */
1335 Write the instructions in BUF and return the number of instructions written. */
1336 static int
1337 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
1338 {
1339 uint32_t *p = buf;
1340
1341 if ((imm + 32768) < 65536)
1342 {
1343 /* li reg, imm[15:0] */
1344 p += GEN_LI (p, reg, imm);
1345 }
1346 else if ((imm >> 32) == 0)
1347 {
1348 /* lis reg, imm[31:16]
1349 ori reg, reg, imm[15:0]
1350 rldicl reg, reg, 0, 32 */
1351 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
1352 if ((imm & 0xffff) != 0)
1353 p += GEN_ORI (p, reg, reg, imm & 0xffff);
1354 /* Clear upper 32-bit if sign-bit is set. */
1355 if (imm & (1u << 31) && is_64)
1356 p += GEN_RLDICL (p, reg, reg, 0, 32);
1357 }
1358 else
1359 {
1360 gdb_assert (is_64);
1361 /* lis reg, <imm[63:48]>
1362 ori reg, reg, <imm[48:32]>
1363 rldicr reg, reg, 32, 31
1364 oris reg, reg, <imm[31:16]>
1365 ori reg, reg, <imm[15:0]> */
1366 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1367 if (((imm >> 32) & 0xffff) != 0)
1368 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1369 p += GEN_RLDICR (p, reg, reg, 32, 31);
1370 if (((imm >> 16) & 0xffff) != 0)
1371 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1372 if ((imm & 0xffff) != 0)
1373 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1374 }
1375
1376 return p - buf;
1377 }
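/* For illustration, gen_limm for REG = 3 and the full 64-bit immediate
   0x1234567890abcdef (with IS_64 set) emits the five-instruction sequence

       lis    3, 0x1234
       ori    3, 3, 0x5678
       rldicr 3, 3, 32, 31
       oris   3, 3, 0x90ab
       ori    3, 3, 0xcdef

   and returns 5, while a small immediate such as 1 needs only "li 3, 1".  */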
1378
1379 /* Generate a sequence for an atomic exchange at location LOCK.
1380 This code sequence clobbers r6, r7, r8.  LOCK is the location for
1381 the atomic exchange, OLD_VALUE is the expected old value stored in
1382 the location, and R_NEW is a register holding the new value. */
1383
1384 static int
1385 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1386 int is_64)
1387 {
1388 const int r_lock = 6;
1389 const int r_old = 7;
1390 const int r_tmp = 8;
1391 uint32_t *p = buf;
1392
1393 /*
1394 1: lwarx TMP, 0, LOCK
1395 cmpwi TMP, OLD
1396 bne 1b
1397 stwcx. NEW, 0, LOCK
1398 bne 1b */
1399
1400 p += gen_limm (p, r_lock, lock, is_64);
1401 p += gen_limm (p, r_old, old_value, is_64);
1402
1403 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1404 p += GEN_CMPW (p, r_tmp, r_old);
1405 p += GEN_BNE (p, -8);
1406 p += GEN_STWCX (p, r_new, 0, r_lock);
1407 p += GEN_BNE (p, -16);
1408
1409 return p - buf;
1410 }
1411
1412 /* Generate a sequence of instructions for calling a function
1413 at the address FN.  Return the number of instructions written to BUF. */
1414
1415 static int
1416 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1417 {
1418 uint32_t *p = buf;
1419
1420 /* Must be called via r12 so that the callee can calculate its TOC address. */
1421 p += gen_limm (p, 12, fn, is_64);
1422 if (is_opd)
1423 {
1424 p += GEN_LOAD (p, 11, 12, 16, is_64);
1425 p += GEN_LOAD (p, 2, 12, 8, is_64);
1426 p += GEN_LOAD (p, 12, 12, 0, is_64);
1427 }
1428 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1429 *p++ = 0x4e800421; /* bctrl */
1430
1431 return p - buf;
1432 }
1433
1434 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1435 of instruction. This function is used to adjust pc-relative instructions
1436 when copying. */
1437
1438 static void
1439 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1440 {
1441 uint32_t insn, op6;
1442 long rel, newrel;
1443
1444 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1445 op6 = PPC_OP6 (insn);
1446
1447 if (op6 == 18 && (insn & 2) == 0)
1448 {
1449 /* branch && AA = 0 */
1450 rel = PPC_LI (insn);
1451 newrel = (oldloc - *to) + rel;
1452
1453 /* Out of range. Cannot relocate instruction. */
1454 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1455 return;
1456
1457 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1458 }
1459 else if (op6 == 16 && (insn & 2) == 0)
1460 {
1461 /* conditional branch && AA = 0 */
1462
1463 /* If the new relocation is too big for even a 26-bit unconditional
1464 branch, there is nothing we can do. Just abort.
1465
1466 Otherwise, if it can fit in a 16-bit conditional branch, just
1467 copy the instruction and relocate the address.
1468
1469 If it's too big for a 16-bit conditional branch, try to invert the
1470 condition and jump with a 26-bit branch.  For example,
1471
1472 beq .Lgoto
1473 INSN1
1474
1475 =>
1476
1477 bne 1f (+8)
1478 b .Lgoto
1479 1:INSN1
1480
1481 After this transform, we actually jump from *TO+4 instead of *TO, so
1482 check the relocation again, because it will be one insn farther than
1483 before if *TO is after OLDLOC.
1484
1485
1486 A BDNZT (or similar) is transformed from
1487
1488 bdnzt eq, .Lgoto
1489 INSN1
1490
1491 =>
1492
1493 bdz 1f (+12)
1494 bf eq, 1f (+8)
1495 b .Lgoto
1496 1:INSN1
1497
1498 See also "BO field encodings". */
1499
1500 rel = PPC_BD (insn);
1501 newrel = (oldloc - *to) + rel;
1502
1503 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1504 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1505 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1506 {
1507 newrel -= 4;
1508
1509 /* Out of range. Cannot relocate instruction. */
1510 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1511 return;
1512
1513 if ((PPC_BO (insn) & 0x14) == 0x4)
1514 insn ^= (1 << 24);
1515 else if ((PPC_BO (insn) & 0x14) == 0x10)
1516 insn ^= (1 << 22);
1517
1518 /* Jump over the unconditional branch. */
1519 insn = (insn & ~0xfffc) | 0x8;
1520 target_write_memory (*to, (unsigned char *) &insn, 4);
1521 *to += 4;
1522
1523 /* Build an unconditional branch and copy the LK bit. */
1524 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1525 target_write_memory (*to, (unsigned char *) &insn, 4);
1526 *to += 4;
1527
1528 return;
1529 }
1530 else if ((PPC_BO (insn) & 0x14) == 0)
1531 {
1532 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1533 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1534
1535 newrel -= 8;
1536
1537 /* Out of range. Cannot relocate instruction. */
1538 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1539 return;
1540
1541 /* Copy BI field. */
1542 bf_insn |= (insn & 0x1f0000);
1543
1544 /* Invert condition. */
1545 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1546 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1547
1548 target_write_memory (*to, (unsigned char *) &bdnz_insn, 4);
1549 *to += 4;
1550 target_write_memory (*to, (unsigned char *) &bf_insn, 4);
1551 *to += 4;
1552
1553 /* Build an unconditional branch and copy the LK bit. */
1554 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1555 target_write_memory (*to, (unsigned char *) &insn, 4);
1556 *to += 4;
1557
1558 return;
1559 }
1560 else /* (BO & 0x14) == 0x14, branch always. */
1561 {
1562 /* Out of range. Cannot relocate instruction. */
1563 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1564 return;
1565
1566 /* Build an unconditional branch and copy the LK bit. */
1567 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1568 target_write_memory (*to, (unsigned char *) &insn, 4);
1569 *to += 4;
1570
1571 return;
1572 }
1573 }
1574
1575 target_write_memory (*to, (unsigned char *) &insn, 4);
1576 *to += 4;
1577 }
1578
1579 bool
1580 ppc_target::supports_fast_tracepoints ()
1581 {
1582 return true;
1583 }
1584
1585 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1586 See target.h for details. */
1587
1588 int
1589 ppc_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1590 CORE_ADDR tpaddr,
1591 CORE_ADDR collector,
1592 CORE_ADDR lockaddr,
1593 ULONGEST orig_size,
1594 CORE_ADDR *jump_entry,
1595 CORE_ADDR *trampoline,
1596 ULONGEST *trampoline_size,
1597 unsigned char *jjump_pad_insn,
1598 ULONGEST *jjump_pad_insn_size,
1599 CORE_ADDR *adjusted_insn_addr,
1600 CORE_ADDR *adjusted_insn_addr_end,
1601 char *err)
1602 {
1603 uint32_t buf[256];
1604 uint32_t *p = buf;
1605 int j, offset;
1606 CORE_ADDR buildaddr = *jump_entry;
1607 const CORE_ADDR entryaddr = *jump_entry;
1608 int rsz, min_frame, frame_size, tp_reg;
1609 #ifdef __powerpc64__
1610 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1611 int is_64 = register_size (regcache->tdesc, 0) == 8;
1612 int is_opd = is_64 && !is_elfv2_inferior ();
1613 #else
1614 int is_64 = 0, is_opd = 0;
1615 #endif
1616
1617 #ifdef __powerpc64__
1618 if (is_64)
1619 {
1620 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1621 rsz = 8;
1622 min_frame = 112;
1623 frame_size = (40 * rsz) + min_frame;
1624 tp_reg = 13;
1625 }
1626 else
1627 {
1628 #endif
1629 rsz = 4;
1630 min_frame = 16;
1631 frame_size = (40 * rsz) + min_frame;
1632 tp_reg = 2;
1633 #ifdef __powerpc64__
1634 }
1635 #endif
1636
1637 /* Stack frame layout for this jump pad,
1638
1639 High thread_area (r13/r2) |
1640 tpoint - collecting_t obj
1641 PC/<tpaddr> | +36
1642 CTR | +35
1643 LR | +34
1644 XER | +33
1645 CR | +32
1646 R31 |
1647 R29 |
1648 ... |
1649 R1 | +1
1650 R0 - collected registers
1651 ... |
1652 ... |
1653 Low Back-chain -
1654
1655
1656 The code flow of this jump pad,
1657
1658 1. Adjust SP
1659 2. Save GPR and SPR
1660 3. Prepare argument
1661 4. Call gdb_collector
1662 5. Restore GPR and SPR
1663 6. Restore SP
1664 7. Build a jump for back to the program
1665 8. Copy/relocate original instruction
1666 9. Build a jump for replacing original instruction. */
1667
1668 /* Adjust stack pointer. */
1669 if (is_64)
1670 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1671 else
1672 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1673
1674 /* Store GPRs. Save R1 later, because it had just been modified, but
1675 we want the original value. */
1676 for (j = 2; j < 32; j++)
1677 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1678 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1679 /* Set r0 to the original value of r1 before adjusting stack frame,
1680 and then save it. */
1681 p += GEN_ADDI (p, 0, 1, frame_size);
1682 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1683
1684 /* Save CR, XER, LR, and CTR. */
1685 p += GEN_MFCR (p, 3); /* mfcr r3 */
1686 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1687 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1688 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1689 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1690 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1691 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1692 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1693
1694 /* Save PC<tpaddr> */
1695 p += gen_limm (p, 3, tpaddr, is_64);
1696 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1697
1698
1699 /* Setup arguments to collector. */
1700 /* Set r4 to collected registers. */
1701 p += GEN_ADDI (p, 4, 1, min_frame);
1702 /* Set r3 to TPOINT. */
1703 p += gen_limm (p, 3, tpoint, is_64);
1704
1705 /* Prepare collecting_t object for lock. */
1706 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1707 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1708 /* Set R5 to collecting object. */
1709 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1710
1711 p += GEN_LWSYNC (p);
1712 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1713 p += GEN_LWSYNC (p);
1714
1715 /* Call to collector. */
1716 p += gen_call (p, collector, is_64, is_opd);
1717
1718 /* Simply write 0 to release the lock. */
1719 p += gen_limm (p, 3, lockaddr, is_64);
1720 p += gen_limm (p, 4, 0, is_64);
1721 p += GEN_LWSYNC (p);
1722 p += GEN_STORE (p, 4, 3, 0, is_64);
1723
1724 /* Restore stack and registers. */
1725 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1726 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1727 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1728 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1729 p += GEN_MTCR (p, 3); /* mtcr r3 */
1730 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1731 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1732 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1733
1734 /* Restore GPRs. */
1735 for (j = 2; j < 32; j++)
1736 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1737 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1738 /* Restore SP. */
1739 p += GEN_ADDI (p, 1, 1, frame_size);
1740
1741 /* Flush instructions to inferior memory. */
1742 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1743
1744 /* Now, insert the original instruction to execute in the jump pad. */
1745 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1746 *adjusted_insn_addr_end = *adjusted_insn_addr;
1747 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1748
1749 /* Verify the relocation size.  It should be 4 for a normal copy, or
1750 8 or 12 for some conditional branches. */
1751 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1752 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1753 {
1754 sprintf (err, "E.Unexpected instruction length = %d "
1755 "when relocating instruction.",
1756 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1757 return 1;
1758 }
1759
1760 buildaddr = *adjusted_insn_addr_end;
1761 p = buf;
1762 /* Finally, write a jump back to the program. */
1763 offset = (tpaddr + 4) - buildaddr;
1764 if (offset >= (1 << 25) || offset < -(1 << 25))
1765 {
1766 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1767 "(offset 0x%x > 26-bit).", offset);
1768 return 1;
1769 }
1770 /* b <tpaddr+4> */
1771 p += GEN_B (p, offset);
1772 target_write_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1773 *jump_entry = buildaddr + (p - buf) * 4;
1774
1775 /* The jump pad is now built. Wire in a jump to our jump pad. This
1776 is always done last (by our caller actually), so that we can
1777 install fast tracepoints with threads running. This relies on
1778 the agent's atomic write support. */
1779 offset = entryaddr - tpaddr;
1780 if (offset >= (1 << 25) || offset < -(1 << 25))
1781 {
1782 sprintf (err, "E.Jump into jump pad too far from tracepoint "
1783 "(offset 0x%x > 26-bit).", offset);
1784 return 1;
1785 }
1786 /* b <jentry> */
1787 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1788 *jjump_pad_insn_size = 4;
1789
1790 return 0;
1791 }
1792
1793 /* Returns the minimum instruction length for installing a tracepoint. */
1794
1795 int
1796 ppc_target::get_min_fast_tracepoint_insn_len ()
1797 {
1798 return 4;
1799 }
1800
1801 /* Emits a given buffer into the target at current_insn_ptr. Length
1802 is in units of 32-bit words. */
1803
1804 static void
1805 emit_insns (uint32_t *buf, int n)
1806 {
1807 n = n * sizeof (uint32_t);
1808 target_write_memory (current_insn_ptr, (unsigned char *) buf, n);
1809 current_insn_ptr += n;
1810 }
1811
1812 #define __EMIT_ASM(NAME, INSNS) \
1813 do \
1814 { \
1815 extern uint32_t start_bcax_ ## NAME []; \
1816 extern uint32_t end_bcax_ ## NAME []; \
1817 emit_insns (start_bcax_ ## NAME, \
1818 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1819 __asm__ (".section .text.__ppcbcax\n\t" \
1820 "start_bcax_" #NAME ":\n\t" \
1821 INSNS "\n\t" \
1822 "end_bcax_" #NAME ":\n\t" \
1823 ".previous\n\t"); \
1824 } while (0)
1825
1826 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1827 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
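/* For instance, EMIT_ASM ("li 3, 0 \n") used on source line N expands (via
   __LINE__) to symbols start_bcax_N and end_bcax_N bracketing that
   instruction in gdbserver's own .text.__ppcbcax section; emit_insns then
   copies the host-assembled instructions into the inferior at
   current_insn_ptr.  */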
1828
1829 /*
1830
1831 Bytecode execution stack frame - 32-bit
1832
1833 | LR save area (SP + 4)
1834 SP' -> +- Back chain (SP + 0)
1835 | Save r31 for access saved arguments
1836 | Save r30 for bytecode stack pointer
1837 | Save r4 for incoming argument *value
1838 | Save r3 for incoming argument regs
1839 r30 -> +- Bytecode execution stack
1840 |
1841 | 64 bytes (8 doublewords) initially.
1842 | Expand stack as needed.
1843 |
1844 +-
1845 | Some padding for minimum stack frame and 16-byte alignment.
1846 | 16 bytes.
1847 SP +- Back-chain (SP')
1848
1849 initial frame size
1850 = 16 + (4 * 4) + 64
1851 = 96
1852
1853 r30 is the stack pointer for the bytecode machine.
1854 It should point to the next empty slot, so we can use LWZU for pop.
1855 r3 is used to cache the high part of the TOP value.
1856 It was the first argument, a pointer to regs.
1857 r4 is used to cache the low part of the TOP value.
1858 It was the second argument, a pointer to the result.
1859 We should set *result = TOP before leaving this function.
1860
1861 Note:
1862 * To restore stack at epilogue
1863 => sp = r31
1864 * To check stack is big enough for bytecode execution.
1865 => r30 - 8 > SP + 8
1866 * To return execution result.
1867 => 0(r4) = TOP
1868
1869 */
1870
1871 /* Regardless of endianness, register 3 is always the high part, 4 the low part.
1872 These defines are used when the register pair is stored/loaded.
1873 Likewise, to simplify the code, there is a similar define for 5:6. */
1874
1875 #if __BYTE_ORDER == __LITTLE_ENDIAN
1876 #define TOP_FIRST "4"
1877 #define TOP_SECOND "3"
1878 #define TMP_FIRST "6"
1879 #define TMP_SECOND "5"
1880 #else
1881 #define TOP_FIRST "3"
1882 #define TOP_SECOND "4"
1883 #define TMP_FIRST "5"
1884 #define TMP_SECOND "6"
1885 #endif
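
/* For example, on a little-endian target "stw " TOP_FIRST ", 0(30)"
   expands to "stw 4, 0(30)", so the low word of TOP (r4) is stored at
   the lower address, matching the inferior's in-memory layout of a
   64-bit value.  */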
1886
1887 /* Emit prologue in inferior memory. See above comments. */
1888
1889 static void
1890 ppc_emit_prologue (void)
1891 {
1892 EMIT_ASM (/* Save return address. */
1893 "mflr 0 \n"
1894 "stw 0, 4(1) \n"
1895 /* Adjust SP. 96 is the initial frame size. */
1896 "stwu 1, -96(1) \n"
1897 /* Save r31, r30, and incoming arguments. */
1898 "stw 31, 96-4(1) \n"
1899 "stw 30, 96-8(1) \n"
1900 "stw 4, 96-12(1) \n"
1901 "stw 3, 96-16(1) \n"
1902 /* Point r31 at the original r1 for accessing arguments. */
1903 "addi 31, 1, 96 \n"
1904 /* Set r30 to point at the stack top. */
1905 "addi 30, 1, 64 \n"
1906 /* Initialize r3/TOP to 0. */
1907 "li 3, 0 \n"
1908 "li 4, 0 \n");
1909 }
1910
1911 /* Emit epilogue in inferior memory. See above comments. */
1912
1913 static void
1914 ppc_emit_epilogue (void)
1915 {
1916 EMIT_ASM (/* *result = TOP */
1917 "lwz 5, -12(31) \n"
1918 "stw " TOP_FIRST ", 0(5) \n"
1919 "stw " TOP_SECOND ", 4(5) \n"
1920 /* Restore registers. */
1921 "lwz 31, -4(31) \n"
1922 "lwz 30, -8(31) \n"
1923 /* Restore SP. */
1924 "lwz 1, 0(1) \n"
1925 /* Restore LR. */
1926 "lwz 0, 4(1) \n"
1927 /* Return 0 for no-error. */
1928 "li 3, 0 \n"
1929 "mtlr 0 \n"
1930 "blr \n");
1931 }
1932
1933 /* TOP = stack[--sp] + TOP */
1934
1935 static void
1936 ppc_emit_add (void)
1937 {
1938 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1939 "lwz " TMP_SECOND ", 4(30)\n"
1940 "addc 4, 6, 4 \n"
1941 "adde 3, 5, 3 \n");
1942 }
1943
1944 /* TOP = stack[--sp] - TOP */
1945
1946 static void
1947 ppc_emit_sub (void)
1948 {
1949 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1950 "lwz " TMP_SECOND ", 4(30) \n"
1951 "subfc 4, 4, 6 \n"
1952 "subfe 3, 3, 5 \n");
1953 }
1954
1955 /* TOP = stack[--sp] * TOP */
1956
1957 static void
1958 ppc_emit_mul (void)
1959 {
1960 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1961 "lwz " TMP_SECOND ", 4(30) \n"
1962 "mulhwu 7, 6, 4 \n"
1963 "mullw 3, 6, 3 \n"
1964 "mullw 5, 4, 5 \n"
1965 "mullw 4, 6, 4 \n"
1966 "add 3, 5, 3 \n"
1967 "add 3, 7, 3 \n");
1968 }
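
/* The sequence above builds the low 64 bits of the product from 32-bit
   halves: the new low word is the low half of lo(a)*lo(b), and the new
   high word is the high half of lo(a)*lo(b) plus the truncated cross
   terms lo(a)*hi(b) and hi(a)*lo(b); anything above bit 63 is
   discarded.  */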
1969
1970 /* TOP = stack[--sp] << TOP */
1971
1972 static void
1973 ppc_emit_lsh (void)
1974 {
1975 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1976 "lwz " TMP_SECOND ", 4(30) \n"
1977 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1978 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1979 "slw 5, 5, 4\n" /* Shift high part left */
1980 "slw 4, 6, 4\n" /* Shift low part left */
1981 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1982 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1983 "or 3, 5, 3\n"
1984 "or 3, 7, 3\n"); /* Assemble high part */
1985 }
1986
1987 /* TOP = stack[--sp] >> TOP
1988 (Arithmetic shift right) */
1989
1990 static void
1991 ppc_emit_rsh_signed (void)
1992 {
1993 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1994 "lwz " TMP_SECOND ", 4(30) \n"
1995 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1996 "sraw 3, 5, 4\n" /* Shift high part right */
1997 "cmpwi 7, 1\n"
1998 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1999 "sraw 4, 5, 7\n" /* Shift high to low */
2000 "b 2f\n"
2001 "1:\n"
2002 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
2003 "srw 4, 6, 4\n" /* Shift low part right */
2004 "slw 5, 5, 7\n" /* Shift high to low */
2005 "or 4, 4, 5\n" /* Assemble low part */
2006 "2:\n");
2007 }
2008
2009 /* TOP = stack[--sp] >> TOP
2010 (Logical shift right) */
2011
2012 static void
2013 ppc_emit_rsh_unsigned (void)
2014 {
2015 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2016 "lwz " TMP_SECOND ", 4(30) \n"
2017 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
2018 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
2019 "srw 6, 6, 4\n" /* Shift low part right */
2020 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
2021 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
2022 "or 6, 6, 3\n"
2023 "srw 3, 5, 4\n" /* Shift high part right */
2024 "or 4, 6, 7\n"); /* Assemble low part */
2025 }
2026
2027 /* Emit code for the sign extension specified by ARG. */
2028
2029 static void
2030 ppc_emit_ext (int arg)
2031 {
2032 switch (arg)
2033 {
2034 case 8:
2035 EMIT_ASM ("extsb 4, 4\n"
2036 "srawi 3, 4, 31");
2037 break;
2038 case 16:
2039 EMIT_ASM ("extsh 4, 4\n"
2040 "srawi 3, 4, 31");
2041 break;
2042 case 32:
2043 EMIT_ASM ("srawi 3, 4, 31");
2044 break;
2045 default:
2046 emit_error = 1;
2047 }
2048 }
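
/* In each case above, "srawi 3, 4, 31" fills r3 (the high word) with
   copies of the sign bit of the freshly extended low word, completing
   the sign extension across the r3:r4 pair.  */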
2049
2050 /* Emit code for zero-extension specified by ARG. */
2051
2052 static void
2053 ppc_emit_zero_ext (int arg)
2054 {
2055 switch (arg)
2056 {
2057 case 8:
2058 EMIT_ASM ("clrlwi 4,4,24\n"
2059 "li 3, 0\n");
2060 break;
2061 case 16:
2062 EMIT_ASM ("clrlwi 4,4,16\n"
2063 "li 3, 0\n");
2064 break;
2065 case 32:
2066 EMIT_ASM ("li 3, 0");
2067 break;
2068 default:
2069 emit_error = 1;
2070 }
2071 }
2072
2073 /* TOP = !TOP
2074 i.e., TOP = (TOP == 0) ? 1 : 0; */
2075
2076 static void
2077 ppc_emit_log_not (void)
2078 {
2079 EMIT_ASM ("or 4, 3, 4 \n"
2080 "cntlzw 4, 4 \n"
2081 "srwi 4, 4, 5 \n"
2082 "li 3, 0 \n");
2083 }
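
/* cntlzw returns 32 only when its operand is zero, so shifting the
   count right by 5 yields 1 exactly when the 64-bit TOP was zero, and
   0 otherwise.  The same trick is reused by ppc_emit_equal below.  */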
2084
2085 /* TOP = stack[--sp] & TOP */
2086
2087 static void
2088 ppc_emit_bit_and (void)
2089 {
2090 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2091 "lwz " TMP_SECOND ", 4(30) \n"
2092 "and 4, 6, 4 \n"
2093 "and 3, 5, 3 \n");
2094 }
2095
2096 /* TOP = stack[--sp] | TOP */
2097
2098 static void
2099 ppc_emit_bit_or (void)
2100 {
2101 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2102 "lwz " TMP_SECOND ", 4(30) \n"
2103 "or 4, 6, 4 \n"
2104 "or 3, 5, 3 \n");
2105 }
2106
2107 /* TOP = stack[--sp] ^ TOP */
2108
2109 static void
2110 ppc_emit_bit_xor (void)
2111 {
2112 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2113 "lwz " TMP_SECOND ", 4(30) \n"
2114 "xor 4, 6, 4 \n"
2115 "xor 3, 5, 3 \n");
2116 }
2117
2118 /* TOP = ~TOP
2119 i.e., TOP = ~(TOP | TOP) */
2120
2121 static void
2122 ppc_emit_bit_not (void)
2123 {
2124 EMIT_ASM ("nor 3, 3, 3 \n"
2125 "nor 4, 4, 4 \n");
2126 }
2127
2128 /* TOP = stack[--sp] == TOP */
2129
2130 static void
2131 ppc_emit_equal (void)
2132 {
2133 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2134 "lwz " TMP_SECOND ", 4(30) \n"
2135 "xor 4, 6, 4 \n"
2136 "xor 3, 5, 3 \n"
2137 "or 4, 3, 4 \n"
2138 "cntlzw 4, 4 \n"
2139 "srwi 4, 4, 5 \n"
2140 "li 3, 0 \n");
2141 }
2142
2143 /* TOP = stack[--sp] < TOP
2144 (Signed comparison) */
2145
2146 static void
2147 ppc_emit_less_signed (void)
2148 {
2149 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2150 "lwz " TMP_SECOND ", 4(30) \n"
2151 "cmplw 6, 6, 4 \n"
2152 "cmpw 7, 5, 3 \n"
2153 /* CR6 bit 0 = low less and high equal */
2154 "crand 6*4+0, 6*4+0, 7*4+2\n"
2155 /* CR7 bit 0 = (low less and high equal) or high less */
2156 "cror 7*4+0, 7*4+0, 6*4+0\n"
2157 "mfcr 4 \n"
2158 "rlwinm 4, 4, 29, 31, 31 \n"
2159 "li 3, 0 \n");
2160 }
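
/* In the compare sequences above and below, mfcr copies CR0..CR7 into
   bits 0..31 of the destination register; CR7's LT bit is bit 28, so
   rotating left by 29 moves it to bit 31 (the least significant bit)
   and the 31,31 mask isolates it, leaving a 0/1 result.  */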
2161
2162 /* TOP = stack[--sp] < TOP
2163 (Unsigned comparison) */
2164
2165 static void
2166 ppc_emit_less_unsigned (void)
2167 {
2168 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2169 "lwz " TMP_SECOND ", 4(30) \n"
2170 "cmplw 6, 6, 4 \n"
2171 "cmplw 7, 5, 3 \n"
2172 /* CR6 bit 0 = low less and high equal */
2173 "crand 6*4+0, 6*4+0, 7*4+2\n"
2174 /* CR7 bit 0 = (low less and high equal) or high less */
2175 "cror 7*4+0, 7*4+0, 6*4+0\n"
2176 "mfcr 4 \n"
2177 "rlwinm 4, 4, 29, 31, 31 \n"
2178 "li 3, 0 \n");
2179 }
2180
2181 /* Access SIZE bytes of memory at the address in TOP.
2182 Zero-extend the value read. */
2183
2184 static void
2185 ppc_emit_ref (int size)
2186 {
2187 switch (size)
2188 {
2189 case 1:
2190 EMIT_ASM ("lbz 4, 0(4)\n"
2191 "li 3, 0");
2192 break;
2193 case 2:
2194 EMIT_ASM ("lhz 4, 0(4)\n"
2195 "li 3, 0");
2196 break;
2197 case 4:
2198 EMIT_ASM ("lwz 4, 0(4)\n"
2199 "li 3, 0");
2200 break;
2201 case 8:
2202 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2203 EMIT_ASM ("lwz 3, 4(4)\n"
2204 "lwz 4, 0(4)");
2205 else
2206 EMIT_ASM ("lwz 3, 0(4)\n"
2207 "lwz 4, 4(4)");
2208 break;
2209 }
2210 }
2211
2212 /* TOP = NUM */
2213
2214 static void
2215 ppc_emit_const (LONGEST num)
2216 {
2217 uint32_t buf[10];
2218 uint32_t *p = buf;
2219
2220 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
2221 p += gen_limm (p, 4, num & 0xffffffff, 0);
2222
2223 emit_insns (buf, p - buf);
2224 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2225 }
2226
2227 /* Set TOP to the value of register REG by calling the get_raw_reg function
2228 with two arguments, the collected buffer and the register number. */
2229
2230 static void
2231 ppc_emit_reg (int reg)
2232 {
2233 uint32_t buf[13];
2234 uint32_t *p = buf;
2235
2236 /* fctx->regs is passed in r3 and then saved in -16(31). */
2237 p += GEN_LWZ (p, 3, 31, -16);
2238 p += GEN_LI (p, 4, reg); /* li r4, reg */
2239 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
2240
2241 emit_insns (buf, p - buf);
2242 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2243
2244 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2245 {
2246 EMIT_ASM ("mr 5, 4\n"
2247 "mr 4, 3\n"
2248 "mr 3, 5\n");
2249 }
2250 }
2251
2252 /* TOP = stack[--sp] */
2253
2254 static void
2255 ppc_emit_pop (void)
2256 {
2257 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
2258 "lwz " TOP_SECOND ", 4(30) \n");
2259 }
2260
2261 /* stack[sp++] = TOP
2262
2263 Because we may use up the bytecode stack, expand it by 8 more doublewords
2264 if needed. */
2265
2266 static void
2267 ppc_emit_stack_flush (void)
2268 {
2269 /* Make sure the bytecode stack is big enough before the push.
2270 Otherwise, expand it by 64 more bytes. */
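
/* Note that the expansion below stores r31 (the SP saved at entry by
   the prologue) as the back chain of the new 64-byte frame, so the
   single "lwz 1, 0(1)" in the epilogue restores the original SP no
   matter how many times the stack has been expanded.  */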
2271
2272 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
2273 " stw " TOP_SECOND ", 4(30)\n"
2274 " addi 5, 30, -(8 + 8) \n"
2275 " cmpw 7, 5, 1 \n"
2276 " bgt 7, 1f \n"
2277 " stwu 31, -64(1) \n"
2278 "1:addi 30, 30, -8 \n");
2279 }
2280
2281 /* Swap TOP and stack[sp-1] */
2282
2283 static void
2284 ppc_emit_swap (void)
2285 {
2286 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
2287 "lwz " TMP_SECOND ", 12(30) \n"
2288 "stw " TOP_FIRST ", 8(30) \n"
2289 "stw " TOP_SECOND ", 12(30) \n"
2290 "mr 3, 5 \n"
2291 "mr 4, 6 \n");
2292 }
2293
2294 /* Discard N elements in the stack. Also used for ppc64. */
2295
2296 static void
2297 ppc_emit_stack_adjust (int n)
2298 {
2299 uint32_t buf[6];
2300 uint32_t *p = buf;
2301
2302 n = n << 3;
2303 if ((n >> 15) != 0)
2304 {
2305 emit_error = 1;
2306 return;
2307 }
2308
2309 p += GEN_ADDI (p, 30, 30, n);
2310
2311 emit_insns (buf, p - buf);
2312 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2313 }
2314
2315 /* Call function FN. */
2316
2317 static void
2318 ppc_emit_call (CORE_ADDR fn)
2319 {
2320 uint32_t buf[11];
2321 uint32_t *p = buf;
2322
2323 p += gen_call (p, fn, 0, 0);
2324
2325 emit_insns (buf, p - buf);
2326 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2327 }
2328
2329 /* FN's prototype is `LONGEST(*fn)(int)'.
2330 TOP = fn (arg1)
2331 */
2332
2333 static void
2334 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
2335 {
2336 uint32_t buf[15];
2337 uint32_t *p = buf;
2338
2339 /* Setup argument. arg1 is a 16-bit value. */
2340 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2341 p += gen_call (p, fn, 0, 0);
2342
2343 emit_insns (buf, p - buf);
2344 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2345
2346 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2347 {
2348 EMIT_ASM ("mr 5, 4\n"
2349 "mr 4, 3\n"
2350 "mr 3, 5\n");
2351 }
2352 }
2353
2354 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2355 fn (arg1, TOP)
2356
2357 TOP should be preserved/restored before/after the call. */
2358
2359 static void
2360 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
2361 {
2362 uint32_t buf[21];
2363 uint32_t *p = buf;
2364
2365 /* Save TOP. 0(30) is next-empty. */
2366 p += GEN_STW (p, 3, 30, 0);
2367 p += GEN_STW (p, 4, 30, 4);
2368
2369 /* Setup argument. arg1 is a 16-bit value. */
2370 if (__BYTE_ORDER == __LITTLE_ENDIAN)
2371 {
2372 p += GEN_MR (p, 5, 4);
2373 p += GEN_MR (p, 6, 3);
2374 }
2375 else
2376 {
2377 p += GEN_MR (p, 5, 3);
2378 p += GEN_MR (p, 6, 4);
2379 }
2380 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2381 p += gen_call (p, fn, 0, 0);
2382
2383 /* Restore TOP */
2384 p += GEN_LWZ (p, 3, 30, 0);
2385 p += GEN_LWZ (p, 4, 30, 4);
2386
2387 emit_insns (buf, p - buf);
2388 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2389 }
2390
2391 /* Note in the following goto ops:
2392
2393 When emitting goto, the target address is later relocated by
2394 write_goto_address. OFFSET_P is the offset of the branch instruction
2395 in the code sequence, and SIZE_P is how to relocate the instruction,
2396 recognized by ppc_write_goto_address. In the current implementation,
2397 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2398 */
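
/* For instance, ppc_emit_if_goto below ends with "1:bne 0, 1b", a
   conditional branch to itself.  That self-branch is only a
   placeholder: once the real target is known, ppc_write_goto_address
   patches the branch's displacement field in place.  */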
2399
2400 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2401
2402 static void
2403 ppc_emit_if_goto (int *offset_p, int *size_p)
2404 {
2405 EMIT_ASM ("or. 3, 3, 4 \n"
2406 "lwzu " TOP_FIRST ", 8(30) \n"
2407 "lwz " TOP_SECOND ", 4(30) \n"
2408 "1:bne 0, 1b \n");
2409
2410 if (offset_p)
2411 *offset_p = 12;
2412 if (size_p)
2413 *size_p = 14;
2414 }
2415
2416 /* Unconditional goto. Also used for ppc64. */
2417
2418 static void
2419 ppc_emit_goto (int *offset_p, int *size_p)
2420 {
2421 EMIT_ASM ("1:b 1b");
2422
2423 if (offset_p)
2424 *offset_p = 0;
2425 if (size_p)
2426 *size_p = 24;
2427 }
2428
2429 /* Goto if stack[--sp] == TOP */
2430
2431 static void
2432 ppc_emit_eq_goto (int *offset_p, int *size_p)
2433 {
2434 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2435 "lwz " TMP_SECOND ", 4(30) \n"
2436 "xor 4, 6, 4 \n"
2437 "xor 3, 5, 3 \n"
2438 "or. 3, 3, 4 \n"
2439 "lwzu " TOP_FIRST ", 8(30) \n"
2440 "lwz " TOP_SECOND ", 4(30) \n"
2441 "1:beq 0, 1b \n");
2442
2443 if (offset_p)
2444 *offset_p = 28;
2445 if (size_p)
2446 *size_p = 14;
2447 }
2448
2449 /* Goto if stack[--sp] != TOP */
2450
2451 static void
2452 ppc_emit_ne_goto (int *offset_p, int *size_p)
2453 {
2454 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2455 "lwz " TMP_SECOND ", 4(30) \n"
2456 "xor 4, 6, 4 \n"
2457 "xor 3, 5, 3 \n"
2458 "or. 3, 3, 4 \n"
2459 "lwzu " TOP_FIRST ", 8(30) \n"
2460 "lwz " TOP_SECOND ", 4(30) \n"
2461 "1:bne 0, 1b \n");
2462
2463 if (offset_p)
2464 *offset_p = 28;
2465 if (size_p)
2466 *size_p = 14;
2467 }
2468
2469 /* Goto if stack[--sp] < TOP */
2470
2471 static void
2472 ppc_emit_lt_goto (int *offset_p, int *size_p)
2473 {
2474 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2475 "lwz " TMP_SECOND ", 4(30) \n"
2476 "cmplw 6, 6, 4 \n"
2477 "cmpw 7, 5, 3 \n"
2478 /* CR6 bit 0 = low less and high equal */
2479 "crand 6*4+0, 6*4+0, 7*4+2\n"
2480 /* CR7 bit 0 = (low less and high equal) or high less */
2481 "cror 7*4+0, 7*4+0, 6*4+0\n"
2482 "lwzu " TOP_FIRST ", 8(30) \n"
2483 "lwz " TOP_SECOND ", 4(30)\n"
2484 "1:blt 7, 1b \n");
2485
2486 if (offset_p)
2487 *offset_p = 32;
2488 if (size_p)
2489 *size_p = 14;
2490 }
2491
2492 /* Goto if stack[--sp] <= TOP */
2493
2494 static void
2495 ppc_emit_le_goto (int *offset_p, int *size_p)
2496 {
2497 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2498 "lwz " TMP_SECOND ", 4(30) \n"
2499 "cmplw 6, 6, 4 \n"
2500 "cmpw 7, 5, 3 \n"
2501 /* CR6 bit 0 = low less/equal and high equal */
2502 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2503 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2504 "cror 7*4+0, 7*4+0, 6*4+0\n"
2505 "lwzu " TOP_FIRST ", 8(30) \n"
2506 "lwz " TOP_SECOND ", 4(30)\n"
2507 "1:blt 7, 1b \n");
2508
2509 if (offset_p)
2510 *offset_p = 32;
2511 if (size_p)
2512 *size_p = 14;
2513 }
2514
2515 /* Goto if stack[--sp] > TOP */
2516
2517 static void
2518 ppc_emit_gt_goto (int *offset_p, int *size_p)
2519 {
2520 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2521 "lwz " TMP_SECOND ", 4(30) \n"
2522 "cmplw 6, 6, 4 \n"
2523 "cmpw 7, 5, 3 \n"
2524 /* CR6 bit 0 = low greater and high equal */
2525 "crand 6*4+0, 6*4+1, 7*4+2\n"
2526 /* CR7 bit 0 = (low greater and high equal) or high greater */
2527 "cror 7*4+0, 7*4+1, 6*4+0\n"
2528 "lwzu " TOP_FIRST ", 8(30) \n"
2529 "lwz " TOP_SECOND ", 4(30)\n"
2530 "1:blt 7, 1b \n");
2531
2532 if (offset_p)
2533 *offset_p = 32;
2534 if (size_p)
2535 *size_p = 14;
2536 }
2537
2538 /* Goto if stack[--sp] >= TOP */
2539
2540 static void
2541 ppc_emit_ge_goto (int *offset_p, int *size_p)
2542 {
2543 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2544 "lwz " TMP_SECOND ", 4(30) \n"
2545 "cmplw 6, 6, 4 \n"
2546 "cmpw 7, 5, 3 \n"
2547 /* CR6 bit 0 = low ge and high equal */
2548 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2549 /* CR7 bit 0 = (low ge and high equal) or high greater */
2550 "cror 7*4+0, 7*4+1, 6*4+0\n"
2551 "lwzu " TOP_FIRST ", 8(30)\n"
2552 "lwz " TOP_SECOND ", 4(30)\n"
2553 "1:blt 7, 1b \n");
2554
2555 if (offset_p)
2556 *offset_p = 32;
2557 if (size_p)
2558 *size_p = 14;
2559 }
2560
2561 /* Relocate previous emitted branch instruction. FROM is the address
2562 of the branch instruction, TO is the goto target address, and SIZE
2563 is the value we set through *SIZE_P before. Currently, it is either
2564 24 or 14, for branch and conditional-branch instructions.
2565 Also used for ppc64. */
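
/* As a reminder of the encodings involved: opcode 18 ("b") carries a
   24-bit LI field in bits 6-29, hence the 0x3fffffc mask, while
   opcode 16 ("bc") carries a 14-bit BD field in bits 16-29, hence the
   0xfffc mask.  The two low bits hold the AA and LK flags and are left
   untouched.  */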
2566
2567 static void
2568 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2569 {
2570 long rel = to - from;
2571 uint32_t insn;
2572 int opcd;
2573
2574 read_inferior_memory (from, (unsigned char *) &insn, 4);
2575 opcd = (insn >> 26) & 0x3f;
2576
2577 switch (size)
2578 {
2579 case 14:
2580 if (opcd != 16
2581 || (rel >= (1 << 15) || rel < -(1 << 15)))
2582 emit_error = 1;
2583 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2584 break;
2585 case 24:
2586 if (opcd != 18
2587 || (rel >= (1 << 25) || rel < -(1 << 25)))
2588 emit_error = 1;
2589 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2590 break;
2591 default:
2592 emit_error = 1;
2593 }
2594
2595 if (!emit_error)
2596 target_write_memory (from, (unsigned char *) &insn, 4);
2597 }
2598
2599 /* Table of emit ops for 32-bit. */
2600
2601 static struct emit_ops ppc_emit_ops_impl =
2602 {
2603 ppc_emit_prologue,
2604 ppc_emit_epilogue,
2605 ppc_emit_add,
2606 ppc_emit_sub,
2607 ppc_emit_mul,
2608 ppc_emit_lsh,
2609 ppc_emit_rsh_signed,
2610 ppc_emit_rsh_unsigned,
2611 ppc_emit_ext,
2612 ppc_emit_log_not,
2613 ppc_emit_bit_and,
2614 ppc_emit_bit_or,
2615 ppc_emit_bit_xor,
2616 ppc_emit_bit_not,
2617 ppc_emit_equal,
2618 ppc_emit_less_signed,
2619 ppc_emit_less_unsigned,
2620 ppc_emit_ref,
2621 ppc_emit_if_goto,
2622 ppc_emit_goto,
2623 ppc_write_goto_address,
2624 ppc_emit_const,
2625 ppc_emit_call,
2626 ppc_emit_reg,
2627 ppc_emit_pop,
2628 ppc_emit_stack_flush,
2629 ppc_emit_zero_ext,
2630 ppc_emit_swap,
2631 ppc_emit_stack_adjust,
2632 ppc_emit_int_call_1,
2633 ppc_emit_void_call_2,
2634 ppc_emit_eq_goto,
2635 ppc_emit_ne_goto,
2636 ppc_emit_lt_goto,
2637 ppc_emit_le_goto,
2638 ppc_emit_gt_goto,
2639 ppc_emit_ge_goto
2640 };
2641
2642 #ifdef __powerpc64__
2643
2644 /*
2645
2646 Bytecode execution stack frame - 64-bit
2647
2648 | LR save area (SP + 16)
2649 | CR save area (SP + 8)
2650 SP' -> +- Back chain (SP + 0)
2651 | Save r31 for accessing saved arguments
2652 | Save r30 for bytecode stack pointer
2653 | Save r4 for incoming argument *value
2654 | Save r3 for incoming argument regs
2655 r30 -> +- Bytecode execution stack
2656 |
2657 | 64 bytes (8 doublewords) initially.
2658 | Expand stack as needed.
2659 |
2660 +-
2661 | Some padding for minimum stack frame.
2662 | 112 for ELFv1.
2663 SP +- Back-chain (SP')
2664
2665 initial frame size
2666 = 112 + (4 * 8) + 64
2667 = 208
2668
2669 r30 is the stack pointer for the bytecode machine.
2670 It should point to the next empty slot, so we can use LDU for pop.
2671 r3 is used to cache the TOP value.
2672 It was the first argument, a pointer to regs.
2673 r4 is the second argument, a pointer to the result.
2674 We should set *result = TOP before leaving this function.
2675
2676 Note:
2677 * To restore stack at epilogue
2678 => sp = r31
2679 * To check stack is big enough for bytecode execution.
2680 => r30 - 8 > SP + 112
2681 * To return execution result.
2682 => 0(r4) = TOP
2683
2684 */
2685
2686 /* Emit prologue in inferior memory. See above comments. */
2687
2688 static void
2689 ppc64v1_emit_prologue (void)
2690 {
2691 /* On ELFv1, function pointers really point to a function descriptor,
2692 so emit one here. We don't care about the contents of words 1 and 2,
2693 so let them just overlap our code. */
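/* (An ELFv1 function descriptor is three doublewords: entry point,
   TOC pointer, and environment pointer.  Only the entry point emitted
   here matters for this generated code, which is why the other two
   words can safely overlap the following instructions.)  */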
2694 uint64_t opd = current_insn_ptr + 8;
2695 uint32_t buf[2];
2696
2697 /* Mind the strict aliasing rules. */
2698 memcpy (buf, &opd, sizeof buf);
2699 emit_insns (buf, 2);
2700 EMIT_ASM (/* Save return address. */
2701 "mflr 0 \n"
2702 "std 0, 16(1) \n"
2703 /* Save r31, r30, and incoming arguments. */
2704 "std 31, -8(1) \n"
2705 "std 30, -16(1) \n"
2706 "std 4, -24(1) \n"
2707 "std 3, -32(1) \n"
2708 /* Point r31 at the current r1 for accessing arguments. */
2709 "mr 31, 1 \n"
2710 /* Adjust SP. 208 is the initial frame size. */
2711 "stdu 1, -208(1) \n"
2712 /* Set r30 to point at the stack top. */
2713 "addi 30, 1, 168 \n"
2714 /* Initialize r3/TOP to 0. */
2715 "li 3, 0 \n");
2716 }
2717
2718 /* Emit prologue in inferior memory. See above comments. */
2719
2720 static void
2721 ppc64v2_emit_prologue (void)
2722 {
2723 EMIT_ASM (/* Save return address. */
2724 "mflr 0 \n"
2725 "std 0, 16(1) \n"
2726 /* Save r31, r30, and incoming arguments. */
2727 "std 31, -8(1) \n"
2728 "std 30, -16(1) \n"
2729 "std 4, -24(1) \n"
2730 "std 3, -32(1) \n"
2731 /* Point r31 at the current r1 for accessing arguments. */
2732 "mr 31, 1 \n"
2733 /* Adjust SP. 208 is the initial frame size. */
2734 "stdu 1, -208(1) \n"
2735 /* Set r30 to point at the stack top. */
2736 "addi 30, 1, 168 \n"
2737 /* Initialize r3/TOP to 0. */
2738 "li 3, 0 \n");
2739 }
2740
2741 /* Emit epilogue in inferior memory. See above comments. */
2742
2743 static void
2744 ppc64_emit_epilogue (void)
2745 {
2746 EMIT_ASM (/* Restore SP. */
2747 "ld 1, 0(1) \n"
2748 /* *result = TOP */
2749 "ld 4, -24(1) \n"
2750 "std 3, 0(4) \n"
2751 /* Restore registers. */
2752 "ld 31, -8(1) \n"
2753 "ld 30, -16(1) \n"
2754 /* Restore LR. */
2755 "ld 0, 16(1) \n"
2756 /* Return 0 for no-error. */
2757 "li 3, 0 \n"
2758 "mtlr 0 \n"
2759 "blr \n");
2760 }
2761
2762 /* TOP = stack[--sp] + TOP */
2763
2764 static void
2765 ppc64_emit_add (void)
2766 {
2767 EMIT_ASM ("ldu 4, 8(30) \n"
2768 "add 3, 4, 3 \n");
2769 }
2770
2771 /* TOP = stack[--sp] - TOP */
2772
2773 static void
2774 ppc64_emit_sub (void)
2775 {
2776 EMIT_ASM ("ldu 4, 8(30) \n"
2777 "sub 3, 4, 3 \n");
2778 }
2779
2780 /* TOP = stack[--sp] * TOP */
2781
2782 static void
2783 ppc64_emit_mul (void)
2784 {
2785 EMIT_ASM ("ldu 4, 8(30) \n"
2786 "mulld 3, 4, 3 \n");
2787 }
2788
2789 /* TOP = stack[--sp] << TOP */
2790
2791 static void
2792 ppc64_emit_lsh (void)
2793 {
2794 EMIT_ASM ("ldu 4, 8(30) \n"
2795 "sld 3, 4, 3 \n");
2796 }
2797
2798 /* TOP = stack[--sp] >> TOP
2799 (Arithmetic shift right) */
2800
2801 static void
2802 ppc64_emit_rsh_signed (void)
2803 {
2804 EMIT_ASM ("ldu 4, 8(30) \n"
2805 "srad 3, 4, 3 \n");
2806 }
2807
2808 /* TOP = stack[--sp] >> TOP
2809 (Logical shift right) */
2810
2811 static void
2812 ppc64_emit_rsh_unsigned (void)
2813 {
2814 EMIT_ASM ("ldu 4, 8(30) \n"
2815 "srd 3, 4, 3 \n");
2816 }
2817
2818 /* Emit code for the sign extension specified by ARG. */
2819
2820 static void
2821 ppc64_emit_ext (int arg)
2822 {
2823 switch (arg)
2824 {
2825 case 8:
2826 EMIT_ASM ("extsb 3, 3");
2827 break;
2828 case 16:
2829 EMIT_ASM ("extsh 3, 3");
2830 break;
2831 case 32:
2832 EMIT_ASM ("extsw 3, 3");
2833 break;
2834 default:
2835 emit_error = 1;
2836 }
2837 }
2838
2839 /* Emit code for zero-extension specified by ARG. */
2840
2841 static void
2842 ppc64_emit_zero_ext (int arg)
2843 {
2844 switch (arg)
2845 {
2846 case 8:
2847 EMIT_ASM ("rldicl 3,3,0,56");
2848 break;
2849 case 16:
2850 EMIT_ASM ("rldicl 3,3,0,48");
2851 break;
2852 case 32:
2853 EMIT_ASM ("rldicl 3,3,0,32");
2854 break;
2855 default:
2856 emit_error = 1;
2857 }
2858 }
2859
2860 /* TOP = !TOP
2861 i.e., TOP = (TOP == 0) ? 1 : 0; */
2862
2863 static void
2864 ppc64_emit_log_not (void)
2865 {
2866 EMIT_ASM ("cntlzd 3, 3 \n"
2867 "srdi 3, 3, 6 \n");
2868 }
2869
2870 /* TOP = stack[--sp] & TOP */
2871
2872 static void
2873 ppc64_emit_bit_and (void)
2874 {
2875 EMIT_ASM ("ldu 4, 8(30) \n"
2876 "and 3, 4, 3 \n");
2877 }
2878
2879 /* TOP = stack[--sp] | TOP */
2880
2881 static void
2882 ppc64_emit_bit_or (void)
2883 {
2884 EMIT_ASM ("ldu 4, 8(30) \n"
2885 "or 3, 4, 3 \n");
2886 }
2887
2888 /* TOP = stack[--sp] ^ TOP */
2889
2890 static void
2891 ppc64_emit_bit_xor (void)
2892 {
2893 EMIT_ASM ("ldu 4, 8(30) \n"
2894 "xor 3, 4, 3 \n");
2895 }
2896
2897 /* TOP = ~TOP
2898 i.e., TOP = ~(TOP | TOP) */
2899
2900 static void
2901 ppc64_emit_bit_not (void)
2902 {
2903 EMIT_ASM ("nor 3, 3, 3 \n");
2904 }
2905
2906 /* TOP = stack[--sp] == TOP */
2907
2908 static void
2909 ppc64_emit_equal (void)
2910 {
2911 EMIT_ASM ("ldu 4, 8(30) \n"
2912 "xor 3, 3, 4 \n"
2913 "cntlzd 3, 3 \n"
2914 "srdi 3, 3, 6 \n");
2915 }
2916
2917 /* TOP = stack[--sp] < TOP
2918 (Signed comparison) */
2919
2920 static void
2921 ppc64_emit_less_signed (void)
2922 {
2923 EMIT_ASM ("ldu 4, 8(30) \n"
2924 "cmpd 7, 4, 3 \n"
2925 "mfcr 3 \n"
2926 "rlwinm 3, 3, 29, 31, 31 \n");
2927 }
2928
2929 /* TOP = stack[--sp] < TOP
2930 (Unsigned comparison) */
2931
2932 static void
2933 ppc64_emit_less_unsigned (void)
2934 {
2935 EMIT_ASM ("ldu 4, 8(30) \n"
2936 "cmpld 7, 4, 3 \n"
2937 "mfcr 3 \n"
2938 "rlwinm 3, 3, 29, 31, 31 \n");
2939 }
2940
2941 /* Access SIZE bytes of memory at the address in TOP.
2942 Zero-extend the value read. */
2943
2944 static void
2945 ppc64_emit_ref (int size)
2946 {
2947 switch (size)
2948 {
2949 case 1:
2950 EMIT_ASM ("lbz 3, 0(3)");
2951 break;
2952 case 2:
2953 EMIT_ASM ("lhz 3, 0(3)");
2954 break;
2955 case 4:
2956 EMIT_ASM ("lwz 3, 0(3)");
2957 break;
2958 case 8:
2959 EMIT_ASM ("ld 3, 0(3)");
2960 break;
2961 }
2962 }
2963
2964 /* TOP = NUM */
2965
2966 static void
2967 ppc64_emit_const (LONGEST num)
2968 {
2969 uint32_t buf[5];
2970 uint32_t *p = buf;
2971
2972 p += gen_limm (p, 3, num, 1);
2973
2974 emit_insns (buf, p - buf);
2975 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2976 }
2977
2978 /* Set TOP to the value of register REG by calling the get_raw_reg function
2979 with two arguments, the collected buffer and the register number. */
2980
2981 static void
2982 ppc64v1_emit_reg (int reg)
2983 {
2984 uint32_t buf[15];
2985 uint32_t *p = buf;
2986
2987 /* fctx->regs is passed in r3 and then saved in 176(1). */
2988 p += GEN_LD (p, 3, 31, -32);
2989 p += GEN_LI (p, 4, reg);
2990 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2991 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2992 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2993
2994 emit_insns (buf, p - buf);
2995 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2996 }
2997
2998 /* Likewise, for ELFv2. */
2999
3000 static void
3001 ppc64v2_emit_reg (int reg)
3002 {
3003 uint32_t buf[12];
3004 uint32_t *p = buf;
3005
3006 /* fctx->regs is passed in r3 and then saved in 176(1). */
3007 p += GEN_LD (p, 3, 31, -32);
3008 p += GEN_LI (p, 4, reg);
3009 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3010 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
3011 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3012
3013 emit_insns (buf, p - buf);
3014 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3015 }
3016
3017 /* TOP = stack[--sp] */
3018
3019 static void
3020 ppc64_emit_pop (void)
3021 {
3022 EMIT_ASM ("ldu 3, 8(30)");
3023 }
3024
3025 /* stack[sp++] = TOP
3026
3027 Because we may use up the bytecode stack, expand it by 8 more doublewords
3028 if needed. */
3029
3030 static void
3031 ppc64_emit_stack_flush (void)
3032 {
3033 /* Make sure the bytecode stack is big enough before the push.
3034 Otherwise, expand it by 64 more bytes. */
3035
3036 EMIT_ASM (" std 3, 0(30) \n"
3037 " addi 4, 30, -(112 + 8) \n"
3038 " cmpd 7, 4, 1 \n"
3039 " bgt 7, 1f \n"
3040 " stdu 31, -64(1) \n"
3041 "1:addi 30, 30, -8 \n");
3042 }
3043
3044 /* Swap TOP and stack[sp-1] */
3045
3046 static void
3047 ppc64_emit_swap (void)
3048 {
3049 EMIT_ASM ("ld 4, 8(30) \n"
3050 "std 3, 8(30) \n"
3051 "mr 3, 4 \n");
3052 }
3053
3054 /* Call function FN - ELFv1. */
3055
3056 static void
3057 ppc64v1_emit_call (CORE_ADDR fn)
3058 {
3059 uint32_t buf[13];
3060 uint32_t *p = buf;
3061
3062 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3063 p += gen_call (p, fn, 1, 1);
3064 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3065
3066 emit_insns (buf, p - buf);
3067 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3068 }
3069
3070 /* Call function FN - ELFv2. */
3071
3072 static void
3073 ppc64v2_emit_call (CORE_ADDR fn)
3074 {
3075 uint32_t buf[10];
3076 uint32_t *p = buf;
3077
3078 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3079 p += gen_call (p, fn, 1, 0);
3080 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3081
3082 emit_insns (buf, p - buf);
3083 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3084 }
3085
3086 /* FN's prototype is `LONGEST(*fn)(int)'.
3087 TOP = fn (arg1)
3088 */
3089
3090 static void
3091 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
3092 {
3093 uint32_t buf[13];
3094 uint32_t *p = buf;
3095
3096 /* Setup argument. arg1 is a 16-bit value. */
3097 p += gen_limm (p, 3, arg1, 1);
3098 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3099 p += gen_call (p, fn, 1, 1);
3100 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3101
3102 emit_insns (buf, p - buf);
3103 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3104 }
3105
3106 /* Likewise for ELFv2. */
3107
3108 static void
3109 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
3110 {
3111 uint32_t buf[10];
3112 uint32_t *p = buf;
3113
3114 /* Setup argument. arg1 is a 16-bit value. */
3115 p += gen_limm (p, 3, arg1, 1);
3116 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3117 p += gen_call (p, fn, 1, 0);
3118 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3119
3120 emit_insns (buf, p - buf);
3121 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3122 }
3123
3124 /* FN's prototype is `void(*fn)(int,LONGEST)'.
3125 fn (arg1, TOP)
3126
3127 TOP should be preserved/restored before/after the call. */
3128
3129 static void
3130 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
3131 {
3132 uint32_t buf[17];
3133 uint32_t *p = buf;
3134
3135 /* Save TOP. 0(30) is next-empty. */
3136 p += GEN_STD (p, 3, 30, 0);
3137
3138 /* Setup argument. arg1 is a 16-bit value. */
3139 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3140 p += gen_limm (p, 3, arg1, 1);
3141 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
3142 p += gen_call (p, fn, 1, 1);
3143 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
3144
3145 /* Restore TOP */
3146 p += GEN_LD (p, 3, 30, 0);
3147
3148 emit_insns (buf, p - buf);
3149 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3150 }
3151
3152 /* Likewise for ELFv2. */
3153
3154 static void
3155 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
3156 {
3157 uint32_t buf[14];
3158 uint32_t *p = buf;
3159
3160 /* Save TOP. 0(30) is next-empty. */
3161 p += GEN_STD (p, 3, 30, 0);
3162
3163 /* Setup argument. arg1 is a 16-bit value. */
3164 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
3165 p += gen_limm (p, 3, arg1, 1);
3166 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
3167 p += gen_call (p, fn, 1, 0);
3168 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
3169
3170 /* Restore TOP */
3171 p += GEN_LD (p, 3, 30, 0);
3172
3173 emit_insns (buf, p - buf);
3174 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
3175 }
3176
3177 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
3178
3179 static void
3180 ppc64_emit_if_goto (int *offset_p, int *size_p)
3181 {
3182 EMIT_ASM ("cmpdi 7, 3, 0 \n"
3183 "ldu 3, 8(30) \n"
3184 "1:bne 7, 1b \n");
3185
3186 if (offset_p)
3187 *offset_p = 8;
3188 if (size_p)
3189 *size_p = 14;
3190 }
3191
3192 /* Goto if stack[--sp] == TOP */
3193
3194 static void
3195 ppc64_emit_eq_goto (int *offset_p, int *size_p)
3196 {
3197 EMIT_ASM ("ldu 4, 8(30) \n"
3198 "cmpd 7, 4, 3 \n"
3199 "ldu 3, 8(30) \n"
3200 "1:beq 7, 1b \n");
3201
3202 if (offset_p)
3203 *offset_p = 12;
3204 if (size_p)
3205 *size_p = 14;
3206 }
3207
3208 /* Goto if stack[--sp] != TOP */
3209
3210 static void
3211 ppc64_emit_ne_goto (int *offset_p, int *size_p)
3212 {
3213 EMIT_ASM ("ldu 4, 8(30) \n"
3214 "cmpd 7, 4, 3 \n"
3215 "ldu 3, 8(30) \n"
3216 "1:bne 7, 1b \n");
3217
3218 if (offset_p)
3219 *offset_p = 12;
3220 if (size_p)
3221 *size_p = 14;
3222 }
3223
3224 /* Goto if stack[--sp] < TOP */
3225
3226 static void
3227 ppc64_emit_lt_goto (int *offset_p, int *size_p)
3228 {
3229 EMIT_ASM ("ldu 4, 8(30) \n"
3230 "cmpd 7, 4, 3 \n"
3231 "ldu 3, 8(30) \n"
3232 "1:blt 7, 1b \n");
3233
3234 if (offset_p)
3235 *offset_p = 12;
3236 if (size_p)
3237 *size_p = 14;
3238 }
3239
3240 /* Goto if stack[--sp] <= TOP */
3241
3242 static void
3243 ppc64_emit_le_goto (int *offset_p, int *size_p)
3244 {
3245 EMIT_ASM ("ldu 4, 8(30) \n"
3246 "cmpd 7, 4, 3 \n"
3247 "ldu 3, 8(30) \n"
3248 "1:ble 7, 1b \n");
3249
3250 if (offset_p)
3251 *offset_p = 12;
3252 if (size_p)
3253 *size_p = 14;
3254 }
3255
3256 /* Goto if stack[--sp] > TOP */
3257
3258 static void
3259 ppc64_emit_gt_goto (int *offset_p, int *size_p)
3260 {
3261 EMIT_ASM ("ldu 4, 8(30) \n"
3262 "cmpd 7, 4, 3 \n"
3263 "ldu 3, 8(30) \n"
3264 "1:bgt 7, 1b \n");
3265
3266 if (offset_p)
3267 *offset_p = 12;
3268 if (size_p)
3269 *size_p = 14;
3270 }
3271
3272 /* Goto if stack[--sp] >= TOP */
3273
3274 static void
3275 ppc64_emit_ge_goto (int *offset_p, int *size_p)
3276 {
3277 EMIT_ASM ("ldu 4, 8(30) \n"
3278 "cmpd 7, 4, 3 \n"
3279 "ldu 3, 8(30) \n"
3280 "1:bge 7, 1b \n");
3281
3282 if (offset_p)
3283 *offset_p = 12;
3284 if (size_p)
3285 *size_p = 14;
3286 }
3287
3288 /* Table of emit ops for 64-bit ELFv1. */
3289
3290 static struct emit_ops ppc64v1_emit_ops_impl =
3291 {
3292 ppc64v1_emit_prologue,
3293 ppc64_emit_epilogue,
3294 ppc64_emit_add,
3295 ppc64_emit_sub,
3296 ppc64_emit_mul,
3297 ppc64_emit_lsh,
3298 ppc64_emit_rsh_signed,
3299 ppc64_emit_rsh_unsigned,
3300 ppc64_emit_ext,
3301 ppc64_emit_log_not,
3302 ppc64_emit_bit_and,
3303 ppc64_emit_bit_or,
3304 ppc64_emit_bit_xor,
3305 ppc64_emit_bit_not,
3306 ppc64_emit_equal,
3307 ppc64_emit_less_signed,
3308 ppc64_emit_less_unsigned,
3309 ppc64_emit_ref,
3310 ppc64_emit_if_goto,
3311 ppc_emit_goto,
3312 ppc_write_goto_address,
3313 ppc64_emit_const,
3314 ppc64v1_emit_call,
3315 ppc64v1_emit_reg,
3316 ppc64_emit_pop,
3317 ppc64_emit_stack_flush,
3318 ppc64_emit_zero_ext,
3319 ppc64_emit_swap,
3320 ppc_emit_stack_adjust,
3321 ppc64v1_emit_int_call_1,
3322 ppc64v1_emit_void_call_2,
3323 ppc64_emit_eq_goto,
3324 ppc64_emit_ne_goto,
3325 ppc64_emit_lt_goto,
3326 ppc64_emit_le_goto,
3327 ppc64_emit_gt_goto,
3328 ppc64_emit_ge_goto
3329 };
3330
3331 /* Table of emit ops for 64-bit ELFv2. */
3332
3333 static struct emit_ops ppc64v2_emit_ops_impl =
3334 {
3335 ppc64v2_emit_prologue,
3336 ppc64_emit_epilogue,
3337 ppc64_emit_add,
3338 ppc64_emit_sub,
3339 ppc64_emit_mul,
3340 ppc64_emit_lsh,
3341 ppc64_emit_rsh_signed,
3342 ppc64_emit_rsh_unsigned,
3343 ppc64_emit_ext,
3344 ppc64_emit_log_not,
3345 ppc64_emit_bit_and,
3346 ppc64_emit_bit_or,
3347 ppc64_emit_bit_xor,
3348 ppc64_emit_bit_not,
3349 ppc64_emit_equal,
3350 ppc64_emit_less_signed,
3351 ppc64_emit_less_unsigned,
3352 ppc64_emit_ref,
3353 ppc64_emit_if_goto,
3354 ppc_emit_goto,
3355 ppc_write_goto_address,
3356 ppc64_emit_const,
3357 ppc64v2_emit_call,
3358 ppc64v2_emit_reg,
3359 ppc64_emit_pop,
3360 ppc64_emit_stack_flush,
3361 ppc64_emit_zero_ext,
3362 ppc64_emit_swap,
3363 ppc_emit_stack_adjust,
3364 ppc64v2_emit_int_call_1,
3365 ppc64v2_emit_void_call_2,
3366 ppc64_emit_eq_goto,
3367 ppc64_emit_ne_goto,
3368 ppc64_emit_lt_goto,
3369 ppc64_emit_le_goto,
3370 ppc64_emit_gt_goto,
3371 ppc64_emit_ge_goto
3372 };
3373
3374 #endif
3375
3376 /* Implementation of target ops method "emit_ops". */
3377
3378 emit_ops *
3379 ppc_target::emit_ops ()
3380 {
3381 #ifdef __powerpc64__
3382 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3383
3384 if (register_size (regcache->tdesc, 0) == 8)
3385 {
3386 if (is_elfv2_inferior ())
3387 return &ppc64v2_emit_ops_impl;
3388 else
3389 return &ppc64v1_emit_ops_impl;
3390 }
3391 #endif
3392 return &ppc_emit_ops_impl;
3393 }
3394
3395 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3396
3397 static int
3398 ppc_get_ipa_tdesc_idx (void)
3399 {
3400 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3401 const struct target_desc *tdesc = regcache->tdesc;
3402
3403 #ifdef __powerpc64__
3404 if (tdesc == tdesc_powerpc_64l)
3405 return PPC_TDESC_BASE;
3406 if (tdesc == tdesc_powerpc_altivec64l)
3407 return PPC_TDESC_ALTIVEC;
3408 if (tdesc == tdesc_powerpc_vsx64l)
3409 return PPC_TDESC_VSX;
3410 if (tdesc == tdesc_powerpc_isa205_64l)
3411 return PPC_TDESC_ISA205;
3412 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3413 return PPC_TDESC_ISA205_ALTIVEC;
3414 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3415 return PPC_TDESC_ISA205_VSX;
3416 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx64l)
3417 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3418 if (tdesc == tdesc_powerpc_isa207_vsx64l)
3419 return PPC_TDESC_ISA207_VSX;
3420 if (tdesc == tdesc_powerpc_isa207_htm_vsx64l)
3421 return PPC_TDESC_ISA207_HTM_VSX;
3422 #endif
3423
3424 if (tdesc == tdesc_powerpc_32l)
3425 return PPC_TDESC_BASE;
3426 if (tdesc == tdesc_powerpc_altivec32l)
3427 return PPC_TDESC_ALTIVEC;
3428 if (tdesc == tdesc_powerpc_vsx32l)
3429 return PPC_TDESC_VSX;
3430 if (tdesc == tdesc_powerpc_isa205_32l)
3431 return PPC_TDESC_ISA205;
3432 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3433 return PPC_TDESC_ISA205_ALTIVEC;
3434 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3435 return PPC_TDESC_ISA205_VSX;
3436 if (tdesc == tdesc_powerpc_isa205_ppr_dscr_vsx32l)
3437 return PPC_TDESC_ISA205_PPR_DSCR_VSX;
3438 if (tdesc == tdesc_powerpc_isa207_vsx32l)
3439 return PPC_TDESC_ISA207_VSX;
3440 if (tdesc == tdesc_powerpc_isa207_htm_vsx32l)
3441 return PPC_TDESC_ISA207_HTM_VSX;
3442 if (tdesc == tdesc_powerpc_e500l)
3443 return PPC_TDESC_E500;
3444
3445 return 0;
3446 }
3447
3448 struct linux_target_ops the_low_target = {
3449 ppc_get_ipa_tdesc_idx,
3450 };
3451
3452 /* The linux target ops object. */
3453
3454 linux_process_target *the_linux_target = &the_ppc_target;
3455
3456 void
3457 initialize_low_arch (void)
3458 {
3459 /* Initialize the Linux target descriptions. */
3460
3461 init_registers_powerpc_32l ();
3462 init_registers_powerpc_altivec32l ();
3463 init_registers_powerpc_vsx32l ();
3464 init_registers_powerpc_isa205_32l ();
3465 init_registers_powerpc_isa205_altivec32l ();
3466 init_registers_powerpc_isa205_vsx32l ();
3467 init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
3468 init_registers_powerpc_isa207_vsx32l ();
3469 init_registers_powerpc_isa207_htm_vsx32l ();
3470 init_registers_powerpc_e500l ();
3471 #if __powerpc64__
3472 init_registers_powerpc_64l ();
3473 init_registers_powerpc_altivec64l ();
3474 init_registers_powerpc_vsx64l ();
3475 init_registers_powerpc_isa205_64l ();
3476 init_registers_powerpc_isa205_altivec64l ();
3477 init_registers_powerpc_isa205_vsx64l ();
3478 init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
3479 init_registers_powerpc_isa207_vsx64l ();
3480 init_registers_powerpc_isa207_htm_vsx64l ();
3481 #endif
3482
3483 initialize_regsets_info (&ppc_regsets_info);
3484 }