[PowerPC] Consolidate linux vector regset sizes
gdb/gdbserver/linux-ppc-low.c (binutils-gdb)
1 /* GNU/Linux/PowerPC specific low level interface, for the remote server for
2 GDB.
3 Copyright (C) 1995-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22
23 #include <elf.h>
24 #include <asm/ptrace.h>
25
26 #include "arch/ppc-linux-common.h"
27 #include "arch/ppc-linux-tdesc.h"
28 #include "nat/ppc-linux.h"
29 #include "linux-ppc-tdesc-init.h"
30 #include "ax.h"
31 #include "tracepoint.h"
32
33 #define PPC_FIELD(value, from, len) \
34 (((value) >> (32 - (from) - (len))) & ((1 << (len)) - 1))
35 #define PPC_SEXT(v, bs) \
36 ((((CORE_ADDR) (v) & (((CORE_ADDR) 1 << (bs)) - 1)) \
37 ^ ((CORE_ADDR) 1 << ((bs) - 1))) \
38 - ((CORE_ADDR) 1 << ((bs) - 1)))
39 #define PPC_OP6(insn) PPC_FIELD (insn, 0, 6)
40 #define PPC_BO(insn) PPC_FIELD (insn, 6, 5)
41 #define PPC_LI(insn) (PPC_SEXT (PPC_FIELD (insn, 6, 24), 24) << 2)
42 #define PPC_BD(insn) (PPC_SEXT (PPC_FIELD (insn, 16, 14), 14) << 2)
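/* Worked example (for illustration only): for the word 0x48000100,
   i.e. the unconditional branch "b .+0x100", PPC_OP6 extracts 18 and
   PPC_LI extracts 0x100 -- PPC_FIELD pulls out the 24-bit LI field,
   PPC_SEXT sign-extends it, and the final shift restores the byte
   offset.  */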
43
44 static unsigned long ppc_hwcap;
45
46
47 #define ppc_num_regs 73
48
49 #ifdef __powerpc64__
50 /* We use a constant for FPSCR instead of PT_FPSCR, because
51 many shipped PPC64 kernels had the wrong value in ptrace.h. */
52 static int ppc_regmap[] =
53 {PT_R0 * 8, PT_R1 * 8, PT_R2 * 8, PT_R3 * 8,
54 PT_R4 * 8, PT_R5 * 8, PT_R6 * 8, PT_R7 * 8,
55 PT_R8 * 8, PT_R9 * 8, PT_R10 * 8, PT_R11 * 8,
56 PT_R12 * 8, PT_R13 * 8, PT_R14 * 8, PT_R15 * 8,
57 PT_R16 * 8, PT_R17 * 8, PT_R18 * 8, PT_R19 * 8,
58 PT_R20 * 8, PT_R21 * 8, PT_R22 * 8, PT_R23 * 8,
59 PT_R24 * 8, PT_R25 * 8, PT_R26 * 8, PT_R27 * 8,
60 PT_R28 * 8, PT_R29 * 8, PT_R30 * 8, PT_R31 * 8,
61 PT_FPR0*8, PT_FPR0*8 + 8, PT_FPR0*8+16, PT_FPR0*8+24,
62 PT_FPR0*8+32, PT_FPR0*8+40, PT_FPR0*8+48, PT_FPR0*8+56,
63 PT_FPR0*8+64, PT_FPR0*8+72, PT_FPR0*8+80, PT_FPR0*8+88,
64 PT_FPR0*8+96, PT_FPR0*8+104, PT_FPR0*8+112, PT_FPR0*8+120,
65 PT_FPR0*8+128, PT_FPR0*8+136, PT_FPR0*8+144, PT_FPR0*8+152,
66 PT_FPR0*8+160, PT_FPR0*8+168, PT_FPR0*8+176, PT_FPR0*8+184,
67 PT_FPR0*8+192, PT_FPR0*8+200, PT_FPR0*8+208, PT_FPR0*8+216,
68 PT_FPR0*8+224, PT_FPR0*8+232, PT_FPR0*8+240, PT_FPR0*8+248,
69 PT_NIP * 8, PT_MSR * 8, PT_CCR * 8, PT_LNK * 8,
70 PT_CTR * 8, PT_XER * 8, PT_FPR0*8 + 256,
71 PT_ORIG_R3 * 8, PT_TRAP * 8 };
72 #else
73 /* Currently, don't check/send MQ. */
74 static int ppc_regmap[] =
75 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
76 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
77 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
78 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
79 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
80 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
81 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
82 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
83 PT_FPR0*4, PT_FPR0*4 + 8, PT_FPR0*4+16, PT_FPR0*4+24,
84 PT_FPR0*4+32, PT_FPR0*4+40, PT_FPR0*4+48, PT_FPR0*4+56,
85 PT_FPR0*4+64, PT_FPR0*4+72, PT_FPR0*4+80, PT_FPR0*4+88,
86 PT_FPR0*4+96, PT_FPR0*4+104, PT_FPR0*4+112, PT_FPR0*4+120,
87 PT_FPR0*4+128, PT_FPR0*4+136, PT_FPR0*4+144, PT_FPR0*4+152,
88 PT_FPR0*4+160, PT_FPR0*4+168, PT_FPR0*4+176, PT_FPR0*4+184,
89 PT_FPR0*4+192, PT_FPR0*4+200, PT_FPR0*4+208, PT_FPR0*4+216,
90 PT_FPR0*4+224, PT_FPR0*4+232, PT_FPR0*4+240, PT_FPR0*4+248,
91 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
92 PT_CTR * 4, PT_XER * 4, PT_FPSCR * 4,
93 PT_ORIG_R3 * 4, PT_TRAP * 4
94 };
95
96 static int ppc_regmap_e500[] =
97 {PT_R0 * 4, PT_R1 * 4, PT_R2 * 4, PT_R3 * 4,
98 PT_R4 * 4, PT_R5 * 4, PT_R6 * 4, PT_R7 * 4,
99 PT_R8 * 4, PT_R9 * 4, PT_R10 * 4, PT_R11 * 4,
100 PT_R12 * 4, PT_R13 * 4, PT_R14 * 4, PT_R15 * 4,
101 PT_R16 * 4, PT_R17 * 4, PT_R18 * 4, PT_R19 * 4,
102 PT_R20 * 4, PT_R21 * 4, PT_R22 * 4, PT_R23 * 4,
103 PT_R24 * 4, PT_R25 * 4, PT_R26 * 4, PT_R27 * 4,
104 PT_R28 * 4, PT_R29 * 4, PT_R30 * 4, PT_R31 * 4,
105 -1, -1, -1, -1,
106 -1, -1, -1, -1,
107 -1, -1, -1, -1,
108 -1, -1, -1, -1,
109 -1, -1, -1, -1,
110 -1, -1, -1, -1,
111 -1, -1, -1, -1,
112 -1, -1, -1, -1,
113 PT_NIP * 4, PT_MSR * 4, PT_CCR * 4, PT_LNK * 4,
114 PT_CTR * 4, PT_XER * 4, -1,
115 PT_ORIG_R3 * 4, PT_TRAP * 4
116 };
117 #endif
118
119 static int
120 ppc_cannot_store_register (int regno)
121 {
122 const struct target_desc *tdesc = current_process ()->tdesc;
123
124 #ifndef __powerpc64__
125 /* Some kernels do not allow us to store fpscr. */
126 if (!(ppc_hwcap & PPC_FEATURE_HAS_SPE)
127 && regno == find_regno (tdesc, "fpscr"))
128 return 2;
129 #endif
130
131 /* Some kernels do not allow us to store orig_r3 or trap. */
132 if (regno == find_regno (tdesc, "orig_r3")
133 || regno == find_regno (tdesc, "trap"))
134 return 2;
135
136 return 0;
137 }
138
139 static int
140 ppc_cannot_fetch_register (int regno)
141 {
142 return 0;
143 }
144
145 static void
146 ppc_collect_ptrace_register (struct regcache *regcache, int regno, char *buf)
147 {
148 memset (buf, 0, sizeof (long));
149
150 if (__BYTE_ORDER == __LITTLE_ENDIAN)
151 {
152 /* Little-endian values always sit at the left end of the buffer. */
153 collect_register (regcache, regno, buf);
154 }
155 else if (__BYTE_ORDER == __BIG_ENDIAN)
156 {
157 /* Big-endian values sit at the right end of the buffer. For registers
158 whose size is smaller than sizeof (long), we must offset into the
159 buffer to access them correctly. */
160 int size = register_size (regcache->tdesc, regno);
161
162 if (size < sizeof (long))
163 collect_register (regcache, regno, buf + sizeof (long) - size);
164 else
165 collect_register (regcache, regno, buf);
166 }
167 else
168 perror_with_name ("Unexpected byte order");
169 }
170
171 static void
172 ppc_supply_ptrace_register (struct regcache *regcache,
173 int regno, const char *buf)
174 {
175 if (__BYTE_ORDER == __LITTLE_ENDIAN)
176 {
177 /* Little-endian values always sit at the left end of the buffer. */
178 supply_register (regcache, regno, buf);
179 }
180 else if (__BYTE_ORDER == __BIG_ENDIAN)
181 {
182 /* Big-endian values sit at the right end of the buffer. For registers
183 whose size is smaller than sizeof (long), we must offset into the
184 buffer to access them correctly. */
185 int size = register_size (regcache->tdesc, regno);
186
187 if (size < sizeof (long))
188 supply_register (regcache, regno, buf + sizeof (long) - size);
189 else
190 supply_register (regcache, regno, buf);
191 }
192 else
193 perror_with_name ("Unexpected byte order");
194 }
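/* For illustration: on a big-endian 64-bit host, a 4-byte register such
   as CR occupies the last four bytes of its 8-byte ptrace slot, so the
   two helpers above access it at buf + sizeof (long) - 4, while on
   little-endian hosts the value always starts at buf.  */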
195
196
197 #define INSTR_SC 0x44000002
198 #define NR_spu_run 0x0116
199
200 /* If the PPU thread is currently stopped on a spu_run system call,
201 return to FD and ADDR the file handle and NPC parameter address
202 used with the system call. Return non-zero if successful. */
203 static int
204 parse_spufs_run (struct regcache *regcache, int *fd, CORE_ADDR *addr)
205 {
206 CORE_ADDR curr_pc;
207 int curr_insn;
208 int curr_r0;
209
210 if (register_size (regcache->tdesc, 0) == 4)
211 {
212 unsigned int pc, r0, r3, r4;
213 collect_register_by_name (regcache, "pc", &pc);
214 collect_register_by_name (regcache, "r0", &r0);
215 collect_register_by_name (regcache, "orig_r3", &r3);
216 collect_register_by_name (regcache, "r4", &r4);
217 curr_pc = (CORE_ADDR) pc;
218 curr_r0 = (int) r0;
219 *fd = (int) r3;
220 *addr = (CORE_ADDR) r4;
221 }
222 else
223 {
224 unsigned long pc, r0, r3, r4;
225 collect_register_by_name (regcache, "pc", &pc);
226 collect_register_by_name (regcache, "r0", &r0);
227 collect_register_by_name (regcache, "orig_r3", &r3);
228 collect_register_by_name (regcache, "r4", &r4);
229 curr_pc = (CORE_ADDR) pc;
230 curr_r0 = (int) r0;
231 *fd = (int) r3;
232 *addr = (CORE_ADDR) r4;
233 }
234
235 /* Fetch instruction preceding current NIP. */
236 if ((*the_target->read_memory) (curr_pc - 4,
237 (unsigned char *) &curr_insn, 4) != 0)
238 return 0;
239 /* It should be a "sc" instruction. */
240 if (curr_insn != INSTR_SC)
241 return 0;
242 /* System call number should be NR_spu_run. */
243 if (curr_r0 != NR_spu_run)
244 return 0;
245
246 return 1;
247 }
248
249 static CORE_ADDR
250 ppc_get_pc (struct regcache *regcache)
251 {
252 CORE_ADDR addr;
253 int fd;
254
255 if (parse_spufs_run (regcache, &fd, &addr))
256 {
257 unsigned int pc;
258 (*the_target->read_memory) (addr, (unsigned char *) &pc, 4);
259 return ((CORE_ADDR)1 << 63)
260 | ((CORE_ADDR)fd << 32) | (CORE_ADDR) (pc - 4);
261 }
262 else if (register_size (regcache->tdesc, 0) == 4)
263 {
264 unsigned int pc;
265 collect_register_by_name (regcache, "pc", &pc);
266 return (CORE_ADDR) pc;
267 }
268 else
269 {
270 unsigned long pc;
271 collect_register_by_name (regcache, "pc", &pc);
272 return (CORE_ADDR) pc;
273 }
274 }
275
276 static void
277 ppc_set_pc (struct regcache *regcache, CORE_ADDR pc)
278 {
279 CORE_ADDR addr;
280 int fd;
281
282 if (parse_spufs_run (regcache, &fd, &addr))
283 {
284 unsigned int newpc = pc;
285 (*the_target->write_memory) (addr, (unsigned char *) &newpc, 4);
286 }
287 else if (register_size (regcache->tdesc, 0) == 4)
288 {
289 unsigned int newpc = pc;
290 supply_register_by_name (regcache, "pc", &newpc);
291 }
292 else
293 {
294 unsigned long newpc = pc;
295 supply_register_by_name (regcache, "pc", &newpc);
296 }
297 }
298
299
300 static int
301 ppc_get_auxv (unsigned long type, unsigned long *valp)
302 {
303 const struct target_desc *tdesc = current_process ()->tdesc;
304 int wordsize = register_size (tdesc, 0);
305 unsigned char *data = (unsigned char *) alloca (2 * wordsize);
306 int offset = 0;
307
308 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
309 {
310 if (wordsize == 4)
311 {
312 unsigned int *data_p = (unsigned int *)data;
313 if (data_p[0] == type)
314 {
315 *valp = data_p[1];
316 return 1;
317 }
318 }
319 else
320 {
321 unsigned long *data_p = (unsigned long *)data;
322 if (data_p[0] == type)
323 {
324 *valp = data_p[1];
325 return 1;
326 }
327 }
328
329 offset += 2 * wordsize;
330 }
331
332 *valp = 0;
333 return 0;
334 }
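/* Usage sketch (see ppc_arch_setup below): the HWCAP word is obtained
   with ppc_get_auxv (AT_HWCAP, &ppc_hwcap); if the entry is missing,
   *valp is left as 0, which reads as "no optional features".  */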
335
336 #ifndef __powerpc64__
337 static int ppc_regmap_adjusted;
338 #endif
339
340
341 /* Correct in either endianness.
342 This instruction is "twge r2, r2", which GDB uses as a software
343 breakpoint. */
344 static const unsigned int ppc_breakpoint = 0x7d821008;
345 #define ppc_breakpoint_len 4
346
347 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
348
349 static const gdb_byte *
350 ppc_sw_breakpoint_from_kind (int kind, int *size)
351 {
352 *size = ppc_breakpoint_len;
353 return (const gdb_byte *) &ppc_breakpoint;
354 }
355
356 static int
357 ppc_breakpoint_at (CORE_ADDR where)
358 {
359 unsigned int insn;
360
361 if (where & ((CORE_ADDR)1 << 63))
362 {
363 char mem_annex[32];
364 sprintf (mem_annex, "%d/mem", (int)((where >> 32) & 0x7fffffff));
365 (*the_target->qxfer_spu) (mem_annex, (unsigned char *) &insn,
366 NULL, where & 0xffffffff, 4);
367 if (insn == 0x3fff)
368 return 1;
369 }
370 else
371 {
372 (*the_target->read_memory) (where, (unsigned char *) &insn, 4);
373 if (insn == ppc_breakpoint)
374 return 1;
375 /* If necessary, recognize more trap instructions here. GDB only uses
376 the one. */
377 }
378
379 return 0;
380 }
381
382 /* Implement supports_z_point_type target-ops.
383 Returns true if type Z_TYPE breakpoint is supported.
384
385 Software breakpoints are handled on the server side, so tracepoints
386 and breakpoints can be inserted at the same location. */
387
388 static int
389 ppc_supports_z_point_type (char z_type)
390 {
391 switch (z_type)
392 {
393 case Z_PACKET_SW_BP:
394 return 1;
395 case Z_PACKET_HW_BP:
396 case Z_PACKET_WRITE_WP:
397 case Z_PACKET_ACCESS_WP:
398 default:
399 return 0;
400 }
401 }
402
403 /* Implement insert_point target-ops.
404 Returns 0 on success, -1 on failure and 1 on unsupported. */
405
406 static int
407 ppc_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
408 int size, struct raw_breakpoint *bp)
409 {
410 switch (type)
411 {
412 case raw_bkpt_type_sw:
413 return insert_memory_breakpoint (bp);
414
415 case raw_bkpt_type_hw:
416 case raw_bkpt_type_write_wp:
417 case raw_bkpt_type_access_wp:
418 default:
419 /* Unsupported. */
420 return 1;
421 }
422 }
423
424 /* Implement remove_point target-ops.
425 Returns 0 on success, -1 on failure and 1 on unsupported. */
426
427 static int
428 ppc_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
429 int size, struct raw_breakpoint *bp)
430 {
431 switch (type)
432 {
433 case raw_bkpt_type_sw:
434 return remove_memory_breakpoint (bp);
435
436 case raw_bkpt_type_hw:
437 case raw_bkpt_type_write_wp:
438 case raw_bkpt_type_access_wp:
439 default:
440 /* Unsupported. */
441 return 1;
442 }
443 }
444
445 /* Provide only a fill function for the general register set. ps_lgetregs
446 will use this for NPTL support. */
447
448 static void ppc_fill_gregset (struct regcache *regcache, void *buf)
449 {
450 int i;
451
452 for (i = 0; i < 32; i++)
453 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
454
455 for (i = 64; i < 70; i++)
456 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
457
458 for (i = 71; i < 73; i++)
459 ppc_collect_ptrace_register (regcache, i, (char *) buf + ppc_regmap[i]);
460 }
461
462 static void
463 ppc_fill_vsxregset (struct regcache *regcache, void *buf)
464 {
465 int i, base;
466 char *regset = (char *) buf;
467
468 base = find_regno (regcache->tdesc, "vs0h");
469 for (i = 0; i < 32; i++)
470 collect_register (regcache, base + i, &regset[i * 8]);
471 }
472
473 static void
474 ppc_store_vsxregset (struct regcache *regcache, const void *buf)
475 {
476 int i, base;
477 const char *regset = (const char *) buf;
478
479 base = find_regno (regcache->tdesc, "vs0h");
480 for (i = 0; i < 32; i++)
481 supply_register (regcache, base + i, &regset[i * 8]);
482 }
483
484 static void
485 ppc_fill_vrregset (struct regcache *regcache, void *buf)
486 {
487 int i, base;
488 char *regset = (char *) buf;
489
490 base = find_regno (regcache->tdesc, "vr0");
491 for (i = 0; i < 32; i++)
492 collect_register (regcache, base + i, &regset[i * 16]);
493
494 collect_register_by_name (regcache, "vscr", &regset[32 * 16 + 12]);
495 collect_register_by_name (regcache, "vrsave", &regset[33 * 16]);
496 }
497
498 static void
499 ppc_store_vrregset (struct regcache *regcache, const void *buf)
500 {
501 int i, base;
502 const char *regset = (const char *) buf;
503
504 base = find_regno (regcache->tdesc, "vr0");
505 for (i = 0; i < 32; i++)
506 supply_register (regcache, base + i, &regset[i * 16]);
507
508 supply_register_by_name (regcache, "vscr", &regset[32 * 16 + 12]);
509 supply_register_by_name (regcache, "vrsave", &regset[33 * 16]);
510 }
511
512 struct gdb_evrregset_t
513 {
514 unsigned long evr[32];
515 unsigned long long acc;
516 unsigned long spefscr;
517 };
518
519 static void
520 ppc_fill_evrregset (struct regcache *regcache, void *buf)
521 {
522 int i, ev0;
523 struct gdb_evrregset_t *regset = (struct gdb_evrregset_t *) buf;
524
525 ev0 = find_regno (regcache->tdesc, "ev0h");
526 for (i = 0; i < 32; i++)
527 collect_register (regcache, ev0 + i, &regset->evr[i]);
528
529 collect_register_by_name (regcache, "acc", &regset->acc);
530 collect_register_by_name (regcache, "spefscr", &regset->spefscr);
531 }
532
533 static void
534 ppc_store_evrregset (struct regcache *regcache, const void *buf)
535 {
536 int i, ev0;
537 const struct gdb_evrregset_t *regset = (const struct gdb_evrregset_t *) buf;
538
539 ev0 = find_regno (regcache->tdesc, "ev0h");
540 for (i = 0; i < 32; i++)
541 supply_register (regcache, ev0 + i, &regset->evr[i]);
542
543 supply_register_by_name (regcache, "acc", &regset->acc);
544 supply_register_by_name (regcache, "spefscr", &regset->spefscr);
545 }
546
547 /* Support for hardware single step. */
548
549 static int
550 ppc_supports_hardware_single_step (void)
551 {
552 return 1;
553 }
554
555 static struct regset_info ppc_regsets[] = {
556 /* List the extra register sets before GENERAL_REGS. That way we will
557 fetch them every time, but still fall back to PTRACE_PEEKUSER for the
558 general registers. Some kernels support these, but not the newer
559 PPC_PTRACE_GETREGS. */
560 { PTRACE_GETVSXREGS, PTRACE_SETVSXREGS, 0, 0, EXTENDED_REGS,
561 ppc_fill_vsxregset, ppc_store_vsxregset },
562 { PTRACE_GETVRREGS, PTRACE_SETVRREGS, 0, 0, EXTENDED_REGS,
563 ppc_fill_vrregset, ppc_store_vrregset },
564 { PTRACE_GETEVRREGS, PTRACE_SETEVRREGS, 0, 0, EXTENDED_REGS,
565 ppc_fill_evrregset, ppc_store_evrregset },
566 { 0, 0, 0, 0, GENERAL_REGS, ppc_fill_gregset, NULL },
567 NULL_REGSET
568 };
569
570 static struct usrregs_info ppc_usrregs_info =
571 {
572 ppc_num_regs,
573 ppc_regmap,
574 };
575
576 static struct regsets_info ppc_regsets_info =
577 {
578 ppc_regsets, /* regsets */
579 0, /* num_regsets */
580 NULL, /* disabled_regsets */
581 };
582
583 static struct regs_info regs_info =
584 {
585 NULL, /* regset_bitmap */
586 &ppc_usrregs_info,
587 &ppc_regsets_info
588 };
589
590 static const struct regs_info *
591 ppc_regs_info (void)
592 {
593 return &regs_info;
594 }
595
596 static void
597 ppc_arch_setup (void)
598 {
599 const struct target_desc *tdesc;
600 struct regset_info *regset;
601 struct ppc_linux_features features = ppc_linux_no_features;
602
603 int tid = lwpid_of (current_thread);
604
605 features.wordsize = ppc_linux_target_wordsize (tid);
606
607 if (features.wordsize == 4)
608 tdesc = tdesc_powerpc_32l;
609 else
610 tdesc = tdesc_powerpc_64l;
611
612 current_process ()->tdesc = tdesc;
613
614 /* The value of current_process ()->tdesc needs to be set for this
615 call. */
616 ppc_get_auxv (AT_HWCAP, &ppc_hwcap);
617
618 features.isa205 = ppc_linux_has_isa205 (ppc_hwcap);
619
620 if (ppc_hwcap & PPC_FEATURE_HAS_VSX)
621 features.vsx = true;
622
623 if (ppc_hwcap & PPC_FEATURE_HAS_ALTIVEC)
624 features.altivec = true;
625
626 if (ppc_hwcap & PPC_FEATURE_CELL)
627 features.cell = true;
628
629 tdesc = ppc_linux_match_description (features);
630
631 /* On 32-bit machines, check for SPE registers.
632 Set the low target's regmap field as appropriate. */
633 #ifndef __powerpc64__
634 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
635 tdesc = tdesc_powerpc_e500l;
636
637 if (!ppc_regmap_adjusted)
638 {
639 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
640 ppc_usrregs_info.regmap = ppc_regmap_e500;
641
642 /* If the FPSCR is 64-bit wide, we need to fetch the whole
643 64-bit slot and not just its second word. The PT_FPSCR
644 supplied in a 32-bit GDB compilation doesn't reflect
645 this. */
646 if (register_size (tdesc, 70) == 8)
647 ppc_regmap[70] = (48 + 2*32) * sizeof (long);
648
649 ppc_regmap_adjusted = 1;
650 }
651 #endif
652
653 current_process ()->tdesc = tdesc;
654
655 for (regset = ppc_regsets; regset->size >= 0; regset++)
656 switch (regset->get_request)
657 {
658 case PTRACE_GETVRREGS:
659 regset->size = features.altivec ? PPC_LINUX_SIZEOF_VRREGSET : 0;
660 break;
661 case PTRACE_GETVSXREGS:
662 regset->size = features.vsx ? PPC_LINUX_SIZEOF_VSXREGSET : 0;
663 break;
664 case PTRACE_GETEVRREGS:
665 if (ppc_hwcap & PPC_FEATURE_HAS_SPE)
666 regset->size = 32 * 4 + 8 + 4;
667 else
668 regset->size = 0;
669 break;
670 default:
671 break;
672 }
673 }
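/* As an illustrative consistency check: the hard-coded EVR regset size
   above, 32 * 4 + 8 + 4 = 140 bytes, corresponds to the fields of
   struct gdb_evrregset_t on a 32-bit host (where unsigned long is
   4 bytes): 32 evr entries, the 8-byte acc, and spefscr.  */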
674
675 /* Implementation of linux_target_ops method "supports_tracepoints". */
676
677 static int
678 ppc_supports_tracepoints (void)
679 {
680 return 1;
681 }
682
683 /* Get the thread area address. This is used to recognize which
684 thread is which when tracing with the in-process agent library. We
685 don't read anything from the address, and treat it as opaque; it's
686 the address itself that we assume is unique per-thread. */
687
688 static int
689 ppc_get_thread_area (int lwpid, CORE_ADDR *addr)
690 {
691 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
692 struct thread_info *thr = get_lwp_thread (lwp);
693 struct regcache *regcache = get_thread_regcache (thr, 1);
694 ULONGEST tp = 0;
695
696 #ifdef __powerpc64__
697 if (register_size (regcache->tdesc, 0) == 8)
698 collect_register_by_name (regcache, "r13", &tp);
699 else
700 #endif
701 collect_register_by_name (regcache, "r2", &tp);
702
703 *addr = tp;
704
705 return 0;
706 }
707
708 #ifdef __powerpc64__
709
710 /* Older glibc doesn't provide this. */
711
712 #ifndef EF_PPC64_ABI
713 #define EF_PPC64_ABI 3
714 #endif
715
716 /* Returns 1 if inferior is using ELFv2 ABI. Undefined for 32-bit
717 inferiors. */
718
719 static int
720 is_elfv2_inferior (void)
721 {
722 /* To be used as fallback if we're unable to determine the right result -
723 assume inferior uses the same ABI as gdbserver. */
724 #if _CALL_ELF == 2
725 const int def_res = 1;
726 #else
727 const int def_res = 0;
728 #endif
729 unsigned long phdr;
730 Elf64_Ehdr ehdr;
731
732 if (!ppc_get_auxv (AT_PHDR, &phdr))
733 return def_res;
734
735 /* Assume ELF header is at the beginning of the page where program headers
736 are located. If it doesn't look like one, bail. */
737
738 read_inferior_memory (phdr & ~0xfff, (unsigned char *) &ehdr, sizeof ehdr);
739 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG))
740 return def_res;
741
742 return (ehdr.e_flags & EF_PPC64_ABI) == 2;
743 }
744
745 #endif
746
747 /* Generate a ds-form instruction in BUF and return the number of instructions written.
748
749 0 6 11 16 30 32
750 | OPCD | RST | RA | DS |XO| */
751
752 __attribute__((unused)) /* Maybe unused due to conditional compilation. */
753 static int
754 gen_ds_form (uint32_t *buf, int opcd, int rst, int ra, int ds, int xo)
755 {
756 uint32_t insn;
757
758 gdb_assert ((opcd & ~0x3f) == 0);
759 gdb_assert ((rst & ~0x1f) == 0);
760 gdb_assert ((ra & ~0x1f) == 0);
761 gdb_assert ((xo & ~0x3) == 0);
762
763 insn = (rst << 21) | (ra << 16) | (ds & 0xfffc) | (xo & 0x3);
764 *buf = (opcd << 26) | insn;
765 return 1;
766 }
767
768 /* The following are frequently used ds-form instructions. */
769
770 #define GEN_STD(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 0)
771 #define GEN_STDU(buf, rs, ra, offset) gen_ds_form (buf, 62, rs, ra, offset, 1)
772 #define GEN_LD(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 0)
773 #define GEN_LDU(buf, rt, ra, offset) gen_ds_form (buf, 58, rt, ra, offset, 1)
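/* Worked example (illustrative): GEN_STD (p, 31, 1, -8) emits
   "std r31, -8(r1)"; with OPCD = 62, RS = 31, RA = 1, DS = -8 and
   XO = 0, the formula in gen_ds_form yields the word 0xfbe1fff8.  */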
774
775 /* Generate a d-form instruction in BUF.
776
777 0 6 11 16 32
778 | OPCD | RST | RA | D | */
779
780 static int
781 gen_d_form (uint32_t *buf, int opcd, int rst, int ra, int si)
782 {
783 uint32_t insn;
784
785 gdb_assert ((opcd & ~0x3f) == 0);
786 gdb_assert ((rst & ~0x1f) == 0);
787 gdb_assert ((ra & ~0x1f) == 0);
788
789 insn = (rst << 21) | (ra << 16) | (si & 0xffff);
790 *buf = (opcd << 26) | insn;
791 return 1;
792 }
793
794 /* The following are frequently used d-form instructions. */
795
796 #define GEN_ADDI(buf, rt, ra, si) gen_d_form (buf, 14, rt, ra, si)
797 #define GEN_ADDIS(buf, rt, ra, si) gen_d_form (buf, 15, rt, ra, si)
798 #define GEN_LI(buf, rt, si) GEN_ADDI (buf, rt, 0, si)
799 #define GEN_LIS(buf, rt, si) GEN_ADDIS (buf, rt, 0, si)
800 #define GEN_ORI(buf, rt, ra, si) gen_d_form (buf, 24, rt, ra, si)
801 #define GEN_ORIS(buf, rt, ra, si) gen_d_form (buf, 25, rt, ra, si)
802 #define GEN_LWZ(buf, rt, ra, si) gen_d_form (buf, 32, rt, ra, si)
803 #define GEN_STW(buf, rt, ra, si) gen_d_form (buf, 36, rt, ra, si)
804 #define GEN_STWU(buf, rt, ra, si) gen_d_form (buf, 37, rt, ra, si)
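/* Worked example (illustrative): GEN_ADDI (p, 1, 1, 16) emits
   "addi r1, r1, 16"; with OPCD = 14, RT = 1, RA = 1 and SI = 16,
   gen_d_form produces the word 0x38210010.  */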
805
806 /* Generate an xfx-form instruction in BUF and return the number of
807 instructions written.
808
809 0 6 11 21 31 32
810 | OPCD | RST | RI | XO |/| */
811
812 static int
813 gen_xfx_form (uint32_t *buf, int opcd, int rst, int ri, int xo)
814 {
815 uint32_t insn;
816 unsigned int n = ((ri & 0x1f) << 5) | ((ri >> 5) & 0x1f);
817
818 gdb_assert ((opcd & ~0x3f) == 0);
819 gdb_assert ((rst & ~0x1f) == 0);
820 gdb_assert ((xo & ~0x3ff) == 0);
821
822 insn = (rst << 21) | (n << 11) | (xo << 1);
823 *buf = (opcd << 26) | insn;
824 return 1;
825 }
826
827 /* The following are frequently used xfx-form instructions. */
828
829 #define GEN_MFSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 339)
830 #define GEN_MTSPR(buf, rt, spr) gen_xfx_form (buf, 31, rt, spr, 467)
831 #define GEN_MFCR(buf, rt) gen_xfx_form (buf, 31, rt, 0, 19)
832 #define GEN_MTCR(buf, rt) gen_xfx_form (buf, 31, rt, 0x3cf, 144)
833 #define GEN_SYNC(buf, L, E) gen_xfx_form (buf, 31, L & 0x3, \
834 E & 0xf, 598)
835 #define GEN_LWSYNC(buf) GEN_SYNC (buf, 1, 0)
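/* Worked example (illustrative): GEN_MFSPR (p, 5, 8) emits "mflr r5":
   the SPR number 8 (LR) has its two 5-bit halves swapped into the RI
   field, and with XO = 339 the resulting word is 0x7ca802a6.  Likewise
   GEN_MTSPR (p, 12, 9) emits "mtctr r12", as used by gen_call below.  */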
836
837
838 /* Generate an x-form instruction in BUF and return the number of instructions written.
839
840 0 6 11 16 21 31 32
841 | OPCD | RST | RA | RB | XO |RC| */
842
843 static int
844 gen_x_form (uint32_t *buf, int opcd, int rst, int ra, int rb, int xo, int rc)
845 {
846 uint32_t insn;
847
848 gdb_assert ((opcd & ~0x3f) == 0);
849 gdb_assert ((rst & ~0x1f) == 0);
850 gdb_assert ((ra & ~0x1f) == 0);
851 gdb_assert ((rb & ~0x1f) == 0);
852 gdb_assert ((xo & ~0x3ff) == 0);
853 gdb_assert ((rc & ~1) == 0);
854
855 insn = (rst << 21) | (ra << 16) | (rb << 11) | (xo << 1) | rc;
856 *buf = (opcd << 26) | insn;
857 return 1;
858 }
859
860 /* The following are frequently used x-form instructions. */
861
862 #define GEN_OR(buf, ra, rs, rb) gen_x_form (buf, 31, rs, ra, rb, 444, 0)
863 #define GEN_MR(buf, ra, rs) GEN_OR (buf, ra, rs, rs)
864 #define GEN_LWARX(buf, rt, ra, rb) gen_x_form (buf, 31, rt, ra, rb, 20, 0)
865 #define GEN_STWCX(buf, rs, ra, rb) gen_x_form (buf, 31, rs, ra, rb, 150, 1)
866 /* Assume bf = cr7. */
867 #define GEN_CMPW(buf, ra, rb) gen_x_form (buf, 31, 28, ra, rb, 0, 0)
868
869
870 /* Generate an md-form instruction in BUF and return the number of instructions written.
871
872 0 6 11 16 21 27 30 31 32
873 | OPCD | RS | RA | sh | mb | XO |sh|Rc| */
874
875 static int
876 gen_md_form (uint32_t *buf, int opcd, int rs, int ra, int sh, int mb,
877 int xo, int rc)
878 {
879 uint32_t insn;
880 unsigned int n = ((mb & 0x1f) << 1) | ((mb >> 5) & 0x1);
881 unsigned int sh0_4 = sh & 0x1f;
882 unsigned int sh5 = (sh >> 5) & 1;
883
884 gdb_assert ((opcd & ~0x3f) == 0);
885 gdb_assert ((rs & ~0x1f) == 0);
886 gdb_assert ((ra & ~0x1f) == 0);
887 gdb_assert ((sh & ~0x3f) == 0);
888 gdb_assert ((mb & ~0x3f) == 0);
889 gdb_assert ((xo & ~0x7) == 0);
890 gdb_assert ((rc & ~0x1) == 0);
891
892 insn = (rs << 21) | (ra << 16) | (sh0_4 << 11) | (n << 5)
893 | (sh5 << 1) | (xo << 2) | (rc & 1);
894 *buf = (opcd << 26) | insn;
895 return 1;
896 }
897
898 /* The following are frequently used md-form instructions. */
899
900 #define GEN_RLDICL(buf, ra, rs ,sh, mb) \
901 gen_md_form (buf, 30, rs, ra, sh, mb, 0, 0)
902 #define GEN_RLDICR(buf, ra, rs ,sh, mb) \
903 gen_md_form (buf, 30, rs, ra, sh, mb, 1, 0)
904
905 /* Generate an i-form instruction in BUF and return the number of instructions written.
906
907 0 6 30 31 32
908 | OPCD | LI |AA|LK| */
909
910 static int
911 gen_i_form (uint32_t *buf, int opcd, int li, int aa, int lk)
912 {
913 uint32_t insn;
914
915 gdb_assert ((opcd & ~0x3f) == 0);
916
917 insn = (li & 0x3fffffc) | (aa & 1) | (lk & 1);
918 *buf = (opcd << 26) | insn;
919 return 1;
920 }
921
922 /* The following are frequently used i-form instructions. */
923
924 #define GEN_B(buf, li) gen_i_form (buf, 18, li, 0, 0)
925 #define GEN_BL(buf, li) gen_i_form (buf, 18, li, 0, 1)
926
927 /* Generate a b-form instruction in BUF and return the number of instructions written.
928
929 0 6 11 16 30 31 32
930 | OPCD | BO | BI | BD |AA|LK| */
931
932 static int
933 gen_b_form (uint32_t *buf, int opcd, int bo, int bi, int bd,
934 int aa, int lk)
935 {
936 uint32_t insn;
937
938 gdb_assert ((opcd & ~0x3f) == 0);
939 gdb_assert ((bo & ~0x1f) == 0);
940 gdb_assert ((bi & ~0x1f) == 0);
941
942 insn = (bo << 21) | (bi << 16) | (bd & 0xfffc) | (aa & 1) | (lk & 1);
943 *buf = (opcd << 26) | insn;
944 return 1;
945 }
946
947 /* The following are frequently used b-form instructions. */
948 /* Assume bi = cr7. */
949 #define GEN_BNE(buf, bd) gen_b_form (buf, 16, 0x4, (7 << 2) | 2, bd, 0 ,0)
950
951 /* GEN_LOAD and GEN_STORE generate 64- or 32-bit load/store for ppc64 or ppc32
952 respectively. They are primarily used to save/restore GPRs in the jump pad,
953 not for bytecode compiling. */
954
955 #ifdef __powerpc64__
956 #define GEN_LOAD(buf, rt, ra, si, is_64) (is_64 ? \
957 GEN_LD (buf, rt, ra, si) : \
958 GEN_LWZ (buf, rt, ra, si))
959 #define GEN_STORE(buf, rt, ra, si, is_64) (is_64 ? \
960 GEN_STD (buf, rt, ra, si) : \
961 GEN_STW (buf, rt, ra, si))
962 #else
963 #define GEN_LOAD(buf, rt, ra, si, is_64) GEN_LWZ (buf, rt, ra, si)
964 #define GEN_STORE(buf, rt, ra, si, is_64) GEN_STW (buf, rt, ra, si)
965 #endif
966
967 /* Generate a sequence of instructions to load IMM into register REG.
968 Write the instructions in BUF and return the number of instructions written. */
969
970 static int
971 gen_limm (uint32_t *buf, int reg, uint64_t imm, int is_64)
972 {
973 uint32_t *p = buf;
974
975 if ((imm + 32768) < 65536)
976 {
977 /* li reg, imm[15:0] */
978 p += GEN_LI (p, reg, imm);
979 }
980 else if ((imm >> 32) == 0)
981 {
982 /* lis reg, imm[31:16]
983 ori reg, reg, imm[15:0]
984 rldicl reg, reg, 0, 32 */
985 p += GEN_LIS (p, reg, (imm >> 16) & 0xffff);
986 if ((imm & 0xffff) != 0)
987 p += GEN_ORI (p, reg, reg, imm & 0xffff);
988 /* Clear upper 32-bit if sign-bit is set. */
989 if (imm & (1u << 31) && is_64)
990 p += GEN_RLDICL (p, reg, reg, 0, 32);
991 }
992 else
993 {
994 gdb_assert (is_64);
995 /* lis reg, <imm[63:48]>
996 ori reg, reg, <imm[48:32]>
997 rldicr reg, reg, 32, 31
998 oris reg, reg, <imm[31:16]>
999 ori reg, reg, <imm[15:0]> */
1000 p += GEN_LIS (p, reg, ((imm >> 48) & 0xffff));
1001 if (((imm >> 32) & 0xffff) != 0)
1002 p += GEN_ORI (p, reg, reg, ((imm >> 32) & 0xffff));
1003 p += GEN_RLDICR (p, reg, reg, 32, 31);
1004 if (((imm >> 16) & 0xffff) != 0)
1005 p += GEN_ORIS (p, reg, reg, ((imm >> 16) & 0xffff));
1006 if ((imm & 0xffff) != 0)
1007 p += GEN_ORI (p, reg, reg, (imm & 0xffff));
1008 }
1009
1010 return p - buf;
1011 }
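/* For illustration, the sequences gen_limm emits for a few values with
   REG = 3 and IS_64 = 1 (a sketch, derived from the cases above):

       imm = 100                li     3, 100
       imm = 0x12345678         lis    3, 0x1234
                                ori    3, 3, 0x5678
       imm = 0x80000000         lis    3, 0x8000
                                rldicl 3, 3, 0, 32

   The rldicl in the last case clears the upper 32 bits that lis
   sign-extended.  Larger 64-bit constants use the up-to-five-instruction
   lis/ori/rldicr/oris/ori pattern shown above.  */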
1012
1013 /* Generate a sequence for an atomic exchange at location LOCK.
1014 This code sequence clobbers r6, r7, r8. LOCK is the location for
1015 the atomic-xchg, OLD_VALUE is the expected old value stored in the
1016 location, and R_NEW is a register holding the new value. */
1017
1018 static int
1019 gen_atomic_xchg (uint32_t *buf, CORE_ADDR lock, int old_value, int r_new,
1020 int is_64)
1021 {
1022 const int r_lock = 6;
1023 const int r_old = 7;
1024 const int r_tmp = 8;
1025 uint32_t *p = buf;
1026
1027 /*
1028 1: lwarx TMP, 0, LOCK
1029 cmpwi TMP, OLD
1030 bne 1b
1031 stwcx. NEW, 0, LOCK
1032 bne 1b */
1033
1034 p += gen_limm (p, r_lock, lock, is_64);
1035 p += gen_limm (p, r_old, old_value, is_64);
1036
1037 p += GEN_LWARX (p, r_tmp, 0, r_lock);
1038 p += GEN_CMPW (p, r_tmp, r_old);
1039 p += GEN_BNE (p, -8);
1040 p += GEN_STWCX (p, r_new, 0, r_lock);
1041 p += GEN_BNE (p, -16);
1042
1043 return p - buf;
1044 }
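/* Usage sketch: the jump pad below calls
   gen_atomic_xchg (p, lockaddr, 0, 5, is_64), which spins until the
   word at LOCKADDR reads 0 and then atomically stores r5 (the address
   of the collecting_t object) there, taking the collecting lock.  */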
1045
1046 /* Generate a sequence of instructions for calling a function
1047 at address FN. Return the number of instructions written in BUF. */
1048
1049 static int
1050 gen_call (uint32_t *buf, CORE_ADDR fn, int is_64, int is_opd)
1051 {
1052 uint32_t *p = buf;
1053
1054 /* The target address must be in r12 so the callee can calculate its TOC address. */
1055 p += gen_limm (p, 12, fn, is_64);
1056 if (is_opd)
1057 {
1058 p += GEN_LOAD (p, 11, 12, 16, is_64);
1059 p += GEN_LOAD (p, 2, 12, 8, is_64);
1060 p += GEN_LOAD (p, 12, 12, 0, is_64);
1061 }
1062 p += GEN_MTSPR (p, 12, 9); /* mtctr r12 */
1063 *p++ = 0x4e800421; /* bctrl */
1064
1065 return p - buf;
1066 }
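/* For illustration, a call through an ELFv1 function descriptor
   (IS_OPD != 0) expands to roughly:

       <load r12 with FN>      (gen_limm sequence)
       ld    r11, 16(r12)
       ld    r2, 8(r12)
       ld    r12, 0(r12)
       mtctr r12
       bctrl

   loading the environment pointer, the callee's TOC and the real entry
   point from the descriptor.  For ELFv2 (IS_OPD == 0) only the gen_limm
   sequence, mtctr and bctrl are emitted.  */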
1067
1068 /* Copy the instruction from OLDLOC to *TO, and update *TO to *TO + size
1069 of instruction. This function is used to adjust pc-relative instructions
1070 when copying. */
1071
1072 static void
1073 ppc_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
1074 {
1075 uint32_t insn, op6;
1076 long rel, newrel;
1077
1078 read_inferior_memory (oldloc, (unsigned char *) &insn, 4);
1079 op6 = PPC_OP6 (insn);
1080
1081 if (op6 == 18 && (insn & 2) == 0)
1082 {
1083 /* branch && AA = 0 */
1084 rel = PPC_LI (insn);
1085 newrel = (oldloc - *to) + rel;
1086
1087 /* Out of range. Cannot relocate instruction. */
1088 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1089 return;
1090
1091 insn = (insn & ~0x3fffffc) | (newrel & 0x3fffffc);
1092 }
1093 else if (op6 == 16 && (insn & 2) == 0)
1094 {
1095 /* conditional branch && AA = 0 */
1096
1097 /* If the new relocation is too big for even a 26-bit unconditional
1098 branch, there is nothing we can do. Just abort.
1099
1100 Otherwise, if it can be fit in 16-bit conditional branch, just
1101 copy the instruction and relocate the address.
1102
1103 If it's too big for a conditional branch (16-bit), try to invert the
1104 condition and jump with 26-bit branch. For example,
1105
1106 beq .Lgoto
1107 INSN1
1108
1109 =>
1110
1111 bne 1f (+8)
1112 b .Lgoto
1113 1:INSN1
1114
1115 After this transform, we actually jump from *TO+4 instead of *TO,
1116 so check the relocation again, because it will be one insn farther than
1117 before if *TO is after OLDLOC.
1118
1119
1120 Similarly, BDNZT (and the like) is transformed from
1121
1122 bdnzt eq, .Lgoto
1123 INSN1
1124
1125 =>
1126
1127 bdz 1f (+12)
1128 bf eq, 1f (+8)
1129 b .Lgoto
1130 1:INSN1
1131
1132 See also "BO field encodings". */
1133
1134 rel = PPC_BD (insn);
1135 newrel = (oldloc - *to) + rel;
1136
1137 if (newrel < (1 << 15) && newrel >= -(1 << 15))
1138 insn = (insn & ~0xfffc) | (newrel & 0xfffc);
1139 else if ((PPC_BO (insn) & 0x14) == 0x4 || (PPC_BO (insn) & 0x14) == 0x10)
1140 {
1141 newrel -= 4;
1142
1143 /* Out of range. Cannot relocate instruction. */
1144 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1145 return;
1146
1147 if ((PPC_BO (insn) & 0x14) == 0x4)
1148 insn ^= (1 << 24);
1149 else if ((PPC_BO (insn) & 0x14) == 0x10)
1150 insn ^= (1 << 22);
1151
1152 /* Jump over the unconditional branch. */
1153 insn = (insn & ~0xfffc) | 0x8;
1154 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1155 *to += 4;
1156
1157 /* Build an unconditional branch and copy the LK bit. */
1158 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1159 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1160 *to += 4;
1161
1162 return;
1163 }
1164 else if ((PPC_BO (insn) & 0x14) == 0)
1165 {
1166 uint32_t bdnz_insn = (16 << 26) | (0x10 << 21) | 12;
1167 uint32_t bf_insn = (16 << 26) | (0x4 << 21) | 8;
1168
1169 newrel -= 8;
1170
1171 /* Out of range. Cannot relocate instruction. */
1172 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1173 return;
1174
1175 /* Copy BI field. */
1176 bf_insn |= (insn & 0x1f0000);
1177
1178 /* Invert condition. */
1179 bdnz_insn |= (insn ^ (1 << 22)) & (1 << 22);
1180 bf_insn |= (insn ^ (1 << 24)) & (1 << 24);
1181
1182 write_inferior_memory (*to, (unsigned char *) &bdnz_insn, 4);
1183 *to += 4;
1184 write_inferior_memory (*to, (unsigned char *) &bf_insn, 4);
1185 *to += 4;
1186
1187 /* Build an unconditional branch and copy the LK bit. */
1188 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1189 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1190 *to += 4;
1191
1192 return;
1193 }
1194 else /* (BO & 0x14) == 0x14, branch always. */
1195 {
1196 /* Out of range. Cannot relocate instruction. */
1197 if (newrel >= (1 << 25) || newrel < -(1 << 25))
1198 return;
1199
1200 /* Build an unconditional branch and copy the LK bit. */
1201 insn = (18 << 26) | (0x3fffffc & newrel) | (insn & 0x3);
1202 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1203 *to += 4;
1204
1205 return;
1206 }
1207 }
1208
1209 write_inferior_memory (*to, (unsigned char *) &insn, 4);
1210 *to += 4;
1211 }
1212
1213 /* Implement install_fast_tracepoint_jump_pad of target_ops.
1214 See target.h for details. */
1215
1216 static int
1217 ppc_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1218 CORE_ADDR collector,
1219 CORE_ADDR lockaddr,
1220 ULONGEST orig_size,
1221 CORE_ADDR *jump_entry,
1222 CORE_ADDR *trampoline,
1223 ULONGEST *trampoline_size,
1224 unsigned char *jjump_pad_insn,
1225 ULONGEST *jjump_pad_insn_size,
1226 CORE_ADDR *adjusted_insn_addr,
1227 CORE_ADDR *adjusted_insn_addr_end,
1228 char *err)
1229 {
1230 uint32_t buf[256];
1231 uint32_t *p = buf;
1232 int j, offset;
1233 CORE_ADDR buildaddr = *jump_entry;
1234 const CORE_ADDR entryaddr = *jump_entry;
1235 int rsz, min_frame, frame_size, tp_reg;
1236 #ifdef __powerpc64__
1237 struct regcache *regcache = get_thread_regcache (current_thread, 0);
1238 int is_64 = register_size (regcache->tdesc, 0) == 8;
1239 int is_opd = is_64 && !is_elfv2_inferior ();
1240 #else
1241 int is_64 = 0, is_opd = 0;
1242 #endif
1243
1244 #ifdef __powerpc64__
1245 if (is_64)
1246 {
1247 /* Minimum frame size is 32 bytes for ELFv2, and 112 bytes for ELFv1. */
1248 rsz = 8;
1249 min_frame = 112;
1250 frame_size = (40 * rsz) + min_frame;
1251 tp_reg = 13;
1252 }
1253 else
1254 {
1255 #endif
1256 rsz = 4;
1257 min_frame = 16;
1258 frame_size = (40 * rsz) + min_frame;
1259 tp_reg = 2;
1260 #ifdef __powerpc64__
1261 }
1262 #endif
1263
1264 /* Stack frame layout for this jump pad,
1265
1266 High thread_area (r13/r2) |
1267 tpoint - collecting_t obj
1268 PC/<tpaddr> | +36
1269 CTR | +35
1270 LR | +34
1271 XER | +33
1272 CR | +32
1273 R31 |
1274 R29 |
1275 ... |
1276 R1 | +1
1277 R0 - collected registers
1278 ... |
1279 ... |
1280 Low Back-chain -
1281
1282
1283 The code flow of this jump pad,
1284
1285 1. Adjust SP
1286 2. Save GPR and SPR
1287 3. Prepare argument
1288 4. Call gdb_collector
1289 5. Restore GPR and SPR
1290 6. Restore SP
1291 7. Build a jump back to the program
1292 8. Copy/relocate the original instruction
1293 9. Build a jump to replace the original instruction. */
1294
1295 /* Adjust stack pointer. */
1296 if (is_64)
1297 p += GEN_STDU (p, 1, 1, -frame_size); /* stdu r1,-frame_size(r1) */
1298 else
1299 p += GEN_STWU (p, 1, 1, -frame_size); /* stwu r1,-frame_size(r1) */
1300
1301 /* Store GPRs. Save R1 later, because it had just been modified, but
1302 we want the original value. */
1303 for (j = 2; j < 32; j++)
1304 p += GEN_STORE (p, j, 1, min_frame + j * rsz, is_64);
1305 p += GEN_STORE (p, 0, 1, min_frame + 0 * rsz, is_64);
1306 /* Set r0 to the original value of r1 before adjusting stack frame,
1307 and then save it. */
1308 p += GEN_ADDI (p, 0, 1, frame_size);
1309 p += GEN_STORE (p, 0, 1, min_frame + 1 * rsz, is_64);
1310
1311 /* Save CR, XER, LR, and CTR. */
1312 p += GEN_MFCR (p, 3); /* mfcr r3 */
1313 p += GEN_MFSPR (p, 4, 1); /* mfxer r4 */
1314 p += GEN_MFSPR (p, 5, 8); /* mflr r5 */
1315 p += GEN_MFSPR (p, 6, 9); /* mfctr r6 */
1316 p += GEN_STORE (p, 3, 1, min_frame + 32 * rsz, is_64);/* std r3, 32(r1) */
1317 p += GEN_STORE (p, 4, 1, min_frame + 33 * rsz, is_64);/* std r4, 33(r1) */
1318 p += GEN_STORE (p, 5, 1, min_frame + 34 * rsz, is_64);/* std r5, 34(r1) */
1319 p += GEN_STORE (p, 6, 1, min_frame + 35 * rsz, is_64);/* std r6, 35(r1) */
1320
1321 /* Save PC<tpaddr> */
1322 p += gen_limm (p, 3, tpaddr, is_64);
1323 p += GEN_STORE (p, 3, 1, min_frame + 36 * rsz, is_64);
1324
1325
1326 /* Setup arguments to collector. */
1327 /* Set r4 to collected registers. */
1328 p += GEN_ADDI (p, 4, 1, min_frame);
1329 /* Set r3 to TPOINT. */
1330 p += gen_limm (p, 3, tpoint, is_64);
1331
1332 /* Prepare collecting_t object for lock. */
1333 p += GEN_STORE (p, 3, 1, min_frame + 37 * rsz, is_64);
1334 p += GEN_STORE (p, tp_reg, 1, min_frame + 38 * rsz, is_64);
1335 /* Set R5 to collecting object. */
1336 p += GEN_ADDI (p, 5, 1, 37 * rsz);
1337
1338 p += GEN_LWSYNC (p);
1339 p += gen_atomic_xchg (p, lockaddr, 0, 5, is_64);
1340 p += GEN_LWSYNC (p);
1341
1342 /* Call to collector. */
1343 p += gen_call (p, collector, is_64, is_opd);
1344
1345 /* Simply write 0 to release the lock. */
1346 p += gen_limm (p, 3, lockaddr, is_64);
1347 p += gen_limm (p, 4, 0, is_64);
1348 p += GEN_LWSYNC (p);
1349 p += GEN_STORE (p, 4, 3, 0, is_64);
1350
1351 /* Restore stack and registers. */
1352 p += GEN_LOAD (p, 3, 1, min_frame + 32 * rsz, is_64); /* ld r3, 32(r1) */
1353 p += GEN_LOAD (p, 4, 1, min_frame + 33 * rsz, is_64); /* ld r4, 33(r1) */
1354 p += GEN_LOAD (p, 5, 1, min_frame + 34 * rsz, is_64); /* ld r5, 34(r1) */
1355 p += GEN_LOAD (p, 6, 1, min_frame + 35 * rsz, is_64); /* ld r6, 35(r1) */
1356 p += GEN_MTCR (p, 3); /* mtcr r3 */
1357 p += GEN_MTSPR (p, 4, 1); /* mtxer r4 */
1358 p += GEN_MTSPR (p, 5, 8); /* mtlr r5 */
1359 p += GEN_MTSPR (p, 6, 9); /* mtctr r6 */
1360
1361 /* Restore GPRs. */
1362 for (j = 2; j < 32; j++)
1363 p += GEN_LOAD (p, j, 1, min_frame + j * rsz, is_64);
1364 p += GEN_LOAD (p, 0, 1, min_frame + 0 * rsz, is_64);
1365 /* Restore SP. */
1366 p += GEN_ADDI (p, 1, 1, frame_size);
1367
1368 /* Flush instructions to inferior memory. */
1369 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1370
1371 /* Now, insert the original instruction to execute in the jump pad. */
1372 *adjusted_insn_addr = buildaddr + (p - buf) * 4;
1373 *adjusted_insn_addr_end = *adjusted_insn_addr;
1374 ppc_relocate_instruction (adjusted_insn_addr_end, tpaddr);
1375
1376 /* Verify the relocation size. It should be 4 for a normal copy,
1377 or 8 or 12 for some conditional branches. */
1378 if ((*adjusted_insn_addr_end - *adjusted_insn_addr == 0)
1379 || (*adjusted_insn_addr_end - *adjusted_insn_addr > 12))
1380 {
1381 sprintf (err, "E.Unexpected instruction length = %d "
1382 "when relocating instruction.",
1383 (int) (*adjusted_insn_addr_end - *adjusted_insn_addr));
1384 return 1;
1385 }
1386
1387 buildaddr = *adjusted_insn_addr_end;
1388 p = buf;
1389 /* Finally, write a jump back to the program. */
1390 offset = (tpaddr + 4) - buildaddr;
1391 if (offset >= (1 << 25) || offset < -(1 << 25))
1392 {
1393 sprintf (err, "E.Jump back from jump pad too far from tracepoint "
1394 "(offset 0x%x > 26-bit).", offset);
1395 return 1;
1396 }
1397 /* b <tpaddr+4> */
1398 p += GEN_B (p, offset);
1399 write_inferior_memory (buildaddr, (unsigned char *) buf, (p - buf) * 4);
1400 *jump_entry = buildaddr + (p - buf) * 4;
1401
1402 /* The jump pad is now built. Wire in a jump to our jump pad. This
1403 is always done last (by our caller actually), so that we can
1404 install fast tracepoints with threads running. This relies on
1405 the agent's atomic write support. */
1406 offset = entryaddr - tpaddr;
1407 if (offset >= (1 << 25) || offset < -(1 << 25))
1408 {
1409 sprintf (err, "E.Jump into jump pad too far from tracepoint "
1410 "(offset 0x%x > 26-bit).", offset);
1411 return 1;
1412 }
1413 /* b <jentry> */
1414 GEN_B ((uint32_t *) jjump_pad_insn, offset);
1415 *jjump_pad_insn_size = 4;
1416
1417 return 0;
1418 }
1419
1420 /* Returns the minimum instruction length for installing a tracepoint. */
1421
1422 static int
1423 ppc_get_min_fast_tracepoint_insn_len (void)
1424 {
1425 return 4;
1426 }
1427
1428 /* Emits a given buffer into the target at current_insn_ptr. Length
1429 is in units of 32-bit words. */
1430
1431 static void
1432 emit_insns (uint32_t *buf, int n)
1433 {
1434 n = n * sizeof (uint32_t);
1435 write_inferior_memory (current_insn_ptr, (unsigned char *) buf, n);
1436 current_insn_ptr += n;
1437 }
1438
1439 #define __EMIT_ASM(NAME, INSNS) \
1440 do \
1441 { \
1442 extern uint32_t start_bcax_ ## NAME []; \
1443 extern uint32_t end_bcax_ ## NAME []; \
1444 emit_insns (start_bcax_ ## NAME, \
1445 end_bcax_ ## NAME - start_bcax_ ## NAME); \
1446 __asm__ (".section .text.__ppcbcax\n\t" \
1447 "start_bcax_" #NAME ":\n\t" \
1448 INSNS "\n\t" \
1449 "end_bcax_" #NAME ":\n\t" \
1450 ".previous\n\t"); \
1451 } while (0)
1452
1453 #define _EMIT_ASM(NAME, INSNS) __EMIT_ASM (NAME, INSNS)
1454 #define EMIT_ASM(INSNS) _EMIT_ASM (__LINE__, INSNS)
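/* How the EMIT_ASM machinery works (an illustrative note): the INSNS
   string is assembled into gdbserver's own .text.__ppcbcax section,
   bracketed by the start_bcax_<line> and end_bcax_<line> symbols, and
   emit_insns copies those words into the inferior at current_insn_ptr.
   The bytecode compiler thus reuses the host assembler to produce the
   target instructions.  */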
1455
1456 /*
1457
1458 Bytecode execution stack frame - 32-bit
1459
1460 | LR save area (SP + 4)
1461 SP' -> +- Back chain (SP + 0)
1462 | Save r31 for accessing saved arguments
1463 | Save r30 for bytecode stack pointer
1464 | Save r4 for incoming argument *value
1465 | Save r3 for incoming argument regs
1466 r30 -> +- Bytecode execution stack
1467 |
1468 | 64 bytes (8 doublewords) initially.
1469 | Expand stack as needed.
1470 |
1471 +-
1472 | Some padding for minimum stack frame and 16-byte alignment.
1473 | 16 bytes.
1474 SP +- Back-chain (SP')
1475
1476 initial frame size
1477 = 16 + (4 * 4) + 64
1478 = 96
1479
1480 r30 is the stack-pointer for bytecode machine.
1481 It should point to the next empty slot, so we can use LDU for pop.
1482 r3 is used for cache of the high part of TOP value.
1483 It was the first argument, pointer to regs.
1484 r4 is used for cache of the low part of TOP value.
1485 It was the second argument, pointer to the result.
1486 We should set *result = TOP after leaving this function.
1487
1488 Note:
1489 * To restore stack at epilogue
1490 => sp = r31
1491 * To check the stack is big enough for bytecode execution.
1492 => r30 - 8 > SP + 8
1493 * To return execution result.
1494 => 0(r4) = TOP
1495
1496 */
1497
1498 /* Regardless of endianness, register 3 is always the high part, 4 the low part.
1499 These defines are used when the register pair is stored/loaded.
1500 Likewise, to simplify code, have a similar define for 5:6. */
1501
1502 #if __BYTE_ORDER == __LITTLE_ENDIAN
1503 #define TOP_FIRST "4"
1504 #define TOP_SECOND "3"
1505 #define TMP_FIRST "6"
1506 #define TMP_SECOND "5"
1507 #else
1508 #define TOP_FIRST "3"
1509 #define TOP_SECOND "4"
1510 #define TMP_FIRST "5"
1511 #define TMP_SECOND "6"
1512 #endif
1513
1514 /* Emit prologue in inferior memory. See above comments. */
1515
1516 static void
1517 ppc_emit_prologue (void)
1518 {
1519 EMIT_ASM (/* Save return address. */
1520 "mflr 0 \n"
1521 "stw 0, 4(1) \n"
1522 /* Adjust SP. 96 is the initial frame size. */
1523 "stwu 1, -96(1) \n"
1524 /* Save r30 and incoming arguments. */
1525 "stw 31, 96-4(1) \n"
1526 "stw 30, 96-8(1) \n"
1527 "stw 4, 96-12(1) \n"
1528 "stw 3, 96-16(1) \n"
1529 /* Point r31 to the original r1 for accessing arguments. */
1530 "addi 31, 1, 96 \n"
1531 /* Set r30 to point to the stack top. */
1532 "addi 30, 1, 64 \n"
1533 /* Initialize r3/TOP to 0. */
1534 "li 3, 0 \n"
1535 "li 4, 0 \n");
1536 }
1537
1538 /* Emit epilogue in inferior memory. See above comments. */
1539
1540 static void
1541 ppc_emit_epilogue (void)
1542 {
1543 EMIT_ASM (/* *result = TOP */
1544 "lwz 5, -12(31) \n"
1545 "stw " TOP_FIRST ", 0(5) \n"
1546 "stw " TOP_SECOND ", 4(5) \n"
1547 /* Restore registers. */
1548 "lwz 31, -4(31) \n"
1549 "lwz 30, -8(31) \n"
1550 /* Restore SP. */
1551 "lwz 1, 0(1) \n"
1552 /* Restore LR. */
1553 "lwz 0, 4(1) \n"
1554 /* Return 0 for no-error. */
1555 "li 3, 0 \n"
1556 "mtlr 0 \n"
1557 "blr \n");
1558 }
1559
1560 /* TOP = stack[--sp] + TOP */
1561
1562 static void
1563 ppc_emit_add (void)
1564 {
1565 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1566 "lwz " TMP_SECOND ", 4(30)\n"
1567 "addc 4, 6, 4 \n"
1568 "adde 3, 5, 3 \n");
1569 }
1570
1571 /* TOP = stack[--sp] - TOP */
1572
1573 static void
1574 ppc_emit_sub (void)
1575 {
1576 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1577 "lwz " TMP_SECOND ", 4(30) \n"
1578 "subfc 4, 4, 6 \n"
1579 "subfe 3, 3, 5 \n");
1580 }
1581
1582 /* TOP = stack[--sp] * TOP */
1583
1584 static void
1585 ppc_emit_mul (void)
1586 {
1587 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1588 "lwz " TMP_SECOND ", 4(30) \n"
1589 "mulhwu 7, 6, 4 \n"
1590 "mullw 3, 6, 3 \n"
1591 "mullw 5, 4, 5 \n"
1592 "mullw 4, 6, 4 \n"
1593 "add 3, 5, 3 \n"
1594 "add 3, 7, 3 \n");
1595 }
1596
1597 /* TOP = stack[--sp] << TOP */
1598
1599 static void
1600 ppc_emit_lsh (void)
1601 {
1602 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1603 "lwz " TMP_SECOND ", 4(30) \n"
1604 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1605 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1606 "slw 5, 5, 4\n" /* Shift high part left */
1607 "slw 4, 6, 4\n" /* Shift low part left */
1608 "srw 3, 6, 3\n" /* Shift low to high if shift < 32 */
1609 "slw 7, 6, 7\n" /* Shift low to high if shift >= 32 */
1610 "or 3, 5, 3\n"
1611 "or 3, 7, 3\n"); /* Assemble high part */
1612 }
1613
1614 /* Top = stack[--sp] >> TOP
1615 (Arithmetic shift right) */
1616
1617 static void
1618 ppc_emit_rsh_signed (void)
1619 {
1620 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1621 "lwz " TMP_SECOND ", 4(30) \n"
1622 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1623 "sraw 3, 5, 4\n" /* Shift high part right */
1624 "cmpwi 7, 1\n"
1625 "blt 0, 1f\n" /* If shift <= 32, goto 1: */
1626 "sraw 4, 5, 7\n" /* Shift high to low */
1627 "b 2f\n"
1628 "1:\n"
1629 "subfic 7, 4, 32\n" /* r7 = 32 - TOP */
1630 "srw 4, 6, 4\n" /* Shift low part right */
1631 "slw 5, 5, 7\n" /* Shift high to low */
1632 "or 4, 4, 5\n" /* Assemble low part */
1633 "2:\n");
1634 }
1635
1636 /* Top = stack[--sp] >> TOP
1637 (Logical shift right) */
1638
1639 static void
1640 ppc_emit_rsh_unsigned (void)
1641 {
1642 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1643 "lwz " TMP_SECOND ", 4(30) \n"
1644 "subfic 3, 4, 32\n" /* r3 = 32 - TOP */
1645 "addi 7, 4, -32\n" /* r7 = TOP - 32 */
1646 "srw 6, 6, 4\n" /* Shift low part right */
1647 "slw 3, 5, 3\n" /* Shift high to low if shift < 32 */
1648 "srw 7, 5, 7\n" /* Shift high to low if shift >= 32 */
1649 "or 6, 6, 3\n"
1650 "srw 3, 5, 4\n" /* Shift high part right */
1651 "or 4, 6, 7\n"); /* Assemble low part */
1652 }
1653
1654 /* Emit code for sign-extension specified by ARG. */
1655
1656 static void
1657 ppc_emit_ext (int arg)
1658 {
1659 switch (arg)
1660 {
1661 case 8:
1662 EMIT_ASM ("extsb 4, 4\n"
1663 "srawi 3, 4, 31");
1664 break;
1665 case 16:
1666 EMIT_ASM ("extsh 4, 4\n"
1667 "srawi 3, 4, 31");
1668 break;
1669 case 32:
1670 EMIT_ASM ("srawi 3, 4, 31");
1671 break;
1672 default:
1673 emit_error = 1;
1674 }
1675 }
1676
1677 /* Emit code for zero-extension specified by ARG. */
1678
1679 static void
1680 ppc_emit_zero_ext (int arg)
1681 {
1682 switch (arg)
1683 {
1684 case 8:
1685 EMIT_ASM ("clrlwi 4,4,24\n"
1686 "li 3, 0\n");
1687 break;
1688 case 16:
1689 EMIT_ASM ("clrlwi 4,4,16\n"
1690 "li 3, 0\n");
1691 break;
1692 case 32:
1693 EMIT_ASM ("li 3, 0");
1694 break;
1695 default:
1696 emit_error = 1;
1697 }
1698 }
1699
1700 /* TOP = !TOP
1701 i.e., TOP = (TOP == 0) ? 1 : 0; */
1702
1703 static void
1704 ppc_emit_log_not (void)
1705 {
1706 EMIT_ASM ("or 4, 3, 4 \n"
1707 "cntlzw 4, 4 \n"
1708 "srwi 4, 4, 5 \n"
1709 "li 3, 0 \n");
1710 }
1711
1712 /* TOP = stack[--sp] & TOP */
1713
1714 static void
1715 ppc_emit_bit_and (void)
1716 {
1717 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1718 "lwz " TMP_SECOND ", 4(30) \n"
1719 "and 4, 6, 4 \n"
1720 "and 3, 5, 3 \n");
1721 }
1722
1723 /* TOP = stack[--sp] | TOP */
1724
1725 static void
1726 ppc_emit_bit_or (void)
1727 {
1728 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1729 "lwz " TMP_SECOND ", 4(30) \n"
1730 "or 4, 6, 4 \n"
1731 "or 3, 5, 3 \n");
1732 }
1733
1734 /* TOP = stack[--sp] ^ TOP */
1735
1736 static void
1737 ppc_emit_bit_xor (void)
1738 {
1739 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1740 "lwz " TMP_SECOND ", 4(30) \n"
1741 "xor 4, 6, 4 \n"
1742 "xor 3, 5, 3 \n");
1743 }
1744
1745 /* TOP = ~TOP
1746 i.e., TOP = ~(TOP | TOP) */
1747
1748 static void
1749 ppc_emit_bit_not (void)
1750 {
1751 EMIT_ASM ("nor 3, 3, 3 \n"
1752 "nor 4, 4, 4 \n");
1753 }
1754
1755 /* TOP = stack[--sp] == TOP */
1756
1757 static void
1758 ppc_emit_equal (void)
1759 {
1760 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1761 "lwz " TMP_SECOND ", 4(30) \n"
1762 "xor 4, 6, 4 \n"
1763 "xor 3, 5, 3 \n"
1764 "or 4, 3, 4 \n"
1765 "cntlzw 4, 4 \n"
1766 "srwi 4, 4, 5 \n"
1767 "li 3, 0 \n");
1768 }
1769
1770 /* TOP = stack[--sp] < TOP
1771 (Signed comparison) */
1772
1773 static void
1774 ppc_emit_less_signed (void)
1775 {
1776 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1777 "lwz " TMP_SECOND ", 4(30) \n"
1778 "cmplw 6, 6, 4 \n"
1779 "cmpw 7, 5, 3 \n"
1780 /* CR6 bit 0 = low less and high equal */
1781 "crand 6*4+0, 6*4+0, 7*4+2\n"
1782 /* CR7 bit 0 = (low less and high equal) or high less */
1783 "cror 7*4+0, 7*4+0, 6*4+0\n"
1784 "mfcr 4 \n"
1785 "rlwinm 4, 4, 29, 31, 31 \n"
1786 "li 3, 0 \n");
1787 }
1788
1789 /* TOP = stack[--sp] < TOP
1790 (Unsigned comparison) */
1791
1792 static void
1793 ppc_emit_less_unsigned (void)
1794 {
1795 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
1796 "lwz " TMP_SECOND ", 4(30) \n"
1797 "cmplw 6, 6, 4 \n"
1798 "cmplw 7, 5, 3 \n"
1799 /* CR6 bit 0 = low less and high equal */
1800 "crand 6*4+0, 6*4+0, 7*4+2\n"
1801 /* CR7 bit 0 = (low less and high equal) or high less */
1802 "cror 7*4+0, 7*4+0, 6*4+0\n"
1803 "mfcr 4 \n"
1804 "rlwinm 4, 4, 29, 31, 31 \n"
1805 "li 3, 0 \n");
1806 }
1807
1808 /* Read SIZE bytes from the memory address in TOP.
1809 Zero-extend the read value. */
1810
1811 static void
1812 ppc_emit_ref (int size)
1813 {
1814 switch (size)
1815 {
1816 case 1:
1817 EMIT_ASM ("lbz 4, 0(4)\n"
1818 "li 3, 0");
1819 break;
1820 case 2:
1821 EMIT_ASM ("lhz 4, 0(4)\n"
1822 "li 3, 0");
1823 break;
1824 case 4:
1825 EMIT_ASM ("lwz 4, 0(4)\n"
1826 "li 3, 0");
1827 break;
1828 case 8:
1829 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1830 EMIT_ASM ("lwz 3, 4(4)\n"
1831 "lwz 4, 0(4)");
1832 else
1833 EMIT_ASM ("lwz 3, 0(4)\n"
1834 "lwz 4, 4(4)");
1835 break;
1836 }
1837 }
1838
1839 /* TOP = NUM */
1840
1841 static void
1842 ppc_emit_const (LONGEST num)
1843 {
1844 uint32_t buf[10];
1845 uint32_t *p = buf;
1846
1847 p += gen_limm (p, 3, num >> 32 & 0xffffffff, 0);
1848 p += gen_limm (p, 4, num & 0xffffffff, 0);
1849
1850 emit_insns (buf, p - buf);
1851 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1852 }
1853
1854 /* Set TOP to the value of register REG by calling get_raw_reg function
1855 with two arguments, the collected buffer and the register number. */
1856
1857 static void
1858 ppc_emit_reg (int reg)
1859 {
1860 uint32_t buf[13];
1861 uint32_t *p = buf;
1862
1863 /* fctx->regs is passed in r3 and then saved in -16(31). */
1864 p += GEN_LWZ (p, 3, 31, -16);
1865 p += GEN_LI (p, 4, reg); /* li r4, reg */
1866 p += gen_call (p, get_raw_reg_func_addr (), 0, 0);
1867
1868 emit_insns (buf, p - buf);
1869 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1870
1871 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1872 {
1873 EMIT_ASM ("mr 5, 4\n"
1874 "mr 4, 3\n"
1875 "mr 3, 5\n");
1876 }
1877 }
1878
1879 /* TOP = stack[--sp] */
1880
1881 static void
1882 ppc_emit_pop (void)
1883 {
1884 EMIT_ASM ("lwzu " TOP_FIRST ", 8(30) \n"
1885 "lwz " TOP_SECOND ", 4(30) \n");
1886 }
1887
1888 /* stack[sp++] = TOP
1889
1890 Because we may run out of bytecode stack, expand it by 8 more doublewords
1891 if needed. */
1892
1893 static void
1894 ppc_emit_stack_flush (void)
1895 {
1896 /* Make sure the bytecode stack is big enough before a push.
1897 Otherwise, expand it by 64 more bytes. */
1898
1899 EMIT_ASM (" stw " TOP_FIRST ", 0(30) \n"
1900 " stw " TOP_SECOND ", 4(30)\n"
1901 " addi 5, 30, -(8 + 8) \n"
1902 " cmpw 7, 5, 1 \n"
1903 " bgt 7, 1f \n"
1904 " stwu 31, -64(1) \n"
1905 "1:addi 30, 30, -8 \n");
1906 }
1907
1908 /* Swap TOP and stack[sp-1] */
1909
1910 static void
1911 ppc_emit_swap (void)
1912 {
1913 EMIT_ASM ("lwz " TMP_FIRST ", 8(30) \n"
1914 "lwz " TMP_SECOND ", 12(30) \n"
1915 "stw " TOP_FIRST ", 8(30) \n"
1916 "stw " TOP_SECOND ", 12(30) \n"
1917 "mr 3, 5 \n"
1918 "mr 4, 6 \n");
1919 }
1920
1921 /* Discard N elements in the stack. Also used for ppc64. */
1922
1923 static void
1924 ppc_emit_stack_adjust (int n)
1925 {
1926 uint32_t buf[6];
1927 uint32_t *p = buf;
1928
1929 n = n << 3;
1930 if ((n >> 15) != 0)
1931 {
1932 emit_error = 1;
1933 return;
1934 }
1935
1936 p += GEN_ADDI (p, 30, 30, n);
1937
1938 emit_insns (buf, p - buf);
1939 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1940 }
1941
1942 /* Call function FN. */
1943
1944 static void
1945 ppc_emit_call (CORE_ADDR fn)
1946 {
1947 uint32_t buf[11];
1948 uint32_t *p = buf;
1949
1950 p += gen_call (p, fn, 0, 0);
1951
1952 emit_insns (buf, p - buf);
1953 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1954 }
1955
1956 /* FN's prototype is `LONGEST(*fn)(int)'.
1957 TOP = fn (arg1)
1958 */
1959
1960 static void
1961 ppc_emit_int_call_1 (CORE_ADDR fn, int arg1)
1962 {
1963 uint32_t buf[15];
1964 uint32_t *p = buf;
1965
1966 /* Setup argument. arg1 is a 16-bit value. */
1967 p += gen_limm (p, 3, (uint32_t) arg1, 0);
1968 p += gen_call (p, fn, 0, 0);
1969
1970 emit_insns (buf, p - buf);
1971 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
1972
1973 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1974 {
1975 EMIT_ASM ("mr 5, 4\n"
1976 "mr 4, 3\n"
1977 "mr 3, 5\n");
1978 }
1979 }
1980
1981 /* FN's prototype is `void(*fn)(int,LONGEST)'.
1982 fn (arg1, TOP)
1983
1984 TOP should be preserved/restored before/after the call. */
1985
1986 static void
1987 ppc_emit_void_call_2 (CORE_ADDR fn, int arg1)
1988 {
1989 uint32_t buf[21];
1990 uint32_t *p = buf;
1991
1992 /* Save TOP. 0(30) is the next empty slot. */
1993 p += GEN_STW (p, 3, 30, 0);
1994 p += GEN_STW (p, 4, 30, 4);
1995
1996 /* Set up the argument. arg1 is a 16-bit value. */
1997 if (__BYTE_ORDER == __LITTLE_ENDIAN)
1998 {
1999 p += GEN_MR (p, 5, 4);
2000 p += GEN_MR (p, 6, 3);
2001 }
2002 else
2003 {
2004 p += GEN_MR (p, 5, 3);
2005 p += GEN_MR (p, 6, 4);
2006 }
2007 p += gen_limm (p, 3, (uint32_t) arg1, 0);
2008 p += gen_call (p, fn, 0, 0);
2009
2010 /* Restore TOP */
2011 p += GEN_LWZ (p, 3, 30, 0);
2012 p += GEN_LWZ (p, 4, 30, 4);
2013
2014 emit_insns (buf, p - buf);
2015 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2016 }
2017
2018 /* Note in the following goto ops:
2019
2020 When emitting goto, the target address is later relocated by
2021 write_goto_address. OFFSET_P is the offset of the branch instruction
2022 in the code sequence, and *SIZE_P tells how to relocate the instruction,
2023 as recognized by ppc_write_goto_address. In the current implementation,
2024 SIZE can be either 24 or 14, for branch or conditional-branch instructions.
2025 */
2026
2027 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2028
2029 static void
2030 ppc_emit_if_goto (int *offset_p, int *size_p)
2031 {
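/* OR the two words of TOP to test it for zero and pop the new TOP; the
conditional branch target is a placeholder (it branches to itself) and is
relocated later by ppc_write_goto_address. */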
2032 EMIT_ASM ("or. 3, 3, 4 \n"
2033 "lwzu " TOP_FIRST ", 8(30) \n"
2034 "lwz " TOP_SECOND ", 4(30) \n"
2035 "1:bne 0, 1b \n");
2036
2037 if (offset_p)
2038 *offset_p = 12;
2039 if (size_p)
2040 *size_p = 14;
2041 }
2042
2043 /* Unconditional goto. Also used for ppc64. */
2044
2045 static void
2046 ppc_emit_goto (int *offset_p, int *size_p)
2047 {
2048 EMIT_ASM ("1:b 1b");
2049
2050 if (offset_p)
2051 *offset_p = 0;
2052 if (size_p)
2053 *size_p = 24;
2054 }
2055
2056 /* Goto if stack[--sp] == TOP */
2057
2058 static void
2059 ppc_emit_eq_goto (int *offset_p, int *size_p)
2060 {
2061 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2062 "lwz " TMP_SECOND ", 4(30) \n"
2063 "xor 4, 6, 4 \n"
2064 "xor 3, 5, 3 \n"
2065 "or. 3, 3, 4 \n"
2066 "lwzu " TOP_FIRST ", 8(30) \n"
2067 "lwz " TOP_SECOND ", 4(30) \n"
2068 "1:beq 0, 1b \n");
2069
2070 if (offset_p)
2071 *offset_p = 28;
2072 if (size_p)
2073 *size_p = 14;
2074 }
2075
2076 /* Goto if stack[--sp] != TOP */
2077
2078 static void
2079 ppc_emit_ne_goto (int *offset_p, int *size_p)
2080 {
2081 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2082 "lwz " TMP_SECOND ", 4(30) \n"
2083 "xor 4, 6, 4 \n"
2084 "xor 3, 5, 3 \n"
2085 "or. 3, 3, 4 \n"
2086 "lwzu " TOP_FIRST ", 8(30) \n"
2087 "lwz " TOP_SECOND ", 4(30) \n"
2088 "1:bne 0, 1b \n");
2089
2090 if (offset_p)
2091 *offset_p = 28;
2092 if (size_p)
2093 *size_p = 14;
2094 }
2095
2096 /* Goto if stack[--sp] < TOP */
2097
2098 static void
2099 ppc_emit_lt_goto (int *offset_p, int *size_p)
2100 {
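/* Compose a 64-bit signed comparison from two 32-bit compares: CR6 holds
the unsigned compare of the low words, CR7 the signed compare of the high
words, and the crand/cror sequence folds the combined less-than result into
CR7 bit 0 before the (later relocated) blt. */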
2101 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2102 "lwz " TMP_SECOND ", 4(30) \n"
2103 "cmplw 6, 6, 4 \n"
2104 "cmpw 7, 5, 3 \n"
2105 /* CR6 bit 0 = low less and high equal */
2106 "crand 6*4+0, 6*4+0, 7*4+2\n"
2107 /* CR7 bit 0 = (low less and high equal) or high less */
2108 "cror 7*4+0, 7*4+0, 6*4+0\n"
2109 "lwzu " TOP_FIRST ", 8(30) \n"
2110 "lwz " TOP_SECOND ", 4(30)\n"
2111 "1:blt 7, 1b \n");
2112
2113 if (offset_p)
2114 *offset_p = 32;
2115 if (size_p)
2116 *size_p = 14;
2117 }
2118
2119 /* Goto if stack[--sp] <= TOP */
2120
2121 static void
2122 ppc_emit_le_goto (int *offset_p, int *size_p)
2123 {
2124 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2125 "lwz " TMP_SECOND ", 4(30) \n"
2126 "cmplw 6, 6, 4 \n"
2127 "cmpw 7, 5, 3 \n"
2128 /* CR6 bit 0 = low less/equal and high equal */
2129 "crandc 6*4+0, 7*4+2, 6*4+1\n"
2130 /* CR7 bit 0 = (low less/eq and high equal) or high less */
2131 "cror 7*4+0, 7*4+0, 6*4+0\n"
2132 "lwzu " TOP_FIRST ", 8(30) \n"
2133 "lwz " TOP_SECOND ", 4(30)\n"
2134 "1:blt 7, 1b \n");
2135
2136 if (offset_p)
2137 *offset_p = 32;
2138 if (size_p)
2139 *size_p = 14;
2140 }
2141
2142 /* Goto if stack[--sp] > TOP */
2143
2144 static void
2145 ppc_emit_gt_goto (int *offset_p, int *size_p)
2146 {
2147 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2148 "lwz " TMP_SECOND ", 4(30) \n"
2149 "cmplw 6, 6, 4 \n"
2150 "cmpw 7, 5, 3 \n"
2151 /* CR6 bit 0 = low greater and high equal */
2152 "crand 6*4+0, 6*4+1, 7*4+2\n"
2153 /* CR7 bit 0 = (low greater and high equal) or high greater */
2154 "cror 7*4+0, 7*4+1, 6*4+0\n"
2155 "lwzu " TOP_FIRST ", 8(30) \n"
2156 "lwz " TOP_SECOND ", 4(30)\n"
2157 "1:blt 7, 1b \n");
2158
2159 if (offset_p)
2160 *offset_p = 32;
2161 if (size_p)
2162 *size_p = 14;
2163 }
2164
2165 /* Goto if stack[--sp] >= TOP */
2166
2167 static void
2168 ppc_emit_ge_goto (int *offset_p, int *size_p)
2169 {
2170 EMIT_ASM ("lwzu " TMP_FIRST ", 8(30) \n"
2171 "lwz " TMP_SECOND ", 4(30) \n"
2172 "cmplw 6, 6, 4 \n"
2173 "cmpw 7, 5, 3 \n"
2174 /* CR6 bit 0 = low ge and high equal */
2175 "crandc 6*4+0, 7*4+2, 6*4+0\n"
2176 /* CR7 bit 0 = (low ge and high equal) or high greater */
2177 "cror 7*4+0, 7*4+1, 6*4+0\n"
2178 "lwzu " TOP_FIRST ", 8(30)\n"
2179 "lwz " TOP_SECOND ", 4(30)\n"
2180 "1:blt 7, 1b \n");
2181
2182 if (offset_p)
2183 *offset_p = 32;
2184 if (size_p)
2185 *size_p = 14;
2186 }
2187
2188 /* Relocate a previously emitted branch instruction. FROM is the address
2189 of the branch instruction, TO is the goto target address, and SIZE
2190 is the value we set via *SIZE_P before. Currently, it is either
2191 24 or 14, for branch and conditional-branch instructions.
2192 Also used for ppc64. */
2193
2194 static void
2195 ppc_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2196 {
2197 long rel = to - from;
2198 uint32_t insn;
2199 int opcd;
2200
2201 read_inferior_memory (from, (unsigned char *) &insn, 4);
2202 opcd = (insn >> 26) & 0x3f;
2203
2204 switch (size)
2205 {
2206 case 14:
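/* Conditional branch (bc, primary opcode 16): the 16-bit displacement
(a 14-bit BD field shifted left by 2) occupies the bits masked by 0xfffc. */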
2207 if (opcd != 16
2208 || (rel >= (1 << 15) || rel < -(1 << 15)))
2209 emit_error = 1;
2210 insn = (insn & ~0xfffc) | (rel & 0xfffc);
2211 break;
2212 case 24:
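/* Unconditional branch (b, primary opcode 18): the 26-bit displacement
(a 24-bit LI field shifted left by 2) occupies the bits masked by 0x3fffffc. */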
2213 if (opcd != 18
2214 || (rel >= (1 << 25) || rel < -(1 << 25)))
2215 emit_error = 1;
2216 insn = (insn & ~0x3fffffc) | (rel & 0x3fffffc);
2217 break;
2218 default:
2219 emit_error = 1;
2220 }
2221
2222 if (!emit_error)
2223 write_inferior_memory (from, (unsigned char *) &insn, 4);
2224 }
2225
2226 /* Table of emit ops for 32-bit. */
2227
2228 static struct emit_ops ppc_emit_ops_impl =
2229 {
2230 ppc_emit_prologue,
2231 ppc_emit_epilogue,
2232 ppc_emit_add,
2233 ppc_emit_sub,
2234 ppc_emit_mul,
2235 ppc_emit_lsh,
2236 ppc_emit_rsh_signed,
2237 ppc_emit_rsh_unsigned,
2238 ppc_emit_ext,
2239 ppc_emit_log_not,
2240 ppc_emit_bit_and,
2241 ppc_emit_bit_or,
2242 ppc_emit_bit_xor,
2243 ppc_emit_bit_not,
2244 ppc_emit_equal,
2245 ppc_emit_less_signed,
2246 ppc_emit_less_unsigned,
2247 ppc_emit_ref,
2248 ppc_emit_if_goto,
2249 ppc_emit_goto,
2250 ppc_write_goto_address,
2251 ppc_emit_const,
2252 ppc_emit_call,
2253 ppc_emit_reg,
2254 ppc_emit_pop,
2255 ppc_emit_stack_flush,
2256 ppc_emit_zero_ext,
2257 ppc_emit_swap,
2258 ppc_emit_stack_adjust,
2259 ppc_emit_int_call_1,
2260 ppc_emit_void_call_2,
2261 ppc_emit_eq_goto,
2262 ppc_emit_ne_goto,
2263 ppc_emit_lt_goto,
2264 ppc_emit_le_goto,
2265 ppc_emit_gt_goto,
2266 ppc_emit_ge_goto
2267 };
2268
2269 #ifdef __powerpc64__
2270
2271 /*
2272
2273 Bytecode execution stack frame - 64-bit
2274
2275 | LR save area (SP + 16)
2276 | CR save area (SP + 8)
2277 SP' -> +- Back chain (SP + 0)
2278 | Save r31 for accessing saved arguments
2279 | Save r30 for bytecode stack pointer
2280 | Save r4 for incoming argument *value
2281 | Save r3 for incoming argument regs
2282 r30 -> +- Bytecode execution stack
2283 |
2284 | 64 bytes (8 doublewords) initially.
2285 | Expand stack as needed.
2286 |
2287 +-
2288 | Some padding for minimum stack frame.
2289 | 112 for ELFv1.
2290 SP +- Back-chain (SP')
2291
2292 initial frame size
2293 = 112 + (4 * 8) + 64
2294 = 208
2295
2296 r30 is the stack pointer for the bytecode machine.
2297 It should point to the next empty slot, so we can use LDU for pop.
2298 r3 is used to cache the TOP value.
2299 It was the first argument, the pointer to regs.
2300 r4 is the second argument, the pointer to the result.
2301 We should set *result = TOP before leaving this function.
2302
2303 Note:
2304 * To restore stack at epilogue
2305 => sp = r31
2306 * To check stack is big enough for bytecode execution.
2307 => r30 - 8 > SP + 112
2308 * To return execution result.
2309 => 0(r4) = TOP
2310
2311 */
2312
2313 /* Emit prologue in inferior memory. See above comments. */
2314
2315 static void
2316 ppc64v1_emit_prologue (void)
2317 {
2318 /* On ELFv1, function pointers really point to a function descriptor,
2319 so emit one here. We don't care about the contents of words 1 and 2,
2320 so let them just overlap our code. */
2321 uint64_t opd = current_insn_ptr + 8;
2322 uint32_t buf[2];
2323
2324 /* Mind the strict aliasing rules. */
2325 memcpy (buf, &opd, sizeof buf);
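/* Only the entry-point doubleword of the descriptor is emitted; it points
just past itself, where the real prologue code starts. The descriptor's TOC
and environment words overlap that code and are never read. */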
2326 emit_insns (buf, 2);
2327 EMIT_ASM (/* Save return address. */
2328 "mflr 0 \n"
2329 "std 0, 16(1) \n"
2330 /* Save r31, r30 and the incoming arguments. */
2331 "std 31, -8(1) \n"
2332 "std 30, -16(1) \n"
2333 "std 4, -24(1) \n"
2334 "std 3, -32(1) \n"
2335 /* Point r31 to the current r1 for accessing arguments. */
2336 "mr 31, 1 \n"
2337 /* Adjust SP. 208 is the initial frame size. */
2338 "stdu 1, -208(1) \n"
2339 /* Set r30 to point to the stack top. */
2340 "addi 30, 1, 168 \n"
2341 /* Initialize r3/TOP to 0. */
2342 "li 3, 0 \n");
2343 }
2344
2345 /* Emit prologue in inferior memory. See above comments. */
2346
2347 static void
2348 ppc64v2_emit_prologue (void)
2349 {
2350 EMIT_ASM (/* Save return address. */
2351 "mflr 0 \n"
2352 "std 0, 16(1) \n"
2353 /* Save r31, r30 and the incoming arguments. */
2354 "std 31, -8(1) \n"
2355 "std 30, -16(1) \n"
2356 "std 4, -24(1) \n"
2357 "std 3, -32(1) \n"
2358 /* Point r31 to the current r1 for accessing arguments. */
2359 "mr 31, 1 \n"
2360 /* Adjust SP. 208 is the initial frame size. */
2361 "stdu 1, -208(1) \n"
2362 /* Set r30 to point to the stack top. */
2363 "addi 30, 1, 168 \n"
2364 /* Initialize r3/TOP to 0. */
2365 "li 3, 0 \n");
2366 }
2367
2368 /* Emit epilogue in inferior memory. See above comments. */
2369
2370 static void
2371 ppc64_emit_epilogue (void)
2372 {
2373 EMIT_ASM (/* Restore SP. */
2374 "ld 1, 0(1) \n"
2375 /* *result = TOP */
2376 "ld 4, -24(1) \n"
2377 "std 3, 0(4) \n"
2378 /* Restore registers. */
2379 "ld 31, -8(1) \n"
2380 "ld 30, -16(1) \n"
2381 /* Restore LR. */
2382 "ld 0, 16(1) \n"
2383 /* Return 0 for no-error. */
2384 "li 3, 0 \n"
2385 "mtlr 0 \n"
2386 "blr \n");
2387 }
2388
2389 /* TOP = stack[--sp] + TOP */
2390
2391 static void
2392 ppc64_emit_add (void)
2393 {
2394 EMIT_ASM ("ldu 4, 8(30) \n"
2395 "add 3, 4, 3 \n");
2396 }
2397
2398 /* TOP = stack[--sp] - TOP */
2399
2400 static void
2401 ppc64_emit_sub (void)
2402 {
2403 EMIT_ASM ("ldu 4, 8(30) \n"
2404 "sub 3, 4, 3 \n");
2405 }
2406
2407 /* TOP = stack[--sp] * TOP */
2408
2409 static void
2410 ppc64_emit_mul (void)
2411 {
2412 EMIT_ASM ("ldu 4, 8(30) \n"
2413 "mulld 3, 4, 3 \n");
2414 }
2415
2416 /* TOP = stack[--sp] << TOP */
2417
2418 static void
2419 ppc64_emit_lsh (void)
2420 {
2421 EMIT_ASM ("ldu 4, 8(30) \n"
2422 "sld 3, 4, 3 \n");
2423 }
2424
2425 /* TOP = stack[--sp] >> TOP
2426 (Arithmetic shift right) */
2427
2428 static void
2429 ppc64_emit_rsh_signed (void)
2430 {
2431 EMIT_ASM ("ldu 4, 8(30) \n"
2432 "srad 3, 4, 3 \n");
2433 }
2434
2435 /* TOP = stack[--sp] >> TOP
2436 (Logical shift right) */
2437
2438 static void
2439 ppc64_emit_rsh_unsigned (void)
2440 {
2441 EMIT_ASM ("ldu 4, 8(30) \n"
2442 "srd 3, 4, 3 \n");
2443 }
2444
2445 /* Emit code for the sign extension specified by ARG. */
2446
2447 static void
2448 ppc64_emit_ext (int arg)
2449 {
2450 switch (arg)
2451 {
2452 case 8:
2453 EMIT_ASM ("extsb 3, 3");
2454 break;
2455 case 16:
2456 EMIT_ASM ("extsh 3, 3");
2457 break;
2458 case 32:
2459 EMIT_ASM ("extsw 3, 3");
2460 break;
2461 default:
2462 emit_error = 1;
2463 }
2464 }
2465
2466 /* Emit code for zero-extension specified by ARG. */
2467
2468 static void
2469 ppc64_emit_zero_ext (int arg)
2470 {
2471 switch (arg)
2472 {
2473 case 8:
2474 EMIT_ASM ("rldicl 3,3,0,56");
2475 break;
2476 case 16:
2477 EMIT_ASM ("rldicl 3,3,0,48");
2478 break;
2479 case 32:
2480 EMIT_ASM ("rldicl 3,3,0,32");
2481 break;
2482 default:
2483 emit_error = 1;
2484 }
2485 }
2486
2487 /* TOP = !TOP
2488 i.e., TOP = (TOP == 0) ? 1 : 0; */
2489
2490 static void
2491 ppc64_emit_log_not (void)
2492 {
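/* cntlzd yields 64 only when TOP is zero; shifting right by 6 therefore
produces 1 for a zero TOP and 0 for any other value. */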
2493 EMIT_ASM ("cntlzd 3, 3 \n"
2494 "srdi 3, 3, 6 \n");
2495 }
2496
2497 /* TOP = stack[--sp] & TOP */
2498
2499 static void
2500 ppc64_emit_bit_and (void)
2501 {
2502 EMIT_ASM ("ldu 4, 8(30) \n"
2503 "and 3, 4, 3 \n");
2504 }
2505
2506 /* TOP = stack[--sp] | TOP */
2507
2508 static void
2509 ppc64_emit_bit_or (void)
2510 {
2511 EMIT_ASM ("ldu 4, 8(30) \n"
2512 "or 3, 4, 3 \n");
2513 }
2514
2515 /* TOP = stack[--sp] ^ TOP */
2516
2517 static void
2518 ppc64_emit_bit_xor (void)
2519 {
2520 EMIT_ASM ("ldu 4, 8(30) \n"
2521 "xor 3, 4, 3 \n");
2522 }
2523
2524 /* TOP = ~TOP
2525 i.e., TOP = ~(TOP | TOP) */
2526
2527 static void
2528 ppc64_emit_bit_not (void)
2529 {
2530 EMIT_ASM ("nor 3, 3, 3 \n");
2531 }
2532
2533 /* TOP = stack[--sp] == TOP */
2534
2535 static void
2536 ppc64_emit_equal (void)
2537 {
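/* The XOR leaves zero exactly when the two operands are equal; the
cntlzd/srdi pair then maps zero to 1 and every other value to 0. */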
2538 EMIT_ASM ("ldu 4, 8(30) \n"
2539 "xor 3, 3, 4 \n"
2540 "cntlzd 3, 3 \n"
2541 "srdi 3, 3, 6 \n");
2542 }
2543
2544 /* TOP = stack[--sp] < TOP
2545 (Signed comparison) */
2546
2547 static void
2548 ppc64_emit_less_signed (void)
2549 {
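/* The rlwinm extracts the LT bit of CR7 (bit 28 of the CR image read by
mfcr) into the least significant bit of r3. */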
2550 EMIT_ASM ("ldu 4, 8(30) \n"
2551 "cmpd 7, 4, 3 \n"
2552 "mfcr 3 \n"
2553 "rlwinm 3, 3, 29, 31, 31 \n");
2554 }
2555
2556 /* TOP = stack[--sp] < TOP
2557 (Unsigned comparison) */
2558
2559 static void
2560 ppc64_emit_less_unsigned (void)
2561 {
2562 EMIT_ASM ("ldu 4, 8(30) \n"
2563 "cmpld 7, 4, 3 \n"
2564 "mfcr 3 \n"
2565 "rlwinm 3, 3, 29, 31, 31 \n");
2566 }
2567
2568 /* Access the memory address in TOP with size SIZE.
2569 Zero-extend the read value. */
2570
2571 static void
2572 ppc64_emit_ref (int size)
2573 {
2574 switch (size)
2575 {
2576 case 1:
2577 EMIT_ASM ("lbz 3, 0(3)");
2578 break;
2579 case 2:
2580 EMIT_ASM ("lhz 3, 0(3)");
2581 break;
2582 case 4:
2583 EMIT_ASM ("lwz 3, 0(3)");
2584 break;
2585 case 8:
2586 EMIT_ASM ("ld 3, 0(3)");
2587 break;
2588 }
2589 }
2590
2591 /* TOP = NUM */
2592
2593 static void
2594 ppc64_emit_const (LONGEST num)
2595 {
2596 uint32_t buf[5];
2597 uint32_t *p = buf;
2598
2599 p += gen_limm (p, 3, num, 1);
2600
2601 emit_insns (buf, p - buf);
2602 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2603 }
2604
2605 /* Set TOP to the value of register REG by calling the get_raw_reg function
2606 with two arguments: the collected register buffer and the register number. */
2607
2608 static void
2609 ppc64v1_emit_reg (int reg)
2610 {
2611 uint32_t buf[15];
2612 uint32_t *p = buf;
2613
2614 /* fctx->regs is passed in r3 and then saved at -32(r31), which is 176(1). */
2615 p += GEN_LD (p, 3, 31, -32);
2616 p += GEN_LI (p, 4, reg);
2617 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2618 p += gen_call (p, get_raw_reg_func_addr (), 1, 1);
2619 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2620
2621 emit_insns (buf, p - buf);
2622 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2623 }
2624
2625 /* Likewise, for ELFv2. */
2626
2627 static void
2628 ppc64v2_emit_reg (int reg)
2629 {
2630 uint32_t buf[12];
2631 uint32_t *p = buf;
2632
2633 /* fctx->regs is passed in r3 and then saved at -32(r31), which is 176(1). */
2634 p += GEN_LD (p, 3, 31, -32);
2635 p += GEN_LI (p, 4, reg);
2636 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2637 p += gen_call (p, get_raw_reg_func_addr (), 1, 0);
2638 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2639
2640 emit_insns (buf, p - buf);
2641 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2642 }
2643
2644 /* TOP = stack[--sp] */
2645
2646 static void
2647 ppc64_emit_pop (void)
2648 {
2649 EMIT_ASM ("ldu 3, 8(30)");
2650 }
2651
2652 /* stack[sp++] = TOP
2653
2654 Because we may use up the bytecode stack, expand it by 8 more doublewords
2655 if needed. */
2656
2657 static void
2658 ppc64_emit_stack_flush (void)
2659 {
2660 /* Make sure the bytecode stack is big enough before the push.
2661 Otherwise, expand it by 64 more bytes. */
2662
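/* TOP is stored first; if fewer than 120 bytes (the 112-byte minimum frame
plus one doubleword) would then remain between the bytecode stack pointer
and the real stack pointer, the frame is grown by another 64 bytes (the stdu
stores r31, the saved entry SP, as the new back chain); finally r30 is moved
down to the next empty slot. */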
2663 EMIT_ASM (" std 3, 0(30) \n"
2664 " addi 4, 30, -(112 + 8) \n"
2665 " cmpd 7, 4, 1 \n"
2666 " bgt 7, 1f \n"
2667 " stdu 31, -64(1) \n"
2668 "1:addi 30, 30, -8 \n");
2669 }
2670
2671 /* Swap TOP and stack[sp-1] */
2672
2673 static void
2674 ppc64_emit_swap (void)
2675 {
2676 EMIT_ASM ("ld 4, 8(30) \n"
2677 "std 3, 8(30) \n"
2678 "mr 3, 4 \n");
2679 }
2680
2681 /* Call function FN - ELFv1. */
2682
2683 static void
2684 ppc64v1_emit_call (CORE_ADDR fn)
2685 {
2686 uint32_t buf[13];
2687 uint32_t *p = buf;
2688
2689 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2690 p += gen_call (p, fn, 1, 1);
2691 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2692
2693 emit_insns (buf, p - buf);
2694 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2695 }
2696
2697 /* Call function FN - ELFv2. */
2698
2699 static void
2700 ppc64v2_emit_call (CORE_ADDR fn)
2701 {
2702 uint32_t buf[10];
2703 uint32_t *p = buf;
2704
2705 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2706 p += gen_call (p, fn, 1, 0);
2707 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2708
2709 emit_insns (buf, p - buf);
2710 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2711 }
2712
2713 /* FN's prototype is `LONGEST(*fn)(int)'.
2714 TOP = fn (arg1)
2715 */
2716
2717 static void
2718 ppc64v1_emit_int_call_1 (CORE_ADDR fn, int arg1)
2719 {
2720 uint32_t buf[13];
2721 uint32_t *p = buf;
2722
2723 /* Set up the argument. arg1 is a 16-bit value. */
2724 p += gen_limm (p, 3, arg1, 1);
2725 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2726 p += gen_call (p, fn, 1, 1);
2727 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2728
2729 emit_insns (buf, p - buf);
2730 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2731 }
2732
2733 /* Likewise for ELFv2. */
2734
2735 static void
2736 ppc64v2_emit_int_call_1 (CORE_ADDR fn, int arg1)
2737 {
2738 uint32_t buf[10];
2739 uint32_t *p = buf;
2740
2741 /* Set up the argument. arg1 is a 16-bit value. */
2742 p += gen_limm (p, 3, arg1, 1);
2743 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2744 p += gen_call (p, fn, 1, 0);
2745 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2746
2747 emit_insns (buf, p - buf);
2748 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2749 }
2750
2751 /* FN's prototype is `void(*fn)(int,LONGEST)'.
2752 fn (arg1, TOP)
2753
2754 TOP should be preserved/restored before/after the call. */
2755
2756 static void
2757 ppc64v1_emit_void_call_2 (CORE_ADDR fn, int arg1)
2758 {
2759 uint32_t buf[17];
2760 uint32_t *p = buf;
2761
2762 /* Save TOP. 0(30) is the next empty slot. */
2763 p += GEN_STD (p, 3, 30, 0);
2764
2765 /* Set up the argument. arg1 is a 16-bit value. */
2766 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
2767 p += gen_limm (p, 3, arg1, 1);
2768 p += GEN_STD (p, 2, 1, 40); /* Save TOC. */
2769 p += gen_call (p, fn, 1, 1);
2770 p += GEN_LD (p, 2, 1, 40); /* Restore TOC. */
2771
2772 /* Restore TOP */
2773 p += GEN_LD (p, 3, 30, 0);
2774
2775 emit_insns (buf, p - buf);
2776 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2777 }
2778
2779 /* Likewise for ELFv2. */
2780
2781 static void
2782 ppc64v2_emit_void_call_2 (CORE_ADDR fn, int arg1)
2783 {
2784 uint32_t buf[14];
2785 uint32_t *p = buf;
2786
2787 /* Save TOP. 0(30) is the next empty slot. */
2788 p += GEN_STD (p, 3, 30, 0);
2789
2790 /* Set up the argument. arg1 is a 16-bit value. */
2791 p += GEN_MR (p, 4, 3); /* mr r4, r3 */
2792 p += gen_limm (p, 3, arg1, 1);
2793 p += GEN_STD (p, 2, 1, 24); /* Save TOC. */
2794 p += gen_call (p, fn, 1, 0);
2795 p += GEN_LD (p, 2, 1, 24); /* Restore TOC. */
2796
2797 /* Restore TOP */
2798 p += GEN_LD (p, 3, 30, 0);
2799
2800 emit_insns (buf, p - buf);
2801 gdb_assert ((p - buf) <= (sizeof (buf) / sizeof (*buf)));
2802 }
2803
2804 /* If TOP is true, goto somewhere. Otherwise, just fall-through. */
2805
2806 static void
2807 ppc64_emit_if_goto (int *offset_p, int *size_p)
2808 {
2809 EMIT_ASM ("cmpdi 7, 3, 0 \n"
2810 "ldu 3, 8(30) \n"
2811 "1:bne 7, 1b \n");
2812
2813 if (offset_p)
2814 *offset_p = 8;
2815 if (size_p)
2816 *size_p = 14;
2817 }
2818
2819 /* Goto if stack[--sp] == TOP */
2820
2821 static void
2822 ppc64_emit_eq_goto (int *offset_p, int *size_p)
2823 {
2824 EMIT_ASM ("ldu 4, 8(30) \n"
2825 "cmpd 7, 4, 3 \n"
2826 "ldu 3, 8(30) \n"
2827 "1:beq 7, 1b \n");
2828
2829 if (offset_p)
2830 *offset_p = 12;
2831 if (size_p)
2832 *size_p = 14;
2833 }
2834
2835 /* Goto if stack[--sp] != TOP */
2836
2837 static void
2838 ppc64_emit_ne_goto (int *offset_p, int *size_p)
2839 {
2840 EMIT_ASM ("ldu 4, 8(30) \n"
2841 "cmpd 7, 4, 3 \n"
2842 "ldu 3, 8(30) \n"
2843 "1:bne 7, 1b \n");
2844
2845 if (offset_p)
2846 *offset_p = 12;
2847 if (size_p)
2848 *size_p = 14;
2849 }
2850
2851 /* Goto if stack[--sp] < TOP */
2852
2853 static void
2854 ppc64_emit_lt_goto (int *offset_p, int *size_p)
2855 {
2856 EMIT_ASM ("ldu 4, 8(30) \n"
2857 "cmpd 7, 4, 3 \n"
2858 "ldu 3, 8(30) \n"
2859 "1:blt 7, 1b \n");
2860
2861 if (offset_p)
2862 *offset_p = 12;
2863 if (size_p)
2864 *size_p = 14;
2865 }
2866
2867 /* Goto if stack[--sp] <= TOP */
2868
2869 static void
2870 ppc64_emit_le_goto (int *offset_p, int *size_p)
2871 {
2872 EMIT_ASM ("ldu 4, 8(30) \n"
2873 "cmpd 7, 4, 3 \n"
2874 "ldu 3, 8(30) \n"
2875 "1:ble 7, 1b \n");
2876
2877 if (offset_p)
2878 *offset_p = 12;
2879 if (size_p)
2880 *size_p = 14;
2881 }
2882
2883 /* Goto if stack[--sp] > TOP */
2884
2885 static void
2886 ppc64_emit_gt_goto (int *offset_p, int *size_p)
2887 {
2888 EMIT_ASM ("ldu 4, 8(30) \n"
2889 "cmpd 7, 4, 3 \n"
2890 "ldu 3, 8(30) \n"
2891 "1:bgt 7, 1b \n");
2892
2893 if (offset_p)
2894 *offset_p = 12;
2895 if (size_p)
2896 *size_p = 14;
2897 }
2898
2899 /* Goto if stack[--sp] >= TOP */
2900
2901 static void
2902 ppc64_emit_ge_goto (int *offset_p, int *size_p)
2903 {
2904 EMIT_ASM ("ldu 4, 8(30) \n"
2905 "cmpd 7, 4, 3 \n"
2906 "ldu 3, 8(30) \n"
2907 "1:bge 7, 1b \n");
2908
2909 if (offset_p)
2910 *offset_p = 12;
2911 if (size_p)
2912 *size_p = 14;
2913 }
2914
2915 /* Table of emit ops for 64-bit ELFv1. */
2916
2917 static struct emit_ops ppc64v1_emit_ops_impl =
2918 {
2919 ppc64v1_emit_prologue,
2920 ppc64_emit_epilogue,
2921 ppc64_emit_add,
2922 ppc64_emit_sub,
2923 ppc64_emit_mul,
2924 ppc64_emit_lsh,
2925 ppc64_emit_rsh_signed,
2926 ppc64_emit_rsh_unsigned,
2927 ppc64_emit_ext,
2928 ppc64_emit_log_not,
2929 ppc64_emit_bit_and,
2930 ppc64_emit_bit_or,
2931 ppc64_emit_bit_xor,
2932 ppc64_emit_bit_not,
2933 ppc64_emit_equal,
2934 ppc64_emit_less_signed,
2935 ppc64_emit_less_unsigned,
2936 ppc64_emit_ref,
2937 ppc64_emit_if_goto,
2938 ppc_emit_goto,
2939 ppc_write_goto_address,
2940 ppc64_emit_const,
2941 ppc64v1_emit_call,
2942 ppc64v1_emit_reg,
2943 ppc64_emit_pop,
2944 ppc64_emit_stack_flush,
2945 ppc64_emit_zero_ext,
2946 ppc64_emit_swap,
2947 ppc_emit_stack_adjust,
2948 ppc64v1_emit_int_call_1,
2949 ppc64v1_emit_void_call_2,
2950 ppc64_emit_eq_goto,
2951 ppc64_emit_ne_goto,
2952 ppc64_emit_lt_goto,
2953 ppc64_emit_le_goto,
2954 ppc64_emit_gt_goto,
2955 ppc64_emit_ge_goto
2956 };
2957
2958 /* Table of emit ops for 64-bit ELFv2. */
2959
2960 static struct emit_ops ppc64v2_emit_ops_impl =
2961 {
2962 ppc64v2_emit_prologue,
2963 ppc64_emit_epilogue,
2964 ppc64_emit_add,
2965 ppc64_emit_sub,
2966 ppc64_emit_mul,
2967 ppc64_emit_lsh,
2968 ppc64_emit_rsh_signed,
2969 ppc64_emit_rsh_unsigned,
2970 ppc64_emit_ext,
2971 ppc64_emit_log_not,
2972 ppc64_emit_bit_and,
2973 ppc64_emit_bit_or,
2974 ppc64_emit_bit_xor,
2975 ppc64_emit_bit_not,
2976 ppc64_emit_equal,
2977 ppc64_emit_less_signed,
2978 ppc64_emit_less_unsigned,
2979 ppc64_emit_ref,
2980 ppc64_emit_if_goto,
2981 ppc_emit_goto,
2982 ppc_write_goto_address,
2983 ppc64_emit_const,
2984 ppc64v2_emit_call,
2985 ppc64v2_emit_reg,
2986 ppc64_emit_pop,
2987 ppc64_emit_stack_flush,
2988 ppc64_emit_zero_ext,
2989 ppc64_emit_swap,
2990 ppc_emit_stack_adjust,
2991 ppc64v2_emit_int_call_1,
2992 ppc64v2_emit_void_call_2,
2993 ppc64_emit_eq_goto,
2994 ppc64_emit_ne_goto,
2995 ppc64_emit_lt_goto,
2996 ppc64_emit_le_goto,
2997 ppc64_emit_gt_goto,
2998 ppc64_emit_ge_goto
2999 };
3000
3001 #endif
3002
3003 /* Implementation of linux_target_ops method "emit_ops". */
3004
3005 static struct emit_ops *
3006 ppc_emit_ops (void)
3007 {
3008 #ifdef __powerpc64__
3009 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3010
3011 if (register_size (regcache->tdesc, 0) == 8)
3012 {
3013 if (is_elfv2_inferior ())
3014 return &ppc64v2_emit_ops_impl;
3015 else
3016 return &ppc64v1_emit_ops_impl;
3017 }
3018 #endif
3019 return &ppc_emit_ops_impl;
3020 }
3021
3022 /* Implementation of linux_target_ops method "get_ipa_tdesc_idx". */
3023
3024 static int
3025 ppc_get_ipa_tdesc_idx (void)
3026 {
3027 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3028 const struct target_desc *tdesc = regcache->tdesc;
3029
3030 #ifdef __powerpc64__
3031 if (tdesc == tdesc_powerpc_64l)
3032 return PPC_TDESC_BASE;
3033 if (tdesc == tdesc_powerpc_altivec64l)
3034 return PPC_TDESC_ALTIVEC;
3035 if (tdesc == tdesc_powerpc_cell64l)
3036 return PPC_TDESC_CELL;
3037 if (tdesc == tdesc_powerpc_vsx64l)
3038 return PPC_TDESC_VSX;
3039 if (tdesc == tdesc_powerpc_isa205_64l)
3040 return PPC_TDESC_ISA205;
3041 if (tdesc == tdesc_powerpc_isa205_altivec64l)
3042 return PPC_TDESC_ISA205_ALTIVEC;
3043 if (tdesc == tdesc_powerpc_isa205_vsx64l)
3044 return PPC_TDESC_ISA205_VSX;
3045 #endif
3046
3047 if (tdesc == tdesc_powerpc_32l)
3048 return PPC_TDESC_BASE;
3049 if (tdesc == tdesc_powerpc_altivec32l)
3050 return PPC_TDESC_ALTIVEC;
3051 if (tdesc == tdesc_powerpc_cell32l)
3052 return PPC_TDESC_CELL;
3053 if (tdesc == tdesc_powerpc_vsx32l)
3054 return PPC_TDESC_VSX;
3055 if (tdesc == tdesc_powerpc_isa205_32l)
3056 return PPC_TDESC_ISA205;
3057 if (tdesc == tdesc_powerpc_isa205_altivec32l)
3058 return PPC_TDESC_ISA205_ALTIVEC;
3059 if (tdesc == tdesc_powerpc_isa205_vsx32l)
3060 return PPC_TDESC_ISA205_VSX;
3061 if (tdesc == tdesc_powerpc_e500l)
3062 return PPC_TDESC_E500;
3063
3064 return 0;
3065 }
3066
3067 struct linux_target_ops the_low_target = {
3068 ppc_arch_setup,
3069 ppc_regs_info,
3070 ppc_cannot_fetch_register,
3071 ppc_cannot_store_register,
3072 NULL, /* fetch_register */
3073 ppc_get_pc,
3074 ppc_set_pc,
3075 NULL, /* breakpoint_kind_from_pc */
3076 ppc_sw_breakpoint_from_kind,
3077 NULL,
3078 0,
3079 ppc_breakpoint_at,
3080 ppc_supports_z_point_type,
3081 ppc_insert_point,
3082 ppc_remove_point,
3083 NULL,
3084 NULL,
3085 ppc_collect_ptrace_register,
3086 ppc_supply_ptrace_register,
3087 NULL, /* siginfo_fixup */
3088 NULL, /* new_process */
3089 NULL, /* delete_process */
3090 NULL, /* new_thread */
3091 NULL, /* delete_thread */
3092 NULL, /* new_fork */
3093 NULL, /* prepare_to_resume */
3094 NULL, /* process_qsupported */
3095 ppc_supports_tracepoints,
3096 ppc_get_thread_area,
3097 ppc_install_fast_tracepoint_jump_pad,
3098 ppc_emit_ops,
3099 ppc_get_min_fast_tracepoint_insn_len,
3100 NULL, /* supports_range_stepping */
3101 NULL, /* breakpoint_kind_from_current_state */
3102 ppc_supports_hardware_single_step,
3103 NULL, /* get_syscall_trapinfo */
3104 ppc_get_ipa_tdesc_idx,
3105 };
3106
3107 void
3108 initialize_low_arch (void)
3109 {
3110 /* Initialize the Linux target descriptions. */
3111
3112 init_registers_powerpc_32l ();
3113 init_registers_powerpc_altivec32l ();
3114 init_registers_powerpc_cell32l ();
3115 init_registers_powerpc_vsx32l ();
3116 init_registers_powerpc_isa205_32l ();
3117 init_registers_powerpc_isa205_altivec32l ();
3118 init_registers_powerpc_isa205_vsx32l ();
3119 init_registers_powerpc_e500l ();
3120 #if __powerpc64__
3121 init_registers_powerpc_64l ();
3122 init_registers_powerpc_altivec64l ();
3123 init_registers_powerpc_cell64l ();
3124 init_registers_powerpc_vsx64l ();
3125 init_registers_powerpc_isa205_64l ();
3126 init_registers_powerpc_isa205_altivec64l ();
3127 init_registers_powerpc_isa205_vsx64l ();
3128 #endif
3129
3130 initialize_regsets_info (&ppc_regsets_info);
3131 }